diff --git a/.editorconfig b/.editorconfig
new file mode 100644
index 000000000000..011673016c26
--- /dev/null
+++ b/.editorconfig
@@ -0,0 +1,693 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+[*]
+charset = utf-8
+end_of_line = lf
+indent_size = 2
+indent_style = space
+insert_final_newline = true
+max_line_length = 100
+tab_width = 2
+ij_continuation_indent_size = 2
+ij_formatter_off_tag = @formatter:off
+ij_formatter_on_tag = @formatter:on
+ij_formatter_tags_enabled = false
+ij_smart_tabs = false
+ij_wrap_on_typing = false
+
+[*.css]
+ij_css_align_closing_brace_with_properties = false
+ij_css_blank_lines_around_nested_selector = 1
+ij_css_blank_lines_between_blocks = 1
+ij_css_brace_placement = 0
+ij_css_hex_color_long_format = false
+ij_css_hex_color_lower_case = false
+ij_css_hex_color_short_format = false
+ij_css_hex_color_upper_case = false
+ij_css_keep_blank_lines_in_code = 2
+ij_css_keep_indents_on_empty_lines = false
+ij_css_keep_single_line_blocks = false
+ij_css_properties_order = font,font-family,font-size,font-weight,font-style,font-variant,font-size-adjust,font-stretch,line-height,position,z-index,top,right,bottom,left,display,visibility,float,clear,overflow,overflow-x,overflow-y,clip,zoom,align-content,align-items,align-self,flex,flex-flow,flex-basis,flex-direction,flex-grow,flex-shrink,flex-wrap,justify-content,order,box-sizing,width,min-width,max-width,height,min-height,max-height,margin,margin-top,margin-right,margin-bottom,margin-left,padding,padding-top,padding-right,padding-bottom,padding-left,table-layout,empty-cells,caption-side,border-spacing,border-collapse,list-style,list-style-position,list-style-type,list-style-image,content,quotes,counter-reset,counter-increment,resize,cursor,user-select,nav-index,nav-up,nav-right,nav-down,nav-left,transition,transition-delay,transition-timing-function,transition-duration,transition-property,transform,transform-origin,animation,animation-name,animation-duration,animation-play-state,animation-timing-function,animation-delay,animation-iteration-count,animation-direction,text-align,text-align-last,vertical-align,white-space,text-decoration,text-emphasis,text-emphasis-color,text-emphasis-style,text-emphasis-position,text-indent,text-justify,letter-spacing,word-spacing,text-outline,text-transform,text-wrap,text-overflow,text-overflow-ellipsis,text-overflow-mode,word-wrap,word-break,tab-size,hyphens,pointer-events,opacity,color,border,border-width,border-style,border-color,border-top,border-top-width,border-top-style,border-top-color,border-right,border-right-width,border-right-style,border-right-color,border-bottom,border-bottom-width,border-bottom-style,border-bottom-color,border-left,border-left-width,border-left-style,border-left-color,border-radius,border-top-left-radius,border-top-right-radius,border-bottom-right-radius,border-bottom-left-radius,border-image,border-image-source,border-image-slice,border-image-width,border-image-outset,border-image-repeat,outline,outline-width,outline-style,outline-color,outline-offset,background,background-color,background-image,background-repeat,background-attachment,background-position,background-position-x,background-position-y,background-clip,background-origin,background-size,box-decoration-break,box-shadow,text-shadow
+ij_css_space_after_colon = true
+ij_css_space_before_opening_brace = true
+ij_css_value_alignment = 0
+
+[*.java]
+ij_java_align_consecutive_assignments = false
+ij_java_align_consecutive_variable_declarations = false
+ij_java_align_group_field_declarations = false
+ij_java_align_multiline_annotation_parameters = false
+ij_java_align_multiline_array_initializer_expression = false
+ij_java_align_multiline_assignment = false
+ij_java_align_multiline_binary_operation = false
+ij_java_align_multiline_chained_methods = false
+ij_java_align_multiline_extends_list = false
+ij_java_align_multiline_for = true
+ij_java_align_multiline_method_parentheses = false
+ij_java_align_multiline_parameters = false
+ij_java_align_multiline_parameters_in_calls = false
+ij_java_align_multiline_parenthesized_expression = false
+ij_java_align_multiline_resources = false
+ij_java_align_multiline_ternary_operation = false
+ij_java_align_multiline_throws_list = false
+ij_java_align_subsequent_simple_methods = false
+ij_java_align_throws_keyword = false
+ij_java_annotation_parameter_wrap = off
+ij_java_array_initializer_new_line_after_left_brace = false
+ij_java_array_initializer_right_brace_on_new_line = false
+ij_java_array_initializer_wrap = normal
+ij_java_assert_statement_colon_on_next_line = false
+ij_java_assert_statement_wrap = off
+ij_java_assignment_wrap = normal
+ij_java_binary_operation_sign_on_next_line = true
+ij_java_binary_operation_wrap = normal
+ij_java_blank_lines_after_anonymous_class_header = 0
+ij_java_blank_lines_after_class_header = 0
+ij_java_blank_lines_after_imports = 1
+ij_java_blank_lines_after_package = 1
+ij_java_blank_lines_around_class = 1
+ij_java_blank_lines_around_field = 0
+ij_java_blank_lines_around_field_in_interface = 0
+ij_java_blank_lines_around_initializer = 1
+ij_java_blank_lines_around_method = 1
+ij_java_blank_lines_around_method_in_interface = 1
+ij_java_blank_lines_before_class_end = 0
+ij_java_blank_lines_before_imports = 1
+ij_java_blank_lines_before_method_body = 0
+ij_java_blank_lines_before_package = 0
+ij_java_block_brace_style = end_of_line
+ij_java_block_comment_at_first_column = true
+ij_java_call_parameters_new_line_after_left_paren = false
+ij_java_call_parameters_right_paren_on_new_line = false
+ij_java_call_parameters_wrap = normal
+ij_java_case_statement_on_separate_line = true
+ij_java_catch_on_new_line = false
+ij_java_class_annotation_wrap = normal
+ij_java_class_brace_style = end_of_line
+ij_java_class_count_to_use_import_on_demand = 999
+ij_java_class_names_in_javadoc = 1
+ij_java_do_not_indent_top_level_class_members = false
+ij_java_do_not_wrap_after_single_annotation = false
+ij_java_do_while_brace_force = always
+ij_java_doc_add_blank_line_after_description = true
+ij_java_doc_add_blank_line_after_param_comments = false
+ij_java_doc_add_blank_line_after_return = false
+ij_java_doc_add_p_tag_on_empty_lines = true
+ij_java_doc_align_exception_comments = true
+ij_java_doc_align_param_comments = true
+ij_java_doc_do_not_wrap_if_one_line = false
+ij_java_doc_enable_formatting = true
+ij_java_doc_enable_leading_asterisks = true
+ij_java_doc_indent_on_continuation = false
+ij_java_doc_keep_empty_lines = false
+ij_java_doc_keep_empty_parameter_tag = true
+ij_java_doc_keep_empty_return_tag = true
+ij_java_doc_keep_empty_throws_tag = true
+ij_java_doc_keep_invalid_tags = true
+ij_java_doc_param_description_on_new_line = false
+ij_java_doc_preserve_line_breaks = false
+ij_java_doc_use_throws_not_exception_tag = true
+ij_java_else_on_new_line = false
+ij_java_entity_dd_suffix = EJB
+ij_java_entity_eb_suffix = Bean
+ij_java_entity_hi_suffix = Home
+ij_java_entity_lhi_prefix = Local
+ij_java_entity_lhi_suffix = Home
+ij_java_entity_li_prefix = Local
+ij_java_entity_pk_class = java.lang.String
+ij_java_entity_vo_suffix = VO
+ij_java_enum_constants_wrap = off
+ij_java_extends_keyword_wrap = normal
+ij_java_extends_list_wrap = normal
+ij_java_field_annotation_wrap = normal
+ij_java_finally_on_new_line = false
+ij_java_for_brace_force = always
+ij_java_for_statement_new_line_after_left_paren = false
+ij_java_for_statement_right_paren_on_new_line = false
+ij_java_for_statement_wrap = off
+ij_java_generate_final_locals = false
+ij_java_generate_final_parameters = false
+ij_java_if_brace_force = always
+ij_java_imports_layout = $*,*,org.apache.hbase.thirdparty.**,org.apache.hadoop.hbase.shaded.**
+ij_java_indent_case_from_switch = true
+ij_java_insert_inner_class_imports = false
+ij_java_insert_override_annotation = true
+ij_java_keep_blank_lines_before_right_brace = 1
+ij_java_keep_blank_lines_between_package_declaration_and_header = 2
+ij_java_keep_blank_lines_in_code = 1
+ij_java_keep_blank_lines_in_declarations = 1
+ij_java_keep_control_statement_in_one_line = true
+ij_java_keep_first_column_comment = false
+ij_java_keep_indents_on_empty_lines = false
+ij_java_keep_line_breaks = false
+ij_java_keep_multiple_expressions_in_one_line = false
+ij_java_keep_simple_blocks_in_one_line = false
+ij_java_keep_simple_classes_in_one_line = false
+ij_java_keep_simple_lambdas_in_one_line = false
+ij_java_keep_simple_methods_in_one_line = false
+ij_java_lambda_brace_style = end_of_line
+ij_java_layout_static_imports_separately = true
+ij_java_line_comment_add_space = false
+ij_java_line_comment_at_first_column = true
+ij_java_message_dd_suffix = EJB
+ij_java_message_eb_suffix = Bean
+ij_java_method_annotation_wrap = normal
+ij_java_method_brace_style = end_of_line
+ij_java_method_call_chain_wrap = normal
+ij_java_method_parameters_new_line_after_left_paren = false
+ij_java_method_parameters_right_paren_on_new_line = false
+ij_java_method_parameters_wrap = normal
+ij_java_modifier_list_wrap = false
+ij_java_names_count_to_use_import_on_demand = 999
+ij_java_parameter_annotation_wrap = normal
+ij_java_parentheses_expression_new_line_after_left_paren = false
+ij_java_parentheses_expression_right_paren_on_new_line = false
+ij_java_place_assignment_sign_on_next_line = false
+ij_java_prefer_longer_names = true
+ij_java_prefer_parameters_wrap = false
+ij_java_repeat_synchronized = true
+ij_java_replace_instanceof_and_cast = false
+ij_java_replace_null_check = true
+ij_java_replace_sum_lambda_with_method_ref = true
+ij_java_resource_list_new_line_after_left_paren = false
+ij_java_resource_list_right_paren_on_new_line = false
+ij_java_resource_list_wrap = on_every_item
+ij_java_session_dd_suffix = EJB
+ij_java_session_eb_suffix = Bean
+ij_java_session_hi_suffix = Home
+ij_java_session_lhi_prefix = Local
+ij_java_session_lhi_suffix = Home
+ij_java_session_li_prefix = Local
+ij_java_session_si_suffix = Service
+ij_java_space_after_closing_angle_bracket_in_type_argument = false
+ij_java_space_after_colon = true
+ij_java_space_after_comma = true
+ij_java_space_after_comma_in_type_arguments = true
+ij_java_space_after_for_semicolon = true
+ij_java_space_after_quest = true
+ij_java_space_after_type_cast = true
+ij_java_space_before_annotation_array_initializer_left_brace = false
+ij_java_space_before_annotation_parameter_list = false
+ij_java_space_before_array_initializer_left_brace = true
+ij_java_space_before_catch_keyword = true
+ij_java_space_before_catch_left_brace = true
+ij_java_space_before_catch_parentheses = true
+ij_java_space_before_class_left_brace = true
+ij_java_space_before_colon = true
+ij_java_space_before_colon_in_foreach = true
+ij_java_space_before_comma = false
+ij_java_space_before_do_left_brace = true
+ij_java_space_before_else_keyword = true
+ij_java_space_before_else_left_brace = true
+ij_java_space_before_finally_keyword = true
+ij_java_space_before_finally_left_brace = true
+ij_java_space_before_for_left_brace = true
+ij_java_space_before_for_parentheses = true
+ij_java_space_before_for_semicolon = false
+ij_java_space_before_if_left_brace = true
+ij_java_space_before_if_parentheses = true
+ij_java_space_before_method_call_parentheses = false
+ij_java_space_before_method_left_brace = true
+ij_java_space_before_method_parentheses = false
+ij_java_space_before_opening_angle_bracket_in_type_parameter = false
+ij_java_space_before_quest = true
+ij_java_space_before_switch_left_brace = true
+ij_java_space_before_switch_parentheses = true
+ij_java_space_before_synchronized_left_brace = true
+ij_java_space_before_synchronized_parentheses = true
+ij_java_space_before_try_left_brace = true
+ij_java_space_before_try_parentheses = true
+ij_java_space_before_type_parameter_list = false
+ij_java_space_before_while_keyword = true
+ij_java_space_before_while_left_brace = true
+ij_java_space_before_while_parentheses = true
+ij_java_space_inside_one_line_enum_braces = false
+ij_java_space_within_empty_array_initializer_braces = false
+ij_java_space_within_empty_method_call_parentheses = false
+ij_java_space_within_empty_method_parentheses = false
+ij_java_spaces_around_additive_operators = true
+ij_java_spaces_around_assignment_operators = true
+ij_java_spaces_around_bitwise_operators = true
+ij_java_spaces_around_equality_operators = true
+ij_java_spaces_around_lambda_arrow = true
+ij_java_spaces_around_logical_operators = true
+ij_java_spaces_around_method_ref_dbl_colon = false
+ij_java_spaces_around_multiplicative_operators = true
+ij_java_spaces_around_relational_operators = true
+ij_java_spaces_around_shift_operators = true
+ij_java_spaces_around_type_bounds_in_type_parameters = true
+ij_java_spaces_around_unary_operator = false
+ij_java_spaces_within_angle_brackets = false
+ij_java_spaces_within_annotation_parentheses = false
+ij_java_spaces_within_array_initializer_braces = true
+ij_java_spaces_within_braces = false
+ij_java_spaces_within_brackets = false
+ij_java_spaces_within_cast_parentheses = false
+ij_java_spaces_within_catch_parentheses = false
+ij_java_spaces_within_for_parentheses = false
+ij_java_spaces_within_if_parentheses = false
+ij_java_spaces_within_method_call_parentheses = false
+ij_java_spaces_within_method_parentheses = false
+ij_java_spaces_within_parentheses = false
+ij_java_spaces_within_switch_parentheses = false
+ij_java_spaces_within_synchronized_parentheses = false
+ij_java_spaces_within_try_parentheses = false
+ij_java_spaces_within_while_parentheses = false
+ij_java_special_else_if_treatment = true
+ij_java_subclass_name_suffix = Impl
+ij_java_ternary_operation_signs_on_next_line = false
+ij_java_ternary_operation_wrap = on_every_item
+ij_java_test_name_suffix = Test
+ij_java_throws_keyword_wrap = normal
+ij_java_throws_list_wrap = normal
+ij_java_use_external_annotations = false
+ij_java_use_fq_class_names = false
+ij_java_use_single_class_imports = true
+ij_java_variable_annotation_wrap = normal
+ij_java_visibility = public
+ij_java_while_brace_force = always
+ij_java_while_on_new_line = false
+ij_java_wrap_comments = false
+ij_java_wrap_first_method_in_call_chain = false
+ij_java_wrap_long_lines = false
+
+[*.proto]
+ij_proto_keep_indents_on_empty_lines = false
+
+[.editorconfig]
+ij_editorconfig_align_group_field_declarations = false
+ij_editorconfig_space_after_colon = false
+ij_editorconfig_space_after_comma = true
+ij_editorconfig_space_before_colon = false
+ij_editorconfig_space_before_comma = false
+ij_editorconfig_spaces_around_assignment_operators = true
+
+[{*.cjs,*.js}]
+ij_javascript_align_imports = false
+ij_javascript_align_multiline_array_initializer_expression = false
+ij_javascript_align_multiline_binary_operation = false
+ij_javascript_align_multiline_chained_methods = false
+ij_javascript_align_multiline_extends_list = false
+ij_javascript_align_multiline_for = true
+ij_javascript_align_multiline_parameters = true
+ij_javascript_align_multiline_parameters_in_calls = false
+ij_javascript_align_multiline_ternary_operation = false
+ij_javascript_align_object_properties = 0
+ij_javascript_align_union_types = false
+ij_javascript_align_var_statements = 0
+ij_javascript_array_initializer_new_line_after_left_brace = false
+ij_javascript_array_initializer_right_brace_on_new_line = false
+ij_javascript_array_initializer_wrap = off
+ij_javascript_assignment_wrap = off
+ij_javascript_binary_operation_sign_on_next_line = false
+ij_javascript_binary_operation_wrap = off
+ij_javascript_blacklist_imports = rxjs/Rx,node_modules/**/*,@angular/material,@angular/material/typings/**
+ij_javascript_blank_lines_after_imports = 1
+ij_javascript_blank_lines_around_class = 1
+ij_javascript_blank_lines_around_field = 0
+ij_javascript_blank_lines_around_function = 1
+ij_javascript_blank_lines_around_method = 1
+ij_javascript_block_brace_style = end_of_line
+ij_javascript_call_parameters_new_line_after_left_paren = false
+ij_javascript_call_parameters_right_paren_on_new_line = false
+ij_javascript_call_parameters_wrap = off
+ij_javascript_catch_on_new_line = false
+ij_javascript_chained_call_dot_on_new_line = true
+ij_javascript_class_brace_style = end_of_line
+ij_javascript_comma_on_new_line = false
+ij_javascript_do_while_brace_force = never
+ij_javascript_else_on_new_line = false
+ij_javascript_enforce_trailing_comma = keep
+ij_javascript_extends_keyword_wrap = off
+ij_javascript_extends_list_wrap = off
+ij_javascript_field_prefix = _
+ij_javascript_file_name_style = relaxed
+ij_javascript_finally_on_new_line = false
+ij_javascript_for_brace_force = never
+ij_javascript_for_statement_new_line_after_left_paren = false
+ij_javascript_for_statement_right_paren_on_new_line = false
+ij_javascript_for_statement_wrap = off
+ij_javascript_force_quote_style = false
+ij_javascript_force_semicolon_style = false
+ij_javascript_function_expression_brace_style = end_of_line
+ij_javascript_if_brace_force = never
+ij_javascript_import_merge_members = global
+ij_javascript_import_prefer_absolute_path = global
+ij_javascript_import_sort_members = true
+ij_javascript_import_sort_module_name = false
+ij_javascript_import_use_node_resolution = true
+ij_javascript_imports_wrap = on_every_item
+ij_javascript_indent_case_from_switch = true
+ij_javascript_indent_chained_calls = true
+ij_javascript_indent_package_children = 0
+ij_javascript_jsx_attribute_value = braces
+ij_javascript_keep_blank_lines_in_code = 2
+ij_javascript_keep_first_column_comment = true
+ij_javascript_keep_indents_on_empty_lines = false
+ij_javascript_keep_line_breaks = true
+ij_javascript_keep_simple_blocks_in_one_line = false
+ij_javascript_keep_simple_methods_in_one_line = false
+ij_javascript_line_comment_add_space = true
+ij_javascript_line_comment_at_first_column = false
+ij_javascript_method_brace_style = end_of_line
+ij_javascript_method_call_chain_wrap = off
+ij_javascript_method_parameters_new_line_after_left_paren = false
+ij_javascript_method_parameters_right_paren_on_new_line = false
+ij_javascript_method_parameters_wrap = off
+ij_javascript_object_literal_wrap = on_every_item
+ij_javascript_parentheses_expression_new_line_after_left_paren = false
+ij_javascript_parentheses_expression_right_paren_on_new_line = false
+ij_javascript_place_assignment_sign_on_next_line = false
+ij_javascript_prefer_as_type_cast = false
+ij_javascript_prefer_parameters_wrap = false
+ij_javascript_reformat_c_style_comments = false
+ij_javascript_space_after_colon = true
+ij_javascript_space_after_comma = true
+ij_javascript_space_after_dots_in_rest_parameter = false
+ij_javascript_space_after_generator_mult = true
+ij_javascript_space_after_property_colon = true
+ij_javascript_space_after_quest = true
+ij_javascript_space_after_type_colon = true
+ij_javascript_space_after_unary_not = false
+ij_javascript_space_before_async_arrow_lparen = true
+ij_javascript_space_before_catch_keyword = true
+ij_javascript_space_before_catch_left_brace = true
+ij_javascript_space_before_catch_parentheses = true
+ij_javascript_space_before_class_lbrace = true
+ij_javascript_space_before_class_left_brace = true
+ij_javascript_space_before_colon = true
+ij_javascript_space_before_comma = false
+ij_javascript_space_before_do_left_brace = true
+ij_javascript_space_before_else_keyword = true
+ij_javascript_space_before_else_left_brace = true
+ij_javascript_space_before_finally_keyword = true
+ij_javascript_space_before_finally_left_brace = true
+ij_javascript_space_before_for_left_brace = true
+ij_javascript_space_before_for_parentheses = true
+ij_javascript_space_before_for_semicolon = false
+ij_javascript_space_before_function_left_parenth = true
+ij_javascript_space_before_generator_mult = false
+ij_javascript_space_before_if_left_brace = true
+ij_javascript_space_before_if_parentheses = true
+ij_javascript_space_before_method_call_parentheses = false
+ij_javascript_space_before_method_left_brace = true
+ij_javascript_space_before_method_parentheses = false
+ij_javascript_space_before_property_colon = false
+ij_javascript_space_before_quest = true
+ij_javascript_space_before_switch_left_brace = true
+ij_javascript_space_before_switch_parentheses = true
+ij_javascript_space_before_try_left_brace = true
+ij_javascript_space_before_type_colon = false
+ij_javascript_space_before_unary_not = false
+ij_javascript_space_before_while_keyword = true
+ij_javascript_space_before_while_left_brace = true
+ij_javascript_space_before_while_parentheses = true
+ij_javascript_spaces_around_additive_operators = true
+ij_javascript_spaces_around_arrow_function_operator = true
+ij_javascript_spaces_around_assignment_operators = true
+ij_javascript_spaces_around_bitwise_operators = true
+ij_javascript_spaces_around_equality_operators = true
+ij_javascript_spaces_around_logical_operators = true
+ij_javascript_spaces_around_multiplicative_operators = true
+ij_javascript_spaces_around_relational_operators = true
+ij_javascript_spaces_around_shift_operators = true
+ij_javascript_spaces_around_unary_operator = false
+ij_javascript_spaces_within_array_initializer_brackets = false
+ij_javascript_spaces_within_brackets = false
+ij_javascript_spaces_within_catch_parentheses = false
+ij_javascript_spaces_within_for_parentheses = false
+ij_javascript_spaces_within_if_parentheses = false
+ij_javascript_spaces_within_imports = false
+ij_javascript_spaces_within_interpolation_expressions = false
+ij_javascript_spaces_within_method_call_parentheses = false
+ij_javascript_spaces_within_method_parentheses = false
+ij_javascript_spaces_within_object_literal_braces = false
+ij_javascript_spaces_within_object_type_braces = true
+ij_javascript_spaces_within_parentheses = false
+ij_javascript_spaces_within_switch_parentheses = false
+ij_javascript_spaces_within_type_assertion = false
+ij_javascript_spaces_within_union_types = true
+ij_javascript_spaces_within_while_parentheses = false
+ij_javascript_special_else_if_treatment = true
+ij_javascript_ternary_operation_signs_on_next_line = false
+ij_javascript_ternary_operation_wrap = off
+ij_javascript_union_types_wrap = on_every_item
+ij_javascript_use_chained_calls_group_indents = false
+ij_javascript_use_double_quotes = true
+ij_javascript_use_explicit_js_extension = global
+ij_javascript_use_path_mapping = always
+ij_javascript_use_public_modifier = false
+ij_javascript_use_semicolon_after_statement = true
+ij_javascript_var_declaration_wrap = normal
+ij_javascript_while_brace_force = never
+ij_javascript_while_on_new_line = false
+ij_javascript_wrap_comments = false
+
+[{*.gradle,*.groovy,*.gant,*.gdsl,*.gy,*.gson,Jenkinsfile*}]
+ij_groovy_align_group_field_declarations = false
+ij_groovy_align_multiline_array_initializer_expression = false
+ij_groovy_align_multiline_assignment = false
+ij_groovy_align_multiline_binary_operation = false
+ij_groovy_align_multiline_chained_methods = false
+ij_groovy_align_multiline_extends_list = false
+ij_groovy_align_multiline_for = true
+ij_groovy_align_multiline_method_parentheses = false
+ij_groovy_align_multiline_parameters = true
+ij_groovy_align_multiline_parameters_in_calls = false
+ij_groovy_align_multiline_resources = true
+ij_groovy_align_multiline_ternary_operation = false
+ij_groovy_align_multiline_throws_list = false
+ij_groovy_align_throws_keyword = false
+ij_groovy_array_initializer_new_line_after_left_brace = false
+ij_groovy_array_initializer_right_brace_on_new_line = false
+ij_groovy_array_initializer_wrap = off
+ij_groovy_assert_statement_wrap = off
+ij_groovy_assignment_wrap = off
+ij_groovy_binary_operation_wrap = off
+ij_groovy_blank_lines_after_class_header = 0
+ij_groovy_blank_lines_after_imports = 1
+ij_groovy_blank_lines_after_package = 1
+ij_groovy_blank_lines_around_class = 1
+ij_groovy_blank_lines_around_field = 0
+ij_groovy_blank_lines_around_field_in_interface = 0
+ij_groovy_blank_lines_around_method = 1
+ij_groovy_blank_lines_around_method_in_interface = 1
+ij_groovy_blank_lines_before_imports = 1
+ij_groovy_blank_lines_before_method_body = 0
+ij_groovy_blank_lines_before_package = 0
+ij_groovy_block_brace_style = end_of_line
+ij_groovy_block_comment_at_first_column = true
+ij_groovy_call_parameters_new_line_after_left_paren = false
+ij_groovy_call_parameters_right_paren_on_new_line = false
+ij_groovy_call_parameters_wrap = off
+ij_groovy_catch_on_new_line = false
+ij_groovy_class_annotation_wrap = split_into_lines
+ij_groovy_class_brace_style = end_of_line
+ij_groovy_do_while_brace_force = never
+ij_groovy_else_on_new_line = false
+ij_groovy_enum_constants_wrap = off
+ij_groovy_extends_keyword_wrap = off
+ij_groovy_extends_list_wrap = off
+ij_groovy_field_annotation_wrap = split_into_lines
+ij_groovy_finally_on_new_line = false
+ij_groovy_for_brace_force = never
+ij_groovy_for_statement_new_line_after_left_paren = false
+ij_groovy_for_statement_right_paren_on_new_line = false
+ij_groovy_for_statement_wrap = off
+ij_groovy_if_brace_force = never
+ij_groovy_indent_case_from_switch = true
+ij_groovy_keep_blank_lines_before_right_brace = 2
+ij_groovy_keep_blank_lines_in_code = 2
+ij_groovy_keep_blank_lines_in_declarations = 2
+ij_groovy_keep_control_statement_in_one_line = true
+ij_groovy_keep_first_column_comment = true
+ij_groovy_keep_indents_on_empty_lines = false
+ij_groovy_keep_line_breaks = true
+ij_groovy_keep_multiple_expressions_in_one_line = false
+ij_groovy_keep_simple_blocks_in_one_line = false
+ij_groovy_keep_simple_classes_in_one_line = true
+ij_groovy_keep_simple_lambdas_in_one_line = true
+ij_groovy_keep_simple_methods_in_one_line = true
+ij_groovy_lambda_brace_style = end_of_line
+ij_groovy_line_comment_add_space = false
+ij_groovy_line_comment_at_first_column = true
+ij_groovy_method_annotation_wrap = split_into_lines
+ij_groovy_method_brace_style = end_of_line
+ij_groovy_method_call_chain_wrap = off
+ij_groovy_method_parameters_new_line_after_left_paren = false
+ij_groovy_method_parameters_right_paren_on_new_line = false
+ij_groovy_method_parameters_wrap = off
+ij_groovy_modifier_list_wrap = false
+ij_groovy_parameter_annotation_wrap = off
+ij_groovy_parentheses_expression_new_line_after_left_paren = false
+ij_groovy_parentheses_expression_right_paren_on_new_line = false
+ij_groovy_prefer_parameters_wrap = false
+ij_groovy_resource_list_new_line_after_left_paren = false
+ij_groovy_resource_list_right_paren_on_new_line = false
+ij_groovy_resource_list_wrap = off
+ij_groovy_space_after_colon = true
+ij_groovy_space_after_comma = true
+ij_groovy_space_after_comma_in_type_arguments = true
+ij_groovy_space_after_for_semicolon = true
+ij_groovy_space_after_quest = true
+ij_groovy_space_after_type_cast = true
+ij_groovy_space_before_annotation_parameter_list = false
+ij_groovy_space_before_array_initializer_left_brace = false
+ij_groovy_space_before_catch_keyword = true
+ij_groovy_space_before_catch_left_brace = true
+ij_groovy_space_before_catch_parentheses = true
+ij_groovy_space_before_class_left_brace = true
+ij_groovy_space_before_colon = true
+ij_groovy_space_before_comma = false
+ij_groovy_space_before_do_left_brace = true
+ij_groovy_space_before_else_keyword = true
+ij_groovy_space_before_else_left_brace = true
+ij_groovy_space_before_finally_keyword = true
+ij_groovy_space_before_finally_left_brace = true
+ij_groovy_space_before_for_left_brace = true
+ij_groovy_space_before_for_parentheses = true
+ij_groovy_space_before_for_semicolon = false
+ij_groovy_space_before_if_left_brace = true
+ij_groovy_space_before_if_parentheses = true
+ij_groovy_space_before_method_call_parentheses = false
+ij_groovy_space_before_method_left_brace = true
+ij_groovy_space_before_method_parentheses = false
+ij_groovy_space_before_quest = true
+ij_groovy_space_before_switch_left_brace = true
+ij_groovy_space_before_switch_parentheses = true
+ij_groovy_space_before_synchronized_left_brace = true
+ij_groovy_space_before_synchronized_parentheses = true
+ij_groovy_space_before_try_left_brace = true
+ij_groovy_space_before_try_parentheses = true
+ij_groovy_space_before_while_keyword = true
+ij_groovy_space_before_while_left_brace = true
+ij_groovy_space_before_while_parentheses = true
+ij_groovy_space_within_empty_array_initializer_braces = false
+ij_groovy_space_within_empty_method_call_parentheses = false
+ij_groovy_spaces_around_additive_operators = true
+ij_groovy_spaces_around_assignment_operators = true
+ij_groovy_spaces_around_bitwise_operators = true
+ij_groovy_spaces_around_equality_operators = true
+ij_groovy_spaces_around_lambda_arrow = true
+ij_groovy_spaces_around_logical_operators = true
+ij_groovy_spaces_around_multiplicative_operators = true
+ij_groovy_spaces_around_relational_operators = true
+ij_groovy_spaces_around_shift_operators = true
+ij_groovy_spaces_within_annotation_parentheses = false
+ij_groovy_spaces_within_array_initializer_braces = false
+ij_groovy_spaces_within_braces = true
+ij_groovy_spaces_within_brackets = false
+ij_groovy_spaces_within_cast_parentheses = false
+ij_groovy_spaces_within_catch_parentheses = false
+ij_groovy_spaces_within_for_parentheses = false
+ij_groovy_spaces_within_if_parentheses = false
+ij_groovy_spaces_within_method_call_parentheses = false
+ij_groovy_spaces_within_method_parentheses = false
+ij_groovy_spaces_within_parentheses = false
+ij_groovy_spaces_within_switch_parentheses = false
+ij_groovy_spaces_within_synchronized_parentheses = false
+ij_groovy_spaces_within_try_parentheses = false
+ij_groovy_spaces_within_while_parentheses = false
+ij_groovy_special_else_if_treatment = true
+ij_groovy_ternary_operation_wrap = off
+ij_groovy_throws_keyword_wrap = off
+ij_groovy_throws_list_wrap = off
+ij_groovy_variable_annotation_wrap = off
+ij_groovy_while_brace_force = never
+ij_groovy_while_on_new_line = false
+ij_groovy_wrap_long_lines = false
+
+[{*.html,*.sht,*.shtm,*.shtml,*.ng,*.htm}]
+ij_html_add_new_line_before_tags = body,div,p,form,h1,h2,h3
+ij_html_align_attributes = true
+ij_html_align_text = false
+ij_html_attribute_wrap = normal
+ij_html_block_comment_at_first_column = true
+ij_html_do_not_align_children_of_min_lines = 0
+ij_html_do_not_break_if_inline_tags = title,h1,h2,h3,h4,h5,h6,p
+ij_html_do_not_indent_children_of_tags = html,body,thead,tbody,tfoot
+ij_html_enforce_quotes = false
+ij_html_inline_tags = a,abbr,acronym,b,basefont,bdo,big,br,cite,cite,code,dfn,em,font,i,img,input,kbd,label,q,s,samp,select,small,span,strike,strong,sub,sup,textarea,tt,u,var
+ij_html_keep_blank_lines = 2
+ij_html_keep_indents_on_empty_lines = false
+ij_html_keep_line_breaks = true
+ij_html_keep_line_breaks_in_text = true
+ij_html_keep_whitespaces = false
+ij_html_keep_whitespaces_inside = span,pre,textarea
+ij_html_line_comment_at_first_column = true
+ij_html_new_line_after_last_attribute = never
+ij_html_new_line_before_first_attribute = never
+ij_html_quote_style = double
+ij_html_remove_new_line_before_tags = br
+ij_html_space_after_tag_name = false
+ij_html_space_around_equality_in_attribute = false
+ij_html_space_inside_empty_tag = false
+ij_html_text_wrap = normal
+
+[{*.jhm,*.xjb,*.rng,*.wsdd,*.wsdl,*.fxml,*.plan,*.bpmn,*.pom,*.xslt,*.jrxml,*.ant,*.xul,*.xsl,*.xsd,*.tld,*.jnlp,*.wadl,*.xml}]
+ij_xml_block_comment_at_first_column = true
+ij_xml_keep_indents_on_empty_lines = false
+ij_xml_line_comment_at_first_column = true
+
+[{*.vsl,*.vm,*.ft}]
+ij_vtl_keep_indents_on_empty_lines = false
+
+[{*.xjsp,*.tagf,*.tag,*.jsf,*.jsp,*.jspf}]
+ij_jsp_jsp_prefer_comma_separated_import_list = false
+ij_jsp_keep_indents_on_empty_lines = false
+
+[{*.yml,*.yaml}]
+ij_yaml_keep_indents_on_empty_lines = false
+ij_yaml_keep_line_breaks = true
+
+[{*.zsh,*.bash,*.sh}]
+ij_shell_binary_ops_start_line = false
+ij_shell_keep_column_alignment_padding = false
+ij_shell_minify_program = false
+ij_shell_redirect_followed_by_space = false
+ij_shell_switch_cases_indented = false
+
+[{.asciidoctorconfig,*.ad,*.adoc,*.asciidoc}]
+ij_asciidoc_formatting_enabled = true
+ij_asciidoc_one_sentence_per_line = true
+
+[{messages.*,spring.schemas,messages,spring.handlers,*.properties}]
+ij_properties_align_group_field_declarations = false
+
+[{rcov,rake,cucumber,rails,spec,spork,capfile,gemfile,rakefile,guardfile,isolate,vagrantfile,Puppetfile,*.thor,*.gemspec,*.rb,*.rake,*.rbw,*.ru,*.jbuilder}]
+ij_continuation_indent_size = 4
+ij_ruby_align_group_field_declarations = false
+ij_ruby_align_multiline_parameters = true
+ij_ruby_blank_lines_around_method = 1
+ij_ruby_convert_brace_block_by_enter = true
+ij_ruby_force_newlines_around_visibility_mods = true
+ij_ruby_indent_private_methods = false
+ij_ruby_indent_protected_methods = false
+ij_ruby_indent_public_methods = false
+ij_ruby_indent_when_cases = false
+ij_ruby_keep_blank_lines_in_declarations = 2
+ij_ruby_keep_indents_on_empty_lines = false
+ij_ruby_keep_line_breaks = true
+ij_ruby_parentheses_around_method_arguments = true
+ij_ruby_spaces_around_hashrocket = true
+ij_ruby_spaces_around_other_operators = true
+ij_ruby_spaces_around_range_operators = false
+ij_ruby_spaces_around_relational_operators = true
+ij_ruby_spaces_within_array_initializer_braces = true
+ij_ruby_spaces_within_braces = false
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index ea7d1c6c22d9..3be546763ac3 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -31,7 +31,7 @@ pipeline {
disableConcurrentBuilds()
}
environment {
- YETUS_RELEASE = '0.11.0'
+ YETUS_RELEASE = '0.11.1'
// where we'll write everything from different steps. Need a copy here so the final step can check for success/failure.
OUTPUT_DIR_RELATIVE_GENERAL = 'output-general'
OUTPUT_DIR_RELATIVE_JDK7 = 'output-jdk7'
diff --git a/dev-support/Jenkinsfile_GitHub b/dev-support/Jenkinsfile_GitHub
index 9f8def562b71..95f3d28f12a2 100644
--- a/dev-support/Jenkinsfile_GitHub
+++ b/dev-support/Jenkinsfile_GitHub
@@ -37,7 +37,7 @@ pipeline {
DOCKERFILE = "${SOURCEDIR}/dev-support/docker/Dockerfile"
YETUS='yetus'
// Branch or tag name. Yetus release tags are 'rel/X.Y.Z'
- YETUS_VERSION='rel/0.11.0'
+ YETUS_VERSION='rel/0.11.1'
}
parameters {
diff --git a/dev-support/hbase-personality.sh b/dev-support/hbase-personality.sh
index aaec957a919c..806ac49ebcc9 100755
--- a/dev-support/hbase-personality.sh
+++ b/dev-support/hbase-personality.sh
@@ -132,6 +132,8 @@ function personality_modules
local repostatus=$1
local testtype=$2
local extra=""
+ local branch1jdk8=()
+ local jdk8module=""
local MODULES=("${CHANGED_MODULES[@]}")
yetus_info "Personality: ${repostatus} ${testtype}"
@@ -169,6 +171,21 @@ function personality_modules
return
fi
+ # This list should include any modules that require jdk8. Maven should be configured to only
+ # include them when a proper JDK is in use, but that doesn't work if we specifically ask for
+ # the module to build, as yetus does when something changes in the module. Rather than try to
+ # figure out which jdk is in use so we can duplicate the module activation logic, just
+ # build at the top level if anything changes in one of these modules and let maven sort it out.
+ branch1jdk8=(hbase-error-prone hbase-tinylfu-blockcache)
+ if [[ "${PATCH_BRANCH}" = branch-1* ]]; then
+ for jdk8module in "${branch1jdk8[@]}"; do
+ if [[ "${MODULES[*]}" =~ ${jdk8module} ]]; then
+ MODULES=(.)
+ break
+ fi
+ done
+ fi
+
if [[ ${testtype} == findbugs ]]; then
# Run findbugs on each module individually to diff pre-patch and post-patch results and
# report new warnings for changed modules only.
@@ -189,7 +206,8 @@ function personality_modules
return
fi
- if [[ ${testtype} == compile ]] && [[ "${SKIP_ERRORPRONE}" != "true" ]]; then
+ if [[ ${testtype} == compile ]] && [[ "${SKIP_ERRORPRONE}" != "true" ]] &&
+ [[ "${PATCH_BRANCH}" != branch-1* ]] ; then
extra="${extra} -PerrorProne"
fi
diff --git a/hbase-annotations/pom.xml b/hbase-annotations/pom.xml
index 02289d1a3e2b..e8767dfa99fc 100644
--- a/hbase-annotations/pom.xml
+++ b/hbase-annotations/pom.xml
@@ -23,7 +23,7 @@
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.5.1-SNAPSHOT</version>
+    <version>1.6.0-SNAPSHOT</version>
     <relativePath>..</relativePath>
   </parent>
diff --git a/hbase-archetypes/hbase-archetype-builder/pom.xml b/hbase-archetypes/hbase-archetype-builder/pom.xml
index d34f0816dfa7..9992a618b06c 100644
--- a/hbase-archetypes/hbase-archetype-builder/pom.xml
+++ b/hbase-archetypes/hbase-archetype-builder/pom.xml
@@ -25,7 +25,7 @@
   <parent>
     <artifactId>hbase-archetypes</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.5.1-SNAPSHOT</version>
+    <version>1.6.0-SNAPSHOT</version>
     <relativePath>..</relativePath>
   </parent>
diff --git a/hbase-archetypes/hbase-client-project/pom.xml b/hbase-archetypes/hbase-client-project/pom.xml
index 22d5d4f5d2f9..cbe3a1a11a89 100644
--- a/hbase-archetypes/hbase-client-project/pom.xml
+++ b/hbase-archetypes/hbase-client-project/pom.xml
@@ -26,7 +26,7 @@
   <parent>
     <artifactId>hbase-archetypes</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.5.1-SNAPSHOT</version>
+    <version>1.6.0-SNAPSHOT</version>
     <relativePath>..</relativePath>
   </parent>
   <artifactId>hbase-client-project</artifactId>
diff --git a/hbase-archetypes/hbase-shaded-client-project/pom.xml b/hbase-archetypes/hbase-shaded-client-project/pom.xml
index 91fe85a04c59..74d7204449b5 100644
--- a/hbase-archetypes/hbase-shaded-client-project/pom.xml
+++ b/hbase-archetypes/hbase-shaded-client-project/pom.xml
@@ -26,7 +26,7 @@
   <parent>
     <artifactId>hbase-archetypes</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.5.1-SNAPSHOT</version>
+    <version>1.6.0-SNAPSHOT</version>
     <relativePath>..</relativePath>
   </parent>
   <artifactId>hbase-shaded-client-project</artifactId>
diff --git a/hbase-archetypes/pom.xml b/hbase-archetypes/pom.xml
index 069a8be5ce87..2d6be10d6dca 100644
--- a/hbase-archetypes/pom.xml
+++ b/hbase-archetypes/pom.xml
@@ -24,7 +24,7 @@
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.5.1-SNAPSHOT</version>
+    <version>1.6.0-SNAPSHOT</version>
     <relativePath>..</relativePath>
   </parent>
diff --git a/hbase-assembly/pom.xml b/hbase-assembly/pom.xml
index d1863e063ab5..5fe3d6deefac 100644
--- a/hbase-assembly/pom.xml
+++ b/hbase-assembly/pom.xml
@@ -23,7 +23,7 @@
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.5.1-SNAPSHOT</version>
+    <version>1.6.0-SNAPSHOT</version>
     <relativePath>..</relativePath>
   </parent>
   <artifactId>hbase-assembly</artifactId>
diff --git a/hbase-checkstyle/pom.xml b/hbase-checkstyle/pom.xml
index 332572b7f961..764f559b76c3 100644
--- a/hbase-checkstyle/pom.xml
+++ b/hbase-checkstyle/pom.xml
@@ -24,14 +24,14 @@
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.hbase</groupId>
   <artifactId>hbase-checkstyle</artifactId>
-  <version>1.5.1-SNAPSHOT</version>
+  <version>1.6.0-SNAPSHOT</version>
   <name>Apache HBase - Checkstyle</name>
   <description>Module to hold Checkstyle properties for HBase.</description>
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.5.1-SNAPSHOT</version>
+    <version>1.6.0-SNAPSHOT</version>
     <relativePath>..</relativePath>
   </parent>
diff --git a/hbase-client/pom.xml b/hbase-client/pom.xml
index 38e7ec438da3..455a5e76ad0b 100644
--- a/hbase-client/pom.xml
+++ b/hbase-client/pom.xml
@@ -24,7 +24,7 @@
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.5.1-SNAPSHOT</version>
+    <version>1.6.0-SNAPSHOT</version>
     <relativePath>..</relativePath>
   </parent>
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
index 4c571e486599..0c2247349a24 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
@@ -323,35 +323,60 @@ public AsyncProcess(ClusterConnection hc, Configuration conf, ExecutorService po
this.id = COUNTER.incrementAndGet();
- this.pause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
- HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
- long configuredPauseForCQTBE = conf.getLong(HConstants.HBASE_CLIENT_PAUSE_FOR_CQTBE, pause);
- if (configuredPauseForCQTBE < pause) {
- LOG.warn("The " + HConstants.HBASE_CLIENT_PAUSE_FOR_CQTBE + " setting: "
- + configuredPauseForCQTBE + " is smaller than " + HConstants.HBASE_CLIENT_PAUSE
- + ", will use " + pause + " instead.");
- this.pauseForCQTBE = pause;
- } else {
- this.pauseForCQTBE = configuredPauseForCQTBE;
- }
- this.numTries = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
- HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
+ ConnectionConfiguration connConf =
+ hc.getConfiguration() == conf
+ ? hc.getConnectionConfiguration()
+ // Slow: parse conf in ConnectionConfiguration constructor
+ : new ConnectionConfiguration(conf);
+ if (connConf == null) {
+ // Slow: parse conf in ConnectionConfiguration constructor
+ connConf = new ConnectionConfiguration(conf);
+ }
+
+ this.pause = connConf.getPause();
+ this.pauseForCQTBE = connConf.getPauseForCQTBE();
+
+ this.numTries = connConf.getRetriesNumber();
this.rpcTimeout = rpcTimeout;
- this.operationTimeout = conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
- HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
- this.primaryCallTimeoutMicroseconds = conf.getInt(PRIMARY_CALL_TIMEOUT_KEY, 10000);
-
- this.maxTotalConcurrentTasks = conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS,
- HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS);
- this.maxConcurrentTasksPerServer = conf.getInt(HConstants.HBASE_CLIENT_MAX_PERSERVER_TASKS,
- HConstants.DEFAULT_HBASE_CLIENT_MAX_PERSERVER_TASKS);
- this.maxConcurrentTasksPerRegion = conf.getInt(HConstants.HBASE_CLIENT_MAX_PERREGION_TASKS,
- HConstants.DEFAULT_HBASE_CLIENT_MAX_PERREGION_TASKS);
- this.maxHeapSizePerRequest = conf.getLong(HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE,
- DEFAULT_HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE);
- this.maxHeapSizeSubmit = conf.getLong(HBASE_CLIENT_MAX_SUBMIT_HEAPSIZE, DEFAULT_HBASE_CLIENT_MAX_SUBMIT_HEAPSIZE);
+ this.operationTimeout = connConf.getOperationTimeout();
+
+ // Reuse the config values already parsed by hc's global AsyncProcess instead of parsing again.
+ // It is null while hc's own AsyncProcess is being constructed, or when the conf isn't reusable.
+ AsyncProcess globalAsyncProcess = hc.getConfiguration() == conf ? hc.getAsyncProcess() : null;
+
+ this.primaryCallTimeoutMicroseconds =
+ globalAsyncProcess == null
+ ? conf.getInt(PRIMARY_CALL_TIMEOUT_KEY, 10000)
+ : globalAsyncProcess.primaryCallTimeoutMicroseconds;
+
+ this.maxTotalConcurrentTasks =
+ globalAsyncProcess == null
+ ? conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS,
+ HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS)
+ : globalAsyncProcess.maxTotalConcurrentTasks;
+ this.maxConcurrentTasksPerServer =
+ globalAsyncProcess == null
+ ? conf.getInt(HConstants.HBASE_CLIENT_MAX_PERSERVER_TASKS,
+ HConstants.DEFAULT_HBASE_CLIENT_MAX_PERSERVER_TASKS)
+ : globalAsyncProcess.maxConcurrentTasksPerServer;
+ this.maxConcurrentTasksPerRegion =
+ globalAsyncProcess == null
+ ? conf.getInt(HConstants.HBASE_CLIENT_MAX_PERREGION_TASKS,
+ HConstants.DEFAULT_HBASE_CLIENT_MAX_PERREGION_TASKS)
+ : globalAsyncProcess.maxConcurrentTasksPerRegion;
+ this.maxHeapSizePerRequest =
+ globalAsyncProcess == null
+ ? conf.getLong(HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE,
+ DEFAULT_HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE)
+ : globalAsyncProcess.maxHeapSizePerRequest;
+ this.maxHeapSizeSubmit =
+ globalAsyncProcess == null
+ ? conf.getLong(HBASE_CLIENT_MAX_SUBMIT_HEAPSIZE, DEFAULT_HBASE_CLIENT_MAX_SUBMIT_HEAPSIZE)
+ : globalAsyncProcess.maxHeapSizeSubmit;
this.startLogErrorsCnt =
- conf.getInt(START_LOG_ERRORS_AFTER_COUNT_KEY, DEFAULT_START_LOG_ERRORS_AFTER_COUNT);
+ globalAsyncProcess == null
+ ? conf.getInt(START_LOG_ERRORS_AFTER_COUNT_KEY, DEFAULT_START_LOG_ERRORS_AFTER_COUNT)
+ : globalAsyncProcess.startLogErrorsCnt;
if (this.maxTotalConcurrentTasks <= 0) {
throw new IllegalArgumentException("maxTotalConcurrentTasks=" + maxTotalConcurrentTasks);
@@ -387,11 +412,16 @@ public AsyncProcess(ClusterConnection hc, Configuration conf, ExecutorService po
this.rpcCallerFactory = rpcCaller;
this.rpcFactory = rpcFactory;
- this.logBatchErrorDetails = conf.getBoolean(LOG_DETAILS_FOR_BATCH_ERROR, false);
+ this.logBatchErrorDetails =
+ globalAsyncProcess == null
+ ? conf.getBoolean(LOG_DETAILS_FOR_BATCH_ERROR, false)
+ : globalAsyncProcess.logBatchErrorDetails;
this.thresholdToLogUndoneTaskDetails =
- conf.getInt(THRESHOLD_TO_LOG_UNDONE_TASK_DETAILS,
- DEFAULT_THRESHOLD_TO_LOG_UNDONE_TASK_DETAILS);
+ globalAsyncProcess == null
+ ? conf.getInt(THRESHOLD_TO_LOG_UNDONE_TASK_DETAILS,
+ DEFAULT_THRESHOLD_TO_LOG_UNDONE_TASK_DETAILS)
+ : globalAsyncProcess.thresholdToLogUndoneTaskDetails;
}
public void setRpcTimeout(int rpcTimeout) {
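
The rewritten constructor above avoids re-parsing the Configuration on every AsyncProcess
instantiation: when the caller passes the connection's own Configuration object it reuses the
connection's ConnectionConfiguration, and it copies the remaining already-parsed fields from the
connection's global AsyncProcess. A minimal sketch of that reuse-or-reparse pattern (Settings,
Holder, and parse() are illustrative names, not HBase API):

    // Sketch only; toy names, not HBase API.
    final class Settings {
      final long pause;
      Settings(long pause) {
        this.pause = pause;
      }
    }

    final class Holder {
      private final Object conf;      // stands in for Hadoop's Configuration
      private final Settings cached;  // parsed once when the connection was built

      Holder(Object conf) {
        this.conf = conf;
        this.cached = parse(conf);
      }

      Settings settingsFor(Object requestConf) {
        // Fast path: the exact same configuration object, so the values parsed at
        // connection setup are still valid. Slow path: parse the unknown conf.
        return requestConf == conf ? cached : parse(requestConf);
      }

      private static Settings parse(Object conf) {
        return new Settings(100L); // the slow path would read hbase.client.pause etc.
      }
    }
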
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
index e33bd7ce369f..dafc66fd03a1 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
@@ -34,7 +34,6 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HConstants; // Needed for write rpc timeout
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
@@ -112,32 +111,32 @@ public class BufferedMutatorImpl implements BufferedMutator {
this.pool = params.getPool();
this.listener = params.getListener();
- ConnectionConfiguration tableConf = new ConnectionConfiguration(conf);
+ ConnectionConfiguration connConf = conn.getConnectionConfiguration();
+ if (connConf == null) {
+ // Slow: parse conf in ConnectionConfiguration constructor
+ connConf = new ConnectionConfiguration(conf);
+ }
this.writeBufferSize = params.getWriteBufferSize() != BufferedMutatorParams.UNSET ?
- params.getWriteBufferSize() : tableConf.getWriteBufferSize();
+ params.getWriteBufferSize() : connConf.getWriteBufferSize();
// Set via the setter because it does value validation and starts/stops the TimerTask
long newWriteBufferPeriodicFlushTimeoutMs =
params.getWriteBufferPeriodicFlushTimeoutMs() != UNSET
? params.getWriteBufferPeriodicFlushTimeoutMs()
- : tableConf.getWriteBufferPeriodicFlushTimeoutMs();
+ : connConf.getWriteBufferPeriodicFlushTimeoutMs();
long newWriteBufferPeriodicFlushTimerTickMs =
params.getWriteBufferPeriodicFlushTimerTickMs() != UNSET
? params.getWriteBufferPeriodicFlushTimerTickMs()
- : tableConf.getWriteBufferPeriodicFlushTimerTickMs();
+ : connConf.getWriteBufferPeriodicFlushTimerTickMs();
this.setWriteBufferPeriodicFlush(
newWriteBufferPeriodicFlushTimeoutMs,
newWriteBufferPeriodicFlushTimerTickMs);
this.maxKeyValueSize = params.getMaxKeyValueSize() != BufferedMutatorParams.UNSET ?
- params.getMaxKeyValueSize() : tableConf.getMaxKeyValueSize();
-
- this.writeRpcTimeout = conn.getConfiguration().getInt(HConstants.HBASE_RPC_WRITE_TIMEOUT_KEY,
- conn.getConfiguration().getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
- HConstants.DEFAULT_HBASE_RPC_TIMEOUT));
- this.operationTimeout = conn.getConfiguration().getInt(
- HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
- HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
+ params.getMaxKeyValueSize() : connConf.getMaxKeyValueSize();
+
+ this.writeRpcTimeout = connConf.getWriteRpcTimeout();
+ this.operationTimeout = connConf.getOperationTimeout();
// puts need to track errors globally due to how the APIs currently work.
ap = new AsyncProcess(connection, conf, pool, rpcCallerFactory, true, rpcFactory, writeRpcTimeout);
}
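
BufferedMutatorImpl above now also prefers the connection's cached ConnectionConfiguration and
only parses the conf itself when the connection has none. The surviving ternaries use
BufferedMutatorParams.UNSET as a sentinel so an explicit per-mutator setting beats the
connection-wide default. A compact, runnable sketch of that resolution (toy names, not HBase API):

    // Sketch only; toy names, not HBase API.
    final class MutatorParams {
      static final long UNSET = -1L;
      private long writeBufferSize = UNSET;

      MutatorParams writeBufferSize(long v) {
        this.writeBufferSize = v;
        return this;
      }

      long resolveWriteBufferSize(long connectionDefault) {
        // An explicit per-mutator value wins; otherwise fall back to the
        // default parsed once for the whole connection.
        return writeBufferSize != UNSET ? writeBufferSize : connectionDefault;
      }

      public static void main(String[] args) {
        long def = 2097152L; // WRITE_BUFFER_SIZE_DEFAULT
        System.out.println(new MutatorParams().resolveWriteBufferSize(def)); // 2097152
        System.out.println(new MutatorParams().writeBufferSize(1024L).resolveWriteBufferSize(def)); // 1024
      }
    }
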
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionConfiguration.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionConfiguration.java
index 0e5164447e28..1189802edf02 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionConfiguration.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionConfiguration.java
@@ -12,6 +12,8 @@
package org.apache.hadoop.hbase.client;
import com.google.common.annotations.VisibleForTesting;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -26,6 +28,7 @@
*/
@InterfaceAudience.Private
public class ConnectionConfiguration {
+ static final Log LOG = LogFactory.getLog(ConnectionConfiguration.class);
public static final String WRITE_BUFFER_SIZE_KEY = "hbase.client.write.buffer";
public static final long WRITE_BUFFER_SIZE_DEFAULT = 2097152;
@@ -50,6 +53,10 @@ public class ConnectionConfiguration {
private final int metaReplicaCallTimeoutMicroSecondScan;
private final int retries;
private final int maxKeyValueSize;
+ private final int readRpcTimeout;
+ private final int writeRpcTimeout;
+ private final long pause;
+ private final long pauseForCQTBE;
/**
* Constructor
@@ -90,9 +97,28 @@ public class ConnectionConfiguration {
HConstants.HBASE_CLIENT_META_REPLICA_SCAN_TIMEOUT_DEFAULT);
this.retries = conf.getInt(
- HConstants.HBASE_CLIENT_RETRIES_NUMBER, HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
+ HConstants.HBASE_CLIENT_RETRIES_NUMBER, HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
this.maxKeyValueSize = conf.getInt(MAX_KEYVALUE_SIZE_KEY, MAX_KEYVALUE_SIZE_DEFAULT);
+
+ this.readRpcTimeout = conf.getInt(HConstants.HBASE_RPC_READ_TIMEOUT_KEY,
+ conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
+ HConstants.DEFAULT_HBASE_RPC_TIMEOUT));
+
+ this.writeRpcTimeout = conf.getInt(HConstants.HBASE_RPC_WRITE_TIMEOUT_KEY,
+ conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
+ HConstants.DEFAULT_HBASE_RPC_TIMEOUT));
+
+ this.pause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE, HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
+ long configuredPauseForCQTBE = conf.getLong(HConstants.HBASE_CLIENT_PAUSE_FOR_CQTBE, pause);
+ if (configuredPauseForCQTBE < pause) {
+ LOG.warn("The " + HConstants.HBASE_CLIENT_PAUSE_FOR_CQTBE + " setting: "
+ + configuredPauseForCQTBE + " is smaller than " + HConstants.HBASE_CLIENT_PAUSE
+ + ", will use " + pause + " instead.");
+ this.pauseForCQTBE = pause;
+ } else {
+ this.pauseForCQTBE = configuredPauseForCQTBE;
+ }
}
/**
@@ -115,6 +141,10 @@ protected ConnectionConfiguration() {
HConstants.HBASE_CLIENT_META_REPLICA_SCAN_TIMEOUT_DEFAULT;
this.retries = HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER;
this.maxKeyValueSize = MAX_KEYVALUE_SIZE_DEFAULT;
+ this.readRpcTimeout = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
+ this.writeRpcTimeout = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
+ this.pause = HConstants.DEFAULT_HBASE_CLIENT_PAUSE;
+ this.pauseForCQTBE = HConstants.DEFAULT_HBASE_CLIENT_PAUSE;
}
public long getWriteBufferSize() {
@@ -164,4 +194,20 @@ public int getMaxKeyValueSize() {
public long getScannerMaxResultSize() {
return scannerMaxResultSize;
}
+
+ public int getReadRpcTimeout() {
+ return readRpcTimeout;
+ }
+
+ public int getWriteRpcTimeout() {
+ return writeRpcTimeout;
+ }
+
+ public long getPause() {
+ return pause;
+ }
+
+ public long getPauseForCQTBE() {
+ return pauseForCQTBE;
+ }
}
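
ConnectionConfiguration now pre-computes the RPC timeouts and pause values that call sites used to
parse on every construction. Note the nested lookup: the specific key (hbase.rpc.read.timeout or
hbase.rpc.write.timeout) falls back to the generic hbase.rpc.timeout, which falls back to the hard
default. A runnable sketch of that cascade, mimicking conf.getInt with java.util.Properties
instead of Hadoop's Configuration:

    import java.util.Properties;

    // Sketch only: the nested-default lookup used for readRpcTimeout above.
    final class CascadingLookup {
      static int getInt(Properties p, String key, int fallback) {
        String v = p.getProperty(key);
        return v == null ? fallback : Integer.parseInt(v);
      }

      public static void main(String[] args) {
        Properties conf = new Properties();
        conf.setProperty("hbase.rpc.timeout", "90000"); // only the generic key is set
        int readRpcTimeout = getInt(conf, "hbase.rpc.read.timeout", // specific key first,
            getInt(conf, "hbase.rpc.timeout", 60000));              // then generic, then default
        System.out.println(readRpcTimeout); // 90000, taken from hbase.rpc.timeout
      }
    }
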
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
index 7d9af647feb6..e8498c4c597b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
@@ -669,17 +669,8 @@ static class HConnectionImplementation implements ClusterConnection, Closeable {
this.managed = managed;
this.connectionConfig = new ConnectionConfiguration(conf);
this.closed = false;
- this.pause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
- HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
- long configuredPauseForCQTBE = conf.getLong(HConstants.HBASE_CLIENT_PAUSE_FOR_CQTBE, pause);
- if (configuredPauseForCQTBE < pause) {
- LOG.warn("The " + HConstants.HBASE_CLIENT_PAUSE_FOR_CQTBE + " setting: "
- + configuredPauseForCQTBE + " is smaller than " + HConstants.HBASE_CLIENT_PAUSE
- + ", will use " + pause + " instead.");
- this.pauseForCQTBE = pause;
- } else {
- this.pauseForCQTBE = configuredPauseForCQTBE;
- }
+ this.pause = connectionConfig.getPause();
+ this.pauseForCQTBE = connectionConfig.getPauseForCQTBE();
this.useMetaReplicas = conf.getBoolean(HConstants.USE_META_REPLICAS,
HConstants.DEFAULT_USE_META_REPLICAS);
this.metaReplicaCallTimeoutScanInMicroSecond =
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index 91a8f922bfb1..cbb7b01bcee7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -366,12 +366,8 @@ private void finishSetup() throws IOException {
}
this.operationTimeout = tableName.isSystemTable() ?
connConfiguration.getMetaOperationTimeout() : connConfiguration.getOperationTimeout();
- this.readRpcTimeout = configuration.getInt(HConstants.HBASE_RPC_READ_TIMEOUT_KEY,
- configuration.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
- HConstants.DEFAULT_HBASE_RPC_TIMEOUT));
- this.writeRpcTimeout = configuration.getInt(HConstants.HBASE_RPC_WRITE_TIMEOUT_KEY,
- configuration.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
- HConstants.DEFAULT_HBASE_RPC_TIMEOUT));
+ this.readRpcTimeout = connConfiguration.getReadRpcTimeout();
+ this.writeRpcTimeout = connConfiguration.getWriteRpcTimeout();
this.scannerCaching = connConfiguration.getScannerCaching();
this.scannerMaxResultSize = connConfiguration.getScannerMaxResultSize();
if (this.rpcCallerFactory == null) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java
index 10c20d7aa24b..50cfe1cae877 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java
@@ -94,7 +94,12 @@ public ScannerCallableWithReplicas(TableName tableName, ClusterConnection cConne
}
public void setClose() {
- currentScannerCallable.setClose();
+ if (currentScannerCallable != null) {
+ currentScannerCallable.setClose();
+ } else {
+ LOG.warn("Calling close on ScannerCallable reference that is already null, "
+ + "which shouldn't happen.");
+ }
}
public void setRenew(boolean val) {
@@ -136,6 +141,10 @@ public MoreResults moreResultsForScan() {
Result[] r = currentScannerCallable.call(timeout);
currentScannerCallable = null;
return r;
+ } else if (currentScannerCallable == null) {
+ LOG.warn("Another call received, but our ScannerCallable is already null. "
+ + "This shouldn't happen, but there's not much to do, so logging and returning null.");
+ return null;
}
// We need to do the following:
//1. When a scan goes out to a certain replica (default or not), we need to
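
The ZKAssign change just below makes explicit that ZKUtil.setData(zkw, node, data,
stat.getVersion()) is a compare-and-swap on the znode version: a BadVersionException only means
another process transitioned the node first, so it is now mapped to a failed transition instead of
propagating. A minimal sketch of the same optimistic-concurrency idiom against the plain ZooKeeper
client (assumes an already-connected ZooKeeper handle and an existing znode):

    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.ZooKeeper;

    // Sketch only: version-checked write, the idiom behind the ZKAssign change below.
    final class VersionedWrite {
      static boolean trySet(ZooKeeper zk, String path, byte[] data, int expectedVersion)
          throws KeeperException, InterruptedException {
        try {
          zk.setData(path, data, expectedVersion); // fails if the version moved under us
          return true;
        } catch (KeeperException.BadVersionException e) {
          // Someone else updated the node after we read expectedVersion; report
          // failure so the caller can re-read the node and decide what to do.
          return false;
        }
      }
    }
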
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKAssign.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKAssign.java
index 297e96e74979..b2e1e1eeb15f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKAssign.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKAssign.java
@@ -868,7 +868,15 @@ public static int transitionNode(ZooKeeperWatcher zkw, HRegionInfo region,
try {
rt = RegionTransition.createRegionTransition(
endState, region.getRegionName(), serverName, payload);
- if(!ZKUtil.setData(zkw, node, rt.toByteArray(), stat.getVersion())) {
+ boolean isDataSet;
+ try {
+ isDataSet = ZKUtil.setData(zkw, node, rt.toByteArray(), stat.getVersion());
+ } catch (KeeperException.BadVersionException e) {
+ isDataSet = false;
+ LOG.error("Received BadVersionException from ZK for " + encoded
+ + ", version: " + stat.getVersion());
+ }
+ if (!isDataSet) {
LOG.warn(zkw.prefix("Attempt to transition the " +
"unassigned node for " + encoded +
" from " + beginState + " to " + endState + " failed, " +
diff --git a/hbase-common/pom.xml b/hbase-common/pom.xml
index 1795399ad863..b76f8ceaa13e 100644
--- a/hbase-common/pom.xml
+++ b/hbase-common/pom.xml
@@ -23,7 +23,7 @@
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.5.1-SNAPSHOT</version>
+    <version>1.6.0-SNAPSHOT</version>
     <relativePath>..</relativePath>
   </parent>
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ChoreService.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ChoreService.java
index 49ac9070fd60..249a7a25b14d 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ChoreService.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ChoreService.java
@@ -152,9 +152,10 @@ public synchronized boolean scheduleChore(ScheduledChore chore) {
try {
if (chore.getPeriod() <= 0) {
- LOG.info("The period is " + chore.getPeriod() + " seconds, " + chore.getName() + " is disabled");
+ LOG.info("Chore " + chore + " is disabled because its period is not positive.");
return false;
}
+ LOG.info("Chore " + chore + " is enabled.");
chore.setChoreServicer(this);
 ScheduledFuture<?> future =
scheduler.scheduleAtFixedRate(chore, chore.getInitialDelay(), chore.getPeriod(),
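
The period check above is load-bearing: ScheduledThreadPoolExecutor.scheduleAtFixedRate rejects a
non-positive period with an IllegalArgumentException, so a chore with period <= 0 has to be
treated as disabled before it ever reaches the scheduler. A runnable sketch:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    // Sketch only: why ChoreService refuses non-positive periods up front.
    final class PeriodGuard {
      public static void main(String[] args) {
        ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
        long period = 0L; // a "disabled" chore in the patch's terms
        if (period <= 0) {
          System.out.println("chore disabled: period is not positive");
        } else {
          // Only reached with a valid period; otherwise scheduleAtFixedRate
          // would throw IllegalArgumentException.
          scheduler.scheduleAtFixedRate(
              () -> System.out.println("tick"), 0L, period, TimeUnit.MILLISECONDS);
        }
        scheduler.shutdown(); // nothing was scheduled, so this exits cleanly
      }
    }
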
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 59d4fb3ae39c..4607de986a06 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -175,6 +175,11 @@ public enum OperationStatusCode {
/** Configuration key for master web API port */
public static final String MASTER_INFO_PORT = "hbase.master.info.port";
+ /** Configuration key for the list of master host:ports */
+ public static final String MASTER_ADDRS_KEY = "hbase.master.addrs";
+
+ public static final String MASTER_ADDRS_DEFAULT = "localhost:" + DEFAULT_MASTER_PORT;
+
/** Parameter name for the master type being backup (waits for primary to go inactive). */
public static final String MASTER_TYPE_BACKUP = "hbase.master.backup";
@@ -1348,6 +1353,11 @@ public static enum Modify {
// default -1 indicates there is no threshold on high storeRefCount
public static final int DEFAULT_STORE_FILE_REF_COUNT_THRESHOLD = -1;
+ public static final String REGIONS_RECOVERY_INTERVAL =
+ "hbase.master.regions.recovery.check.interval";
+
+ public static final int DEFAULT_REGIONS_RECOVERY_INTERVAL = 1200 * 1000; // Default 20 min
+
/**
* Configurations for master executor services.
*/
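
Usage sketch for the two new keys, not part of the patch (host names and the 10-minute override are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;

public class NewKeysExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Comma-separated list of master host:port pairs.
    conf.set(HConstants.MASTER_ADDRS_KEY, "m1.example.com:16000,m2.example.com:16000");
    // Run the region recovery check every 10 minutes instead of the 20-minute default.
    conf.setInt(HConstants.REGIONS_RECOVERY_INTERVAL, 600 * 1000);
    System.out.println(conf.get(HConstants.MASTER_ADDRS_KEY, HConstants.MASTER_ADDRS_DEFAULT));
  }
}
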
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java
index 716e1b097719..91644918859a 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java
@@ -57,6 +57,8 @@ public class HFileContext implements HeapSize, Cloneable {
private Encryption.Context cryptoContext = Encryption.Context.NONE;
private long fileCreateTime;
private String hfileName;
+ private byte[] columnFamily;
+ private byte[] tableName;
//Empty constructor. Go with setters
public HFileContext() {
@@ -79,12 +81,15 @@ public HFileContext(HFileContext context) {
this.cryptoContext = context.cryptoContext;
this.fileCreateTime = context.fileCreateTime;
this.hfileName = context.hfileName;
+ this.columnFamily = context.columnFamily;
+ this.tableName = context.tableName;
}
public HFileContext(boolean useHBaseChecksum, boolean includesMvcc, boolean includesTags,
- Compression.Algorithm compressAlgo, boolean compressTags, ChecksumType checksumType,
- int bytesPerChecksum, int blockSize, DataBlockEncoding encoding,
- Encryption.Context cryptoContext, long fileCreateTime, String hfileName) {
+ Compression.Algorithm compressAlgo, boolean compressTags, ChecksumType checksumType,
+ int bytesPerChecksum, int blockSize, DataBlockEncoding encoding,
+ Encryption.Context cryptoContext, long fileCreateTime, String hfileName,
+ byte[] columnFamily, byte[] tableName) {
this.usesHBaseChecksum = useHBaseChecksum;
this.includesMvcc = includesMvcc;
this.includesTags = includesTags;
@@ -99,6 +104,8 @@ public HFileContext(boolean useHBaseChecksum, boolean includesMvcc, boolean incl
this.cryptoContext = cryptoContext;
this.fileCreateTime = fileCreateTime;
this.hfileName = hfileName;
+ this.columnFamily = columnFamily;
+ this.tableName = tableName;
}
/**
@@ -194,6 +201,14 @@ public String getHFileName() {
return this.hfileName;
}
+ public byte[] getColumnFamily() {
+ return this.columnFamily;
+ }
+
+ public byte[] getTableName() {
+ return this.tableName;
+ }
+
/**
* HeapSize implementation
* NOTE : The heapsize should be altered as and when new state variable are added
@@ -207,7 +222,15 @@ public long heapSize() {
2 * Bytes.SIZEOF_INT +
// usesHBaseChecksum, includesMvcc, includesTags and compressTags
4 * Bytes.SIZEOF_BOOLEAN +
+ // column family, table name byte arrays
+ 2 * ClassSize.ARRAY + 2 * ClassSize.REFERENCE +
Bytes.SIZEOF_LONG);
+ if (this.columnFamily != null) {
+ size += ClassSize.sizeOf(this.columnFamily, this.columnFamily.length);
+ }
+ if (this.tableName != null) {
+ size += ClassSize.sizeOf(this.tableName, this.tableName.length);
+ }
return size;
}
@@ -233,9 +256,17 @@ public String toString() {
sb.append(" includesTags="); sb.append(includesTags);
sb.append(" compressAlgo="); sb.append(compressAlgo);
sb.append(" compressTags="); sb.append(compressTags);
- sb.append(" cryptoContext=[ "); sb.append(cryptoContext); sb.append(" ]");
+ sb.append(" cryptoContext=[ "); sb.append(cryptoContext);
+ sb.append(" ]");
+ if (tableName != null) {
+ sb.append(", tableName=");
+ sb.append(Bytes.toString(tableName));
+ }
+ if (columnFamily != null) {
+ sb.append(", columnFamily=");
+ sb.append(Bytes.toString(columnFamily));
+ }
sb.append(" ]");
return sb.toString();
}
-
}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContextBuilder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContextBuilder.java
index d620d553ae5e..10ee69ff0dc7 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContextBuilder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContextBuilder.java
@@ -54,6 +54,8 @@ public class HFileContextBuilder {
private long fileCreateTime = 0;
private String hfileName = null;
+ private byte[] columnFamily = null;
+ private byte[] tableName = null;
public HFileContextBuilder() {}
@@ -73,6 +75,8 @@ public HFileContextBuilder(final HFileContext hfc) {
this.cryptoContext = hfc.getEncryptionContext();
this.fileCreateTime = hfc.getFileCreateTime();
this.hfileName = hfc.getHFileName();
+ this.columnFamily = hfc.getColumnFamily();
+ this.tableName = hfc.getTableName();
}
public HFileContextBuilder withHBaseCheckSum(boolean useHBaseCheckSum) {
@@ -135,9 +139,19 @@ public HFileContextBuilder withHFileName(String name) {
return this;
}
+ public HFileContextBuilder withColumnFamily(byte[] columnFamily) {
+ this.columnFamily = columnFamily;
+ return this;
+ }
+
+ public HFileContextBuilder withTableName(byte[] tableName) {
+ this.tableName = tableName;
+ return this;
+ }
+
public HFileContext build() {
return new HFileContext(usesHBaseChecksum, includesMvcc, includesTags, compression,
compressTags, checksumType, bytesPerChecksum, blocksize, encoding, cryptoContext,
- fileCreateTime, hfileName);
+ fileCreateTime, hfileName, columnFamily, tableName);
}
}
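
A minimal sketch of the new builder methods in use, not part of the patch (table and family names are illustrative):

import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class ContextBuilderExample {
  public static void main(String[] args) {
    HFileContext context = new HFileContextBuilder()
        .withTableName(Bytes.toBytes("exampleTable"))
        .withColumnFamily(Bytes.toBytes("f1"))
        .withHFileName("example-hfile")
        .build();
    // toString() now reports the table name and column family as well.
    System.out.println(context);
  }
}
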
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DynamicClassLoader.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DynamicClassLoader.java
index 07ca3485c254..7f9e08c3710e 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DynamicClassLoader.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DynamicClassLoader.java
@@ -69,7 +69,10 @@ public class DynamicClassLoader extends ClassLoaderBase {
private static final String DYNAMIC_JARS_OPTIONAL_CONF_KEY = "hbase.use.dynamic.jars";
private static final boolean DYNAMIC_JARS_OPTIONAL_DEFAULT = true;
- private boolean useDynamicJars;
+ // The user-provided value for using the DynamicClassLoader
+ private final boolean userConfigUseDynamicJars;
+ // The current state of whether to use the DynamicClassLoader
+ private final boolean useDynamicJars;
private File localDir;
@@ -91,12 +94,23 @@ public DynamicClassLoader(
final Configuration conf, final ClassLoader parent) {
super(parent);
- useDynamicJars = conf.getBoolean(
+ // Save off the user's original configuration value for the DynamicClassLoader
+ userConfigUseDynamicJars = conf.getBoolean(
DYNAMIC_JARS_OPTIONAL_CONF_KEY, DYNAMIC_JARS_OPTIONAL_DEFAULT);
- if (useDynamicJars) {
- initTempDir(conf);
+ boolean dynamicJarsEnabled = userConfigUseDynamicJars;
+ if (dynamicJarsEnabled) {
+ try {
+ initTempDir(conf);
+ dynamicJarsEnabled = true;
+ } catch (Exception e) {
+ LOG.error("Disabling the DynamicClassLoader as it failed to initialize its temp directory."
+ + " Check your configuration and filesystem permissions. Custom coprocessor code may"
+ + " not be loaded as a result of this failure.", e);
+ dynamicJarsEnabled = false;
+ }
}
+ useDynamicJars = dynamicJarsEnabled;
}
// FindBugs: Making synchronized to avoid IS2_INCONSISTENT_SYNC complaints about
@@ -132,12 +146,13 @@ public Class<?> loadClass(String name)
try {
return parent.loadClass(name);
} catch (ClassNotFoundException e) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Class " + name + " not found - using dynamical class loader");
- }
-
if (useDynamicJars) {
+ LOG.debug("Class " + name + " not found - using dynamical class loader");
return tryRefreshClass(name);
+ } else if (userConfigUseDynamicJars) {
+ // If the user tried to enable the DCL, log a reminder that it is disabled.
+ LOG.debug("Not checking DynamicClassLoader for missing class because it is disabled."
+ + " See the log for previous errors.");
}
throw e;
}
diff --git a/hbase-error-prone/pom.xml b/hbase-error-prone/pom.xml
index c3681dfd1ddf..3302cafcaa2c 100644
--- a/hbase-error-prone/pom.xml
+++ b/hbase-error-prone/pom.xml
@@ -23,11 +23,11 @@
<artifactId>hbase</artifactId>
<groupId>org.apache.hbase</groupId>
- <version>1.5.1-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>..</relativePath>
<artifactId>hbase-error-prone</artifactId>
- <version>1.5.1-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<name>Apache HBase - Error Prone Rules</name>
<description>Module to hold error prone custom rules for HBase.</description>
diff --git a/hbase-examples/pom.xml b/hbase-examples/pom.xml
index 7720ab584f28..02eef28b4390 100644
--- a/hbase-examples/pom.xml
+++ b/hbase-examples/pom.xml
@@ -23,7 +23,7 @@
<artifactId>hbase</artifactId>
<groupId>org.apache.hbase</groupId>
- <version>1.5.1-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>..</relativePath>
<artifactId>hbase-examples</artifactId>
diff --git a/hbase-external-blockcache/pom.xml b/hbase-external-blockcache/pom.xml
index 2ae2dbde7914..2fa9a20b32db 100644
--- a/hbase-external-blockcache/pom.xml
+++ b/hbase-external-blockcache/pom.xml
@@ -25,7 +25,7 @@
<artifactId>hbase</artifactId>
<groupId>org.apache.hbase</groupId>
- <version>1.5.1-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>..</relativePath>
<artifactId>hbase-external-blockcache</artifactId>
diff --git a/hbase-hadoop-compat/pom.xml b/hbase-hadoop-compat/pom.xml
index d606ab981e3c..01055a77e79a 100644
--- a/hbase-hadoop-compat/pom.xml
+++ b/hbase-hadoop-compat/pom.xml
@@ -23,7 +23,7 @@
<artifactId>hbase</artifactId>
<groupId>org.apache.hbase</groupId>
- <version>1.5.1-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>..</relativePath>
diff --git a/hbase-hadoop2-compat/pom.xml b/hbase-hadoop2-compat/pom.xml
index 0d01ff9a050a..0b5547ae6c1c 100644
--- a/hbase-hadoop2-compat/pom.xml
+++ b/hbase-hadoop2-compat/pom.xml
@@ -21,7 +21,7 @@ limitations under the License.
<artifactId>hbase</artifactId>
<groupId>org.apache.hbase</groupId>
- <version>1.5.1-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>..</relativePath>
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableHistogram.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableHistogram.java
index 6a2f2039fc7a..bfa24dc4ef42 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableHistogram.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableHistogram.java
@@ -42,10 +42,6 @@ public MutableHistogram(MetricsInfo info) {
}
public MutableHistogram(String name, String description) {
- this(name, description, Integer.MAX_VALUE << 2);
- }
-
- protected MutableHistogram(String name, String description, long maxExpected) {
this.name = StringUtils.capitalize(name);
this.desc = StringUtils.uncapitalize(description);
this.histogram = new HistogramImpl();
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableRangeHistogram.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableRangeHistogram.java
index e15d0a86fd03..bd04b02c761a 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableRangeHistogram.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableRangeHistogram.java
@@ -35,11 +35,7 @@ public MutableRangeHistogram(MetricsInfo info) {
}
public MutableRangeHistogram(String name, String description) {
- this(name, description, Integer.MAX_VALUE << 2);
- }
-
- public MutableRangeHistogram(String name, String description, long expectedMax) {
- super(name, description, expectedMax);
+ super(name, description);
}
/**
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableSizeHistogram.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableSizeHistogram.java
index 38e78a2324e2..67e27c773e41 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableSizeHistogram.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableSizeHistogram.java
@@ -26,6 +26,7 @@
*/
@InterfaceAudience.Private
public class MutableSizeHistogram extends MutableRangeHistogram {
+
private final static String RANGE_TYPE = "SizeRangeCount";
private final static long[] RANGES = {10,100,1000,10000,100000,1000000,10000000,100000000};
@@ -34,11 +35,7 @@ public MutableSizeHistogram(MetricsInfo info) {
}
public MutableSizeHistogram(String name, String description) {
- this(name, description, RANGES[RANGES.length-2]);
- }
-
- public MutableSizeHistogram(String name, String description, long expectedMax) {
- super(name, description, expectedMax);
+ super(name, description);
}
@Override
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableTimeHistogram.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableTimeHistogram.java
index aaf4359f18cb..7847202523a4 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableTimeHistogram.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableTimeHistogram.java
@@ -35,11 +35,7 @@ public MutableTimeHistogram(MetricsInfo info) {
}
public MutableTimeHistogram(String name, String description) {
- this(name, description, RANGES[RANGES.length - 2]);
- }
-
- public MutableTimeHistogram(String name, String description, long expectedMax) {
- super(name, description, expectedMax);
+ super(name, description);
}
@Override
diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsUserSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsUserSourceImpl.java
index 3155e66fc985..aa8c3d75ee68 100644
--- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsUserSourceImpl.java
+++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsUserSourceImpl.java
@@ -47,10 +47,8 @@ public void testCompareToHashCodeEquals() throws Exception {
assertTrue(one.compareTo(two) != 0);
assertTrue(two.compareTo(one) != 0);
assertTrue(two.compareTo(one) != one.compareTo(two));
- assertTrue(two.compareTo(two) == 0);
}
-
@Test (expected = RuntimeException.class)
public void testNoGetRegionServerMetricsSourceImpl() throws Exception {
// This should throw an exception because MetricsUserSourceImpl should only
diff --git a/hbase-hbtop/pom.xml b/hbase-hbtop/pom.xml
index a1cf67e16a2f..ad0aa370abd1 100644
--- a/hbase-hbtop/pom.xml
+++ b/hbase-hbtop/pom.xml
@@ -25,7 +25,7 @@
<artifactId>hbase</artifactId>
<groupId>org.apache.hbase</groupId>
- <version>1.5.1-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>..</relativePath>
<artifactId>hbase-hbtop</artifactId>
diff --git a/hbase-it/pom.xml b/hbase-it/pom.xml
index fc515cf7c6db..2e2e25dc7581 100644
--- a/hbase-it/pom.xml
+++ b/hbase-it/pom.xml
@@ -23,7 +23,7 @@
<artifactId>hbase</artifactId>
<groupId>org.apache.hbase</groupId>
- <version>1.5.1-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>..</relativePath>
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/Action.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/Action.java
index 4d34a6e0a631..a751b92b2f36 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/Action.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/Action.java
@@ -27,8 +27,6 @@
import java.util.Set;
import org.apache.commons.lang.math.RandomUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HBaseCluster;
@@ -39,6 +37,8 @@
import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.util.Bytes;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* A (possibly mischievous) action that the ChaosMonkey can perform.
@@ -64,7 +64,7 @@ public class Action {
public static final String START_NAMENODE_TIMEOUT_KEY =
"hbase.chaosmonkey.action.startnamenodetimeout";
- protected static final Log LOG = LogFactory.getLog(Action.class);
+ private static final Logger LOG = LoggerFactory.getLogger(Action.class);
protected static final long KILL_MASTER_TIMEOUT_DEFAULT = PolicyBasedChaosMonkey.TIMEOUT;
protected static final long START_MASTER_TIMEOUT_DEFAULT = PolicyBasedChaosMonkey.TIMEOUT;
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/AddColumnAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/AddColumnAction.java
index 27268a4632a2..08eef68727e9 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/AddColumnAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/AddColumnAction.java
@@ -25,6 +25,8 @@
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Action that adds a column family to a table.
@@ -32,6 +34,7 @@
public class AddColumnAction extends Action {
private final TableName tableName;
private Admin admin;
+ private static final Logger LOG = LoggerFactory.getLogger(AddColumnAction.class);
public AddColumnAction(TableName tableName) {
this.tableName = tableName;
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/BatchRestartRsAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/BatchRestartRsAction.java
index ce660006b273..129721be72fc 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/BatchRestartRsAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/BatchRestartRsAction.java
@@ -24,12 +24,16 @@
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Restarts a ratio of the running regionservers at the same time
*/
public class BatchRestartRsAction extends RestartActionBaseAction {
float ratio; //ratio of regionservers to restart
+ private static final Logger LOG =
+ LoggerFactory.getLogger(BatchRestartRsAction.class);
public BatchRestartRsAction(long sleepTime, float ratio) {
super(sleepTime);
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeBloomFilterAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeBloomFilterAction.java
index 684cd629863c..907c3f963450 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeBloomFilterAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeBloomFilterAction.java
@@ -26,6 +26,8 @@
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.regionserver.BloomType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Action that tries to adjust the bloom filter setting on all the columns of a
@@ -34,6 +36,7 @@
public class ChangeBloomFilterAction extends Action {
private final long sleepTime;
private final TableName tableName;
+ private static final Logger LOG = LoggerFactory.getLogger(ChangeBloomFilterAction.class);
public ChangeBloomFilterAction(TableName tableName) {
this(-1, tableName);
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeCompressionAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeCompressionAction.java
index 9c7bf45e3abe..9a2a5f01e451 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeCompressionAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeCompressionAction.java
@@ -28,6 +28,9 @@
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.io.compress.Compressor;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
/**
* Action that changes the compression algorithm on a column family from a list of tables.
*/
@@ -36,6 +39,7 @@ public class ChangeCompressionAction extends Action {
private Admin admin;
private Random random;
+ private static final Logger LOG = LoggerFactory.getLogger(ChangeCompressionAction.class);
public ChangeCompressionAction(TableName tableName) {
this.tableName = tableName;
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeEncodingAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeEncodingAction.java
index c4553f155240..e678afe1d463 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeEncodingAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeEncodingAction.java
@@ -26,6 +26,8 @@
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Action that changes the encoding on a column family from a list of tables.
@@ -35,6 +37,7 @@ public class ChangeEncodingAction extends Action {
private Admin admin;
private Random random;
+ private static final Logger LOG = LoggerFactory.getLogger(ChangeEncodingAction.class);
public ChangeEncodingAction(TableName tableName) {
this.tableName = tableName;
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeSplitPolicyAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeSplitPolicyAction.java
index b5f759f3bcb7..dc1cfee5c559 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeSplitPolicyAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeSplitPolicyAction.java
@@ -17,6 +17,8 @@
*/
package org.apache.hadoop.hbase.chaos.actions;
+import java.util.Random;
+
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
@@ -24,10 +26,11 @@
import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy;
import org.apache.hadoop.hbase.regionserver.IncreasingToUpperBoundRegionSplitPolicy;
-
-import java.util.Random;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class ChangeSplitPolicyAction extends Action {
+ private static final Logger LOG = LoggerFactory.getLogger(ChangeSplitPolicyAction.class);
private final TableName tableName;
private final String[] possiblePolicies;
private final Random random;
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeVersionsAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeVersionsAction.java
index 76e152f19f13..7e2332c89677 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeVersionsAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeVersionsAction.java
@@ -25,6 +25,8 @@
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Action that changes the number of versions on a column family from a list of tables.
@@ -33,6 +35,7 @@
*/
public class ChangeVersionsAction extends Action {
private final TableName tableName;
+ private static final Logger LOG = LoggerFactory.getLogger(ChangeVersionsAction.class);
private Admin admin;
private Random random;
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/CompactRandomRegionOfTableAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/CompactRandomRegionOfTableAction.java
index 114b511ee865..6003240c48f6 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/CompactRandomRegionOfTableAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/CompactRandomRegionOfTableAction.java
@@ -26,6 +26,8 @@
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
import org.apache.hadoop.hbase.client.Admin;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Action that queues a compaction of a random region from the table.
@@ -34,6 +36,8 @@ public class CompactRandomRegionOfTableAction extends Action {
private final int majorRatio;
private final long sleepTime;
private final TableName tableName;
+ private static final Logger LOG =
+ LoggerFactory.getLogger(CompactRandomRegionOfTableAction.class);
public CompactRandomRegionOfTableAction(
TableName tableName, float majorRatio) {
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/CompactTableAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/CompactTableAction.java
index 796cc43a0876..2f5436a4217c 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/CompactTableAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/CompactTableAction.java
@@ -22,6 +22,8 @@
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Action that queues a table compaction.
@@ -30,6 +32,7 @@ public class CompactTableAction extends Action {
private final TableName tableName;
private final int majorRatio;
private final long sleepTime;
+ private static final Logger LOG = LoggerFactory.getLogger(CompactTableAction.class);
public CompactTableAction(TableName tableName, float majorRatio) {
this(-1, tableName, majorRatio);
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/DumpClusterStatusAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/DumpClusterStatusAction.java
index 0403fe047f92..11246ea4a279 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/DumpClusterStatusAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/DumpClusterStatusAction.java
@@ -20,10 +20,15 @@
import java.io.IOException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
/**
* Action to dump the cluster status.
*/
public class DumpClusterStatusAction extends Action {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(DumpClusterStatusAction.class);
@Override
public void init(ActionContext context) throws IOException {
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/FlushRandomRegionOfTableAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/FlushRandomRegionOfTableAction.java
index c919789f9fb2..c4286dbf1f9e 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/FlushRandomRegionOfTableAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/FlushRandomRegionOfTableAction.java
@@ -25,16 +25,20 @@
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
import org.apache.hadoop.hbase.client.Admin;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Action that tries to flush a random region of a table.
*/
public class FlushRandomRegionOfTableAction extends Action {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(FlushRandomRegionOfTableAction.class);
private final long sleepTime;
private final TableName tableName;
public FlushRandomRegionOfTableAction(TableName tableName) {
- this (-1, tableName);
+ this (-1, tableName);
}
public FlushRandomRegionOfTableAction(int sleepTime, TableName tableName) {
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/FlushTableAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/FlushTableAction.java
index ddce57e8b53b..994fd057a5f1 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/FlushTableAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/FlushTableAction.java
@@ -21,11 +21,15 @@
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Action that tries to flush a table.
*/
public class FlushTableAction extends Action {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(FlushTableAction.class);
private final long sleepTime;
private final TableName tableName;
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ForceBalancerAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ForceBalancerAction.java
index 04f389a9519e..d75475432a12 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ForceBalancerAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ForceBalancerAction.java
@@ -18,10 +18,16 @@
package org.apache.hadoop.hbase.chaos.actions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
/**
* Action that tries to force a balancer run.
*/
public class ForceBalancerAction extends Action {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(ForceBalancerAction.class);
+
@Override
public void perform() throws Exception {
// Don't try the flush if we're stopping
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MergeRandomAdjacentRegionsOfTableAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MergeRandomAdjacentRegionsOfTableAction.java
index 8645dc4c9407..eac7d30100a9 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MergeRandomAdjacentRegionsOfTableAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MergeRandomAdjacentRegionsOfTableAction.java
@@ -25,11 +25,15 @@
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Action to merge regions of a table.
*/
public class MergeRandomAdjacentRegionsOfTableAction extends Action {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(MergeRandomAdjacentRegionsOfTableAction.class);
private final TableName tableName;
private final long sleepTime;
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRandomRegionOfTableAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRandomRegionOfTableAction.java
index 96cd0086456b..52816f323d41 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRandomRegionOfTableAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRandomRegionOfTableAction.java
@@ -25,11 +25,15 @@
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
import org.apache.hadoop.hbase.client.Admin;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Action that tries to move a random region of a table.
*/
public class MoveRandomRegionOfTableAction extends Action {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(MoveRandomRegionOfTableAction.class);
private final long sleepTime;
private final TableName tableName;
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRegionsOfTableAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRegionsOfTableAction.java
index d5f0e9652bf4..e38309b4d36c 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRegionsOfTableAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRegionsOfTableAction.java
@@ -29,11 +29,15 @@
import org.apache.hadoop.hbase.chaos.factories.MonkeyConstants;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.util.Bytes;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Action that tries to move every region of a table.
*/
public class MoveRegionsOfTableAction extends Action {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(MoveRegionsOfTableAction.class);
private final long sleepTime;
private final TableName tableName;
private final long maxTime;
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RemoveColumnAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RemoveColumnAction.java
index 20bdaa3c1d4b..e5ca3e857d2a 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RemoveColumnAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RemoveColumnAction.java
@@ -27,11 +27,15 @@
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.util.Bytes;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Action that removes a column family.
*/
public class RemoveColumnAction extends Action {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(RemoveColumnAction.class);
private final TableName tableName;
private final Set<String> protectedColumns;
private Admin admin;
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartActionBaseAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartActionBaseAction.java
index 22d7e2618e3d..8376f51d81a9 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartActionBaseAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartActionBaseAction.java
@@ -22,11 +22,15 @@
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.util.Threads;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Base class for restarting HBase servers
*/
public class RestartActionBaseAction extends Action {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(RestartActionBaseAction.class);
long sleepTime; // how long should we sleep
public RestartActionBaseAction(long sleepTime) {
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartActiveMasterAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartActiveMasterAction.java
index a9bc23ab7542..ab7decd3f1c8 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartActiveMasterAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartActiveMasterAction.java
@@ -19,11 +19,15 @@
package org.apache.hadoop.hbase.chaos.actions;
import org.apache.hadoop.hbase.ServerName;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Action that tries to restart the active master.
*/
public class RestartActiveMasterAction extends RestartActionBaseAction {
+ private static final Logger LOG = LoggerFactory.getLogger(RestartActiveMasterAction.class);
+
public RestartActiveMasterAction(long sleepTime) {
super(sleepTime);
}
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartActiveNameNodeAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartActiveNameNodeAction.java
index 710ac1477862..d9cbfbddc8ae 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartActiveNameNodeAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartActiveNameNodeAction.java
@@ -29,11 +29,15 @@
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.server.namenode.ha.proto.HAZKInfoProtos.ActiveNodeInfo;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Action that tries to restart the active namenode.
*/
public class RestartActiveNameNodeAction extends RestartActionBaseAction {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(RestartActiveNameNodeAction.class);
// Value taken from org.apache.hadoop.ha.ActiveStandbyElector.java, variable :- LOCK_FILENAME
private static final String ACTIVE_NN_LOCK_NAME = "ActiveStandbyElectorLock";
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRandomDataNodeAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRandomDataNodeAction.java
index 7299e79ed16b..09e2990db613 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRandomDataNodeAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRandomDataNodeAction.java
@@ -18,6 +18,10 @@
package org.apache.hadoop.hbase.chaos.actions;
+import java.io.IOException;
+import java.util.LinkedList;
+import java.util.List;
+
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
import org.apache.hadoop.hbase.util.FSUtils;
@@ -25,15 +29,15 @@
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-
-import java.io.IOException;
-import java.util.LinkedList;
-import java.util.List;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Action that restarts a random datanode.
*/
public class RestartRandomDataNodeAction extends RestartActionBaseAction {
+ private static final Logger LOG = LoggerFactory.getLogger(RestartRandomDataNodeAction.class);
+
public RestartRandomDataNodeAction(long sleepTime) {
super(sleepTime);
}
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRandomRsAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRandomRsAction.java
index 7b09dd310513..48458b68dcf4 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRandomRsAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRandomRsAction.java
@@ -20,11 +20,15 @@
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Action that restarts a random HRegionServer
*/
public class RestartRandomRsAction extends RestartActionBaseAction {
+ private static final Logger LOG = LoggerFactory.getLogger(RestartRandomRsAction.class);
+
public RestartRandomRsAction(long sleepTime) {
super(sleepTime);
}
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRandomZKNodeAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRandomZKNodeAction.java
index 6043acde1bae..7984af7ba4a3 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRandomZKNodeAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRandomZKNodeAction.java
@@ -21,11 +21,15 @@
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
import org.apache.hadoop.hbase.zookeeper.ZKServerTool;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Action that restarts a random zookeeper node.
*/
public class RestartRandomZKNodeAction extends RestartActionBaseAction {
+ private static final Logger LOG = LoggerFactory.getLogger(RestartRandomZKNodeAction.class);
+
public RestartRandomZKNodeAction(long sleepTime) {
super(sleepTime);
}
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRsHoldingMetaAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRsHoldingMetaAction.java
index a6b4fc766013..09b3db69c3d5 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRsHoldingMetaAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRsHoldingMetaAction.java
@@ -20,11 +20,16 @@
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.ServerName;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Action that tries to restart the HRegionServer holding Meta.
*/
public class RestartRsHoldingMetaAction extends RestartActionBaseAction {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(RestartRsHoldingMetaAction.class);
+
public RestartRsHoldingMetaAction(long sleepTime) {
super(sleepTime);
}
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRsHoldingTableAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRsHoldingTableAction.java
index b1ea8e531262..79e91fe08202 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRsHoldingTableAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRsHoldingTableAction.java
@@ -26,11 +26,15 @@
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HTable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Action that restarts an HRegionServer holding one of the regions of the table.
*/
public class RestartRsHoldingTableAction extends RestartActionBaseAction {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(RestartRsHoldingTableAction.class);
private final String tableName;
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RollingBatchRestartRsAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RollingBatchRestartRsAction.java
index e79ff5b5739d..347340799c1d 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RollingBatchRestartRsAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RollingBatchRestartRsAction.java
@@ -32,8 +32,9 @@
/**
* Restarts a ratio of the regionservers in a rolling fashion. At each step, either kills a
- * server, or starts one, sleeping randomly (0-sleepTime) in between steps. The parameter maxDeadServers
- * limits the maximum number of servers that can be down at the same time during rolling restarts.
+ * server, or starts one, sleeping randomly (0-sleepTime) in between steps.
+ * The parameter maxDeadServers limits the maximum number of servers that
+ * can be down at the same time during rolling restarts.
*/
public class RollingBatchRestartRsAction extends BatchRestartRsAction {
private static final Log LOG = LogFactory.getLog(RollingBatchRestartRsAction.class);
@@ -81,27 +82,27 @@ public void perform() throws Exception {
ServerName server;
switch (action) {
- case KILL:
- server = serversToBeKilled.remove();
- try {
- killRs(server);
- } catch (org.apache.hadoop.util.Shell.ExitCodeException e) {
- // We've seen this in test runs where we timeout but the kill went through. HBASE-9743
- // So, add to deadServers even if exception so the start gets called.
- LOG.info("Problem killing but presume successful; code=" + e.getExitCode(), e);
- }
- deadServers.add(server);
- break;
- case START:
- try {
- server = deadServers.remove();
- startRs(server);
- } catch (org.apache.hadoop.util.Shell.ExitCodeException e) {
- // The start may fail but better to just keep going though we may lose server.
- //
- LOG.info("Problem starting, will retry; code=" + e.getExitCode(), e);
- }
- break;
+ case KILL:
+ server = serversToBeKilled.remove();
+ try {
+ killRs(server);
+ } catch (org.apache.hadoop.util.Shell.ExitCodeException e) {
+ // We've seen this in test runs where we timeout but the kill went through. HBASE-9743
+ // So, add to deadServers even if exception so the start gets called.
+ LOG.info("Problem killing but presume successful; code=" + e.getExitCode(), e);
+ }
+ deadServers.add(server);
+ break;
+ case START:
+ try {
+ server = deadServers.remove();
+ startRs(server);
+ } catch (org.apache.hadoop.util.Shell.ExitCodeException e) {
+ // The start may fail but better to just keep going though we may lose server.
+ //
+ LOG.info("Problem starting, will retry; code=" + e.getExitCode(), e);
+ }
+ break;
}
sleep(RandomUtils.nextInt((int)sleepTime));
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/SnapshotTableAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/SnapshotTableAction.java
index 15b8e86feec9..7e7dc8da24ce 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/SnapshotTableAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/SnapshotTableAction.java
@@ -21,11 +21,15 @@
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Action that tries to take a snapshot of a table.
*/
public class SnapshotTableAction extends Action {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(SnapshotTableAction.class);
private final TableName tableName;
private final long sleepTime;
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/SplitAllRegionOfTableAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/SplitAllRegionOfTableAction.java
index 5b29d00c4030..5a24af9eeb62 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/SplitAllRegionOfTableAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/SplitAllRegionOfTableAction.java
@@ -17,15 +17,19 @@
*/
package org.apache.hadoop.hbase.chaos.actions;
+import java.io.IOException;
+import java.util.concurrent.ThreadLocalRandom;
+
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
-
-import java.io.IOException;
-import java.util.concurrent.ThreadLocalRandom;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class SplitAllRegionOfTableAction extends Action {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(SplitAllRegionOfTableAction.class);
private static final int DEFAULT_MAX_SPLITS = 3;
private static final String MAX_SPLIT_KEY = "hbase.chaosmonkey.action.maxFullTableSplits";
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/SplitRandomRegionOfTableAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/SplitRandomRegionOfTableAction.java
index 25c80e918cfb..df424ec9e475 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/SplitRandomRegionOfTableAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/SplitRandomRegionOfTableAction.java
@@ -25,11 +25,15 @@
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
import org.apache.hadoop.hbase.client.Admin;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Action that tries to split a random region of a table.
*/
public class SplitRandomRegionOfTableAction extends Action {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(SplitRandomRegionOfTableAction.class);
private final long sleepTime;
private final TableName tableName;
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/TruncateTableAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/TruncateTableAction.java
index 2a4871d3e729..12bbd094ce67 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/TruncateTableAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/TruncateTableAction.java
@@ -24,11 +24,15 @@
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.TableName;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Action that tries to truncate a table.
*/
public class TruncateTableAction extends Action {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(TruncateTableAction.class);
private final TableName tableName;
private final Random random;
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/UnbalanceKillAndRebalanceAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/UnbalanceKillAndRebalanceAction.java
index 0035c2c4c729..264a54f191cb 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/UnbalanceKillAndRebalanceAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/UnbalanceKillAndRebalanceAction.java
@@ -28,9 +28,13 @@
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.ServerName;
import org.junit.Assert;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/** This action is too specific to put in ChaosMonkey; put it here */
public class UnbalanceKillAndRebalanceAction extends Action {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(UnbalanceKillAndRebalanceAction.class);
/** Fractions of servers to get regions and live and die respectively; from all other
* servers, HOARD_FRC_OF_REGIONS will be removed to the above randomly */
private static final double FRC_SERVERS_THAT_HOARD_AND_LIVE = 0.1;
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/UnbalanceRegionsAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/UnbalanceRegionsAction.java
index 2779bd1ca1ad..54690bf3fff5 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/UnbalanceRegionsAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/UnbalanceRegionsAction.java
@@ -25,11 +25,15 @@
import org.apache.commons.lang.math.RandomUtils;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.ServerName;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Action that tries to unbalance the regions of a cluster.
*/
public class UnbalanceRegionsAction extends Action {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(UnbalanceRegionsAction.class);
private double fractionOfRegions;
private double fractionOfServers;
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
index b48f1a05cc74..a196bff89d8e 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
@@ -123,73 +123,103 @@
import org.junit.experimental.categories.Category;
/**
+ *
* This is an integration test borrowed from goraci, written by Keith Turner,
* which is in turn inspired by the Accumulo test called continuous ingest (ci).
* The original source code can be found here:
- * https://github.com/keith-turner/goraci
- * https://github.com/enis/goraci/
- *
+ *
+ * - https://github.com/keith-turner/goraci
+ * - https://github.com/enis/goraci/
+ *
+ *
+ *
* Apache Accumulo [0] has a simple test suite that verifies that data is not
* lost at scale. This test suite is called continuous ingest. This test runs
* many ingest clients that continually create linked lists containing 25
* million nodes. At some point the clients are stopped and a map reduce job is
- * run to ensure no linked list has a hole. A hole indicates data was lost.··
- *
+ * run to ensure no linked list has a hole. A hole indicates data was lost.
+ *
+ *
* The nodes in the linked list are random. This causes each linked list to
* spread across the table. Therefore if one part of a table loses data, then it
* will be detected by references in another part of the table.
- *
- * THE ANATOMY OF THE TEST
+ *
+ *
+ *
+ * THE ANATOMY OF THE TEST
*
* Below is rough sketch of how data is written. For specific details look at
* the Generator code.
- *
- * 1 Write out 1 million nodes· 2 Flush the client· 3 Write out 1 million that
- * reference previous million· 4 If this is the 25th set of 1 million nodes,
- * then update 1st set of million to point to last· 5 goto 1
- *
+ *
+ *
+ *
+ * - Write out 1 million nodes
+ * - Flush the client
+ * - Write out 1 million that reference previous million
+ * - If this is the 25th set of 1 million nodes, then update 1st set of
+ * million to point to last
+ * - goto 1
+ *
+ *
+ *
* The key is that nodes only reference flushed nodes. Therefore a node should
* never reference a missing node, even if the ingest client is killed at any
* point in time.
- *
+ *
+ *
* When running this test suite w/ Accumulo there is a script running in
* parallel called the Aggitator that randomly and continuously kills server
- * processes.·· The outcome was that many data loss bugs were found in Accumulo
- * by doing this.· This test suite can also help find bugs that impact uptime
- * and stability when· run for days or weeks.··
- *
- * This test suite consists the following· - a few Java programs· - a little
- * helper script to run the java programs - a maven script to build it.··
- *
+ * processes. The outcome was that many data loss bugs were found in Accumulo
+ * by doing this. This test suite can also help find bugs that impact uptime
+ * and stability when run for days or weeks.
+ *
+ *
+ * This test suite consists of the following
+ *
+ * - a few Java programs
+ * - a little helper script to run the java programs
+ * - a maven script to build it
+ *
+ *
+ *
+ * When generating data, it's best to have each map task generate a multiple of
* 25 million. The reason for this is that circular linked lists are generated
* every 25M. Not generating a multiple of 25M will result in some nodes in the
* linked list not having references. The loss of an unreferenced node cannot
* be detected.
- *
- *
- * Below is a description of the Java programs
- *
- * Generator - A map only job that generates data. As stated previously,·its best to generate data
- * in multiples of 25M. An option is also available to allow concurrent walkers to select and walk
- * random flushed loops during this phase.
- *
- * Verify - A map reduce job that looks for holes. Look at the counts after running. REFERENCED and
- * UNREFERENCED are· ok, any UNDEFINED counts are bad. Do not run at the· same
- * time as the Generator.
- *
- * Walker - A standalone program that start following a linked list· and emits timing info.··
- *
- * Print - A standalone program that prints nodes in the linked list
- *
- * Delete - A standalone program that deletes a single node
+ *
+ *
+ *
+ * Below is a description of the Java programs
+ *
+ * -
+ * {@code Generator} - A map-only job that generates data. As stated previously, it's best to
+ * generate data in multiples of 25M. An option is also available to allow concurrent walkers to
+ * select and walk random flushed loops during this phase.
+ *
+ * -
+ * {@code Verify} - A map reduce job that looks for holes. Look at the counts after running.
+ * {@code REFERENCED} and {@code UNREFERENCED} are ok, any {@code UNDEFINED} counts are bad. Do not
+ * run at the same time as the Generator.
+ *
+ * -
+ * {@code Walker} - A standalone program that starts following a linked list and emits timing info.
+ *
+ * -
+ * {@code Print} - A standalone program that prints nodes in the linked list
+ *
+ * -
+ * {@code Delete} - A standalone program that deletes a single node
+ *
+ *
*
* This class can be run as a unit test, as an integration test, or from the command line
- *
+ *
+ *
* ex:
+ *
* ./hbase org.apache.hadoop.hbase.test.IntegrationTestBigLinkedList
* loop 2 1 100000 /temp 1 1000 50 1 0
- *
+ *
+ *
*/
@Category(IntegrationTests.class)
public class IntegrationTestBigLinkedList extends IntegrationTestBase {
@@ -1865,7 +1895,7 @@ private void printCommands() {
System.err.println(" walker " +
"Standalone program that starts following a linked list & emits timing info.");
System.err.println(" print Standalone program that prints nodes in the linked list.");
- System.err.println(" delete Standalone program that deletes a·single node.");
+ System.err.println(" delete Standalone program that deletes a single node.");
System.err.println(" loop Program to Loop through Generator and Verify steps");
System.err.println(" clean Program to clean all left over detritus.");
System.err.println(" search Search for missing keys.");
diff --git a/hbase-metrics-api/pom.xml b/hbase-metrics-api/pom.xml
index 6fa44fec8701..86d53f9193ac 100644
--- a/hbase-metrics-api/pom.xml
+++ b/hbase-metrics-api/pom.xml
@@ -23,7 +23,7 @@
<artifactId>hbase</artifactId>
<groupId>org.apache.hbase</groupId>
- <version>1.5.1-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>..</relativePath>
diff --git a/hbase-metrics/pom.xml b/hbase-metrics/pom.xml
index ff3836c9f190..0d100aa1bc5d 100644
--- a/hbase-metrics/pom.xml
+++ b/hbase-metrics/pom.xml
@@ -23,7 +23,7 @@
<artifactId>hbase</artifactId>
<groupId>org.apache.hbase</groupId>
- <version>1.5.1-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>..</relativePath>
diff --git a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/HistogramImpl.java b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/HistogramImpl.java
index 17a179dafa53..7312230e0ab1 100644
--- a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/HistogramImpl.java
+++ b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/HistogramImpl.java
@@ -38,7 +38,7 @@ public class HistogramImpl implements Histogram {
private final CounterImpl counter;
public HistogramImpl() {
- this(Integer.MAX_VALUE << 2);
+ this((long) Integer.MAX_VALUE << 2);
}
public HistogramImpl(long maxExpected) {
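The added cast matters because Java evaluates Integer.MAX_VALUE << 2 entirely in 32-bit arithmetic, so the no-arg constructor previously passed a negative maxExpected. A standalone demo of the overflow this hunk fixes:

    public class ShiftOverflowDemo {
      public static void main(String[] args) {
        // the shift happens in int arithmetic and wraps to a negative value
        System.out.println(Integer.MAX_VALUE << 2);        // prints -4
        // widening to long first keeps every bit of the shifted value
        System.out.println((long) Integer.MAX_VALUE << 2); // prints 8589934588
      }
    }

The revised percentile assertions in the test that follows reflect the histogram being configured with a sane range again.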
diff --git a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestHistogramImpl.java b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestHistogramImpl.java
index 5d3b1faa2101..3cd076740eb4 100644
--- a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestHistogramImpl.java
+++ b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestHistogramImpl.java
@@ -62,19 +62,19 @@ public void testSnapshot() {
Snapshot snapshot = histogram.snapshot();
assertEquals(100, snapshot.getCount());
- assertEquals(50, snapshot.getMedian());
+ assertEquals(49, snapshot.getMedian());
assertEquals(49, snapshot.getMean());
assertEquals(0, snapshot.getMin());
assertEquals(99, snapshot.getMax());
- assertEquals(25, snapshot.get25thPercentile());
- assertEquals(75, snapshot.get75thPercentile());
- assertEquals(90, snapshot.get90thPercentile());
- assertEquals(95, snapshot.get95thPercentile());
- assertEquals(98, snapshot.get98thPercentile());
- assertEquals(99, snapshot.get99thPercentile());
- assertEquals(99, snapshot.get999thPercentile());
+ assertEquals(24, snapshot.get25thPercentile());
+ assertEquals(74, snapshot.get75thPercentile());
+ assertEquals(89, snapshot.get90thPercentile());
+ assertEquals(94, snapshot.get95thPercentile());
+ assertEquals(97, snapshot.get98thPercentile());
+ assertEquals(98, snapshot.get99thPercentile());
+ assertEquals(98, snapshot.get999thPercentile());
- assertEquals(51, snapshot.getCountAtOrBelow(50));
+ assertEquals(100, snapshot.getCountAtOrBelow(50));
// check that histogram is reset.
assertEquals(100, histogram.getCount()); // count does not reset
@@ -99,5 +99,25 @@ public void testSnapshot() {
assertEquals(198, snapshot.get98thPercentile());
assertEquals(199, snapshot.get99thPercentile());
assertEquals(199, snapshot.get999thPercentile());
+
+ for (int i = 500; i < 1000; i++) {
+ histogram.update(i);
+ }
+
+ snapshot = histogram.snapshot();
+
+ assertEquals(500, snapshot.getCount());
+ assertEquals(749, snapshot.getMedian());
+ assertEquals(749, snapshot.getMean());
+ assertEquals(500, snapshot.getMin());
+ assertEquals(999, snapshot.getMax());
+ assertEquals(624, snapshot.get25thPercentile());
+ assertEquals(874, snapshot.get75thPercentile());
+ assertEquals(949, snapshot.get90thPercentile());
+ assertEquals(974, snapshot.get95thPercentile());
+ assertEquals(989, snapshot.get98thPercentile());
+ assertEquals(994, snapshot.get99thPercentile());
+ assertEquals(998, snapshot.get999thPercentile());
+
}
}
diff --git a/hbase-prefix-tree/pom.xml b/hbase-prefix-tree/pom.xml
index 1e8260e010ce..9bd7e2cb1f3c 100644
--- a/hbase-prefix-tree/pom.xml
+++ b/hbase-prefix-tree/pom.xml
@@ -23,7 +23,7 @@
hbase
org.apache.hbase
- 1.5.1-SNAPSHOT
+ 1.6.0-SNAPSHOT
..
diff --git a/hbase-procedure/pom.xml b/hbase-procedure/pom.xml
index 2dcb9d95526a..ed79224e2e8a 100644
--- a/hbase-procedure/pom.xml
+++ b/hbase-procedure/pom.xml
@@ -23,7 +23,7 @@
hbase
org.apache.hbase
- 1.5.1-SNAPSHOT
+ 1.6.0-SNAPSHOT
..
diff --git a/hbase-protocol/pom.xml b/hbase-protocol/pom.xml
index 6a5078e9be35..658dc4454445 100644
--- a/hbase-protocol/pom.xml
+++ b/hbase-protocol/pom.xml
@@ -23,7 +23,7 @@
hbase
org.apache.hbase
- 1.5.1-SNAPSHOT
+ 1.6.0-SNAPSHOT
..
diff --git a/hbase-resource-bundle/pom.xml b/hbase-resource-bundle/pom.xml
index 18b1220cc9ba..249bbb942d7c 100644
--- a/hbase-resource-bundle/pom.xml
+++ b/hbase-resource-bundle/pom.xml
@@ -23,7 +23,7 @@
hbase
org.apache.hbase
- 1.5.1-SNAPSHOT
+ 1.6.0-SNAPSHOT
..
diff --git a/hbase-rest/pom.xml b/hbase-rest/pom.xml
index 383472ee961d..ac0b9da978ef 100644
--- a/hbase-rest/pom.xml
+++ b/hbase-rest/pom.xml
@@ -23,7 +23,7 @@
hbase
org.apache.hbase
- 1.5.1-SNAPSHOT
+ 1.6.0-SNAPSHOT
..
hbase-rest
diff --git a/hbase-rsgroup/pom.xml b/hbase-rsgroup/pom.xml
index 796e0657ca55..1f7b6cd983ad 100644
--- a/hbase-rsgroup/pom.xml
+++ b/hbase-rsgroup/pom.xml
@@ -23,7 +23,7 @@
hbase
org.apache.hbase
- 1.5.1-SNAPSHOT
+ 1.6.0-SNAPSHOT
..
diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml
index ea0e16fcd999..34eb84b4882a 100644
--- a/hbase-server/pom.xml
+++ b/hbase-server/pom.xml
@@ -23,7 +23,7 @@
hbase
org.apache.hbase
- 1.5.1-SNAPSHOT
+ 1.6.0-SNAPSHOT
..
hbase-server
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
index 042037e159bf..4f9cd3b8f264 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
@@ -24,6 +24,7 @@
import java.util.Collections;
import java.util.List;
+import com.google.common.base.Joiner;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -161,6 +162,15 @@ public LocalHBaseCluster(final Configuration conf, final int noMasters,
for (int i = 0; i < noMasters; i++) {
addMaster(new Configuration(conf), i);
}
+
+ // Populate the master address host ports in the config. This is needed if a master based
+ // registry is configured for client metadata services (HBASE-18095)
+ List<String> masterHostPorts = new ArrayList<>();
+ for (JVMClusterUtil.MasterThread masterThread: getMasters()) {
+ masterHostPorts.add(masterThread.getMaster().getServerName().getAddress().toString());
+ }
+ conf.set(HConstants.MASTER_ADDRS_KEY, Joiner.on(",").join(masterHostPorts));
+
// Start the HRegionServers.
this.regionServerClass =
(Class<? extends HRegionServer>)conf.getClass(HConstants.REGION_SERVER_IMPL,
@@ -214,7 +224,7 @@ public JVMClusterUtil.MasterThread addMaster() throws IOException {
}
public JVMClusterUtil.MasterThread addMaster(Configuration c, final int index)
- throws IOException {
+ throws IOException {
// Create each master with its own Configuration instance so each has
// its HConnection instance rather than share (see HBASE_INSTANCES down in
// the guts of HConnectionManager.
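With HConstants.MASTER_ADDRS_KEY populated, client-side registry code can discover the masters straight from the Configuration. A hedged sketch (the literal key name "hbase.masters" is an assumption; the patch only references the constant):

    import org.apache.hadoop.conf.Configuration;

    public class MasterAddrsDemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // LocalHBaseCluster now seeds this key with "host:port,host:port,..."
        conf.set("hbase.masters", "host1:16000,host2:16000"); // hypothetical values
        for (String hostPort : conf.get("hbase.masters").split(",")) {
          System.out.println("master registry endpoint: " + hostPort);
        }
      }
    }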
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitTransactionCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitTransactionCoordination.java
index f6e96fa3073b..24164e51658e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitTransactionCoordination.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitTransactionCoordination.java
@@ -40,6 +40,13 @@ public class ZKSplitTransactionCoordination implements SplitTransactionCoordinat
private CoordinatedStateManager coordinationManager;
private final ZooKeeperWatcher watcher;
+ // Max wait for the split transaction: 100 iterations of the loop below, each with a 100 ms
+ // thread sleep. Including the ZK calls made per iteration, this accounts for roughly 24 s;
+ // even on a busy cluster we should have been able to complete the setData() by then. Ideally,
+ // a second retry after a failed attempt should already be sufficient to retrieve the correct
+ // ZK node version and successfully update the RIT info in the ZK node.
+ private static final int SPIN_WAIT_TIMEOUT = 100;
+
private static final Log LOG = LogFactory.getLog(ZKSplitTransactionCoordination.class);
public ZKSplitTransactionCoordination(CoordinatedStateManager coordinationProvider,
@@ -163,6 +170,10 @@ public void waitForSplitTransaction(final RegionServerServices services, Region
}
Thread.sleep(100);
spins++;
+ if (spins > SPIN_WAIT_TIMEOUT) {
+ throw new IOException("Waiting time for Split Transaction exceeded for region: "
+ + parent.getRegionInfo().getRegionNameAsString());
+ }
byte[] data = ZKAssign.getDataNoWatch(watcher, node, stat);
if (data == null) {
throw new IOException("Data is null, splitting node " + node + " no longer exists");
@@ -222,9 +233,14 @@ public void completeSplitTransaction(final RegionServerServices services, Region
// Tell master about split by updating zk. If we fail, abort.
if (coordinationManager.getServer() != null) {
try {
- zstd.setZnodeVersion(transitionSplittingNode(parent.getRegionInfo(), a.getRegionInfo(),
+ int newNodeVersion = transitionSplittingNode(parent.getRegionInfo(), a.getRegionInfo(),
b.getRegionInfo(), coordinationManager.getServer().getServerName(), zstd,
- RS_ZK_REGION_SPLITTING, RS_ZK_REGION_SPLIT));
+ RS_ZK_REGION_SPLITTING, RS_ZK_REGION_SPLIT);
+ if (newNodeVersion == -1) {
+ throw new IOException("Notifying master of RS split failed for region: "
+ + parent.getRegionInfo().getRegionNameAsString());
+ }
+ zstd.setZnodeVersion(newNodeVersion);
int spins = 0;
// Now wait for the master to process the split. We know it's done
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java
index 93e18370407f..1d9b9ca48813 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java
@@ -201,8 +201,8 @@ protected boolean checkKey(final Cell cell) throws IOException {
int keyComp = comparator.compareOnlyKeyPortion(lastCell, cell);
if (keyComp > 0) {
- throw new IOException("Added a key not lexically larger than"
- + " previous. Current cell = " + cell + ", lastCell = " + lastCell);
+ String message = getLexicalErrorMessage(cell);
+ throw new IOException(message);
} else if (keyComp == 0) {
isDuplicateKey = true;
}
@@ -210,6 +210,18 @@ protected boolean checkKey(final Cell cell) throws IOException {
return isDuplicateKey;
}
+ private String getLexicalErrorMessage(Cell cell) {
+ StringBuilder sb = new StringBuilder();
+ sb.append("Added a key not lexically larger than previous. Current cell = ");
+ sb.append(cell);
+ sb.append(", lastCell = ");
+ sb.append(lastCell);
+ // file context includes HFile path and optionally table and CF of the file being written
+ sb.append(", fileContext=");
+ sb.append(hFileContext);
+ return sb.toString();
+ }
+
/** Checks the given value for validity. */
protected void checkValue(final byte[] value, final int offset,
final int length) throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
index d177402cf557..114d64250fcd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
@@ -1292,6 +1292,8 @@ HFileBlock getBlockForCaching(CacheConfig cacheConf) {
.withCompressTags(fileContext.isCompressTags())
.withIncludesMvcc(fileContext.isIncludesMvcc())
.withIncludesTags(fileContext.isIncludesTags())
+ .withColumnFamily(fileContext.getColumnFamily())
+ .withTableName(fileContext.getTableName())
.build();
return new HFileBlock(blockType, getOnDiskSizeWithoutHeader(),
getUncompressedSizeWithoutHeader(), prevOffset,
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
index e8f3c1f6bbbd..202cd55c81ef 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
@@ -316,8 +316,8 @@ private WriterLength getNewWriter(byte[] family, Configuration conf,
.withCompression(compression)
.withChecksumType(HStore.getChecksumType(conf))
.withBytesPerCheckSum(HStore.getBytesPerChecksum(conf))
- .withBlockSize(blockSize);
-
+ .withBlockSize(blockSize)
+ .withColumnFamily(family);
if (HFile.getFormatVersion(conf) >= HFile.MIN_FORMAT_VERSION_WITH_TAGS) {
contextBuilder.withIncludesTags(true);
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index c6bbf248bccf..5eb60d620942 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -305,6 +305,8 @@ public void run() {
private RegionsRecoveryChore regionsRecoveryChore = null;
+ private RegionsRecoveryConfigManager regionsRecoveryConfigManager = null;
+
// buffer for "fatal error" notices from region servers
// in the cluster. This is only used for assisting
// operations/debugging.
@@ -912,6 +914,7 @@ private void finishActiveMasterInitialization(MonitoredTask status)
configurationManager.registerObserver(this.cleanerPool);
configurationManager.registerObserver(this.hfileCleaner);
configurationManager.registerObserver(this.logCleaner);
+ configurationManager.registerObserver(this.regionsRecoveryConfigManager);
// Set master as 'initialized'.
setInitialized(true);
@@ -1256,6 +1259,7 @@ private void startServiceThreads() throws IOException {
this.logCleaner = new LogCleaner(cleanerInterval, this, conf,
getMasterFileSystem().getOldLogDir().getFileSystem(conf),
getMasterFileSystem().getOldLogDir(), cleanerPool);
+ getChoreService().scheduleChore(logCleaner);
//start the hfile archive cleaner thread
Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
Map<String, Object> params = new HashMap<String, Object>();
@@ -1289,6 +1293,8 @@ private void startServiceThreads() throws IOException {
}
}
+ this.regionsRecoveryConfigManager = new RegionsRecoveryConfigManager(this);
+
serviceStarted = true;
if (LOG.isTraceEnabled()) {
LOG.trace("Started service threads");
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionsRecoveryChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionsRecoveryChore.java
index 78d4b785bd35..06ca30f4e956 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionsRecoveryChore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionsRecoveryChore.java
@@ -52,11 +52,6 @@ public class RegionsRecoveryChore extends ScheduledChore {
private static final String REGIONS_RECOVERY_CHORE_NAME = "RegionsRecoveryChore";
- private static final String REGIONS_RECOVERY_INTERVAL =
- "hbase.master.regions.recovery.check.interval";
-
- private static final int DEFAULT_REGIONS_RECOVERY_INTERVAL = 1200 * 1000; // Default 20 min ?
-
private static final String ERROR_REOPEN_REIONS_MSG =
"Error reopening regions with high storeRefCount. ";
@@ -76,8 +71,8 @@ public class RegionsRecoveryChore extends ScheduledChore {
RegionsRecoveryChore(final Stoppable stopper, final Configuration configuration,
final HMaster hMaster) {
- super(REGIONS_RECOVERY_CHORE_NAME, stopper, configuration.getInt(REGIONS_RECOVERY_INTERVAL,
- DEFAULT_REGIONS_RECOVERY_INTERVAL));
+ super(REGIONS_RECOVERY_CHORE_NAME, stopper, configuration.getInt(
+ HConstants.REGIONS_RECOVERY_INTERVAL, HConstants.DEFAULT_REGIONS_RECOVERY_INTERVAL));
this.hMaster = hMaster;
this.storeFileRefCountThreshold = configuration.getInt(
HConstants.STORE_FILE_REF_COUNT_THRESHOLD,
@@ -180,4 +175,20 @@ private void prepareTableToReopenRegionsMap(
}
+ // hashCode/equals implementation to ensure that at most one RegionsRecoveryChore object
+ // is scheduled at a time - see RegionsRecoveryConfigManager
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ return o != null && getClass() == o.getClass();
+ }
+
+ @Override
+ public int hashCode() {
+ return 31;
+ }
+
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionsRecoveryConfigManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionsRecoveryConfigManager.java
new file mode 100644
index 000000000000..7bf078f5fa48
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionsRecoveryConfigManager.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.ChoreService;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.conf.ConfigurationObserver;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Config manager for RegionsRecovery Chore - Dynamically reload config and update chore
+ * accordingly
+ */
+@InterfaceAudience.Private
+public class RegionsRecoveryConfigManager implements ConfigurationObserver {
+
+ private static final Logger LOG = LoggerFactory.getLogger(RegionsRecoveryConfigManager.class);
+
+ private final HMaster hMaster;
+ private int prevMaxStoreFileRefCount;
+ private int prevRegionsRecoveryInterval;
+
+ RegionsRecoveryConfigManager(final HMaster hMaster) {
+ this.hMaster = hMaster;
+ Configuration conf = hMaster.getConfiguration();
+ this.prevMaxStoreFileRefCount = getMaxStoreFileRefCount(conf);
+ this.prevRegionsRecoveryInterval = getRegionsRecoveryChoreInterval(conf);
+ }
+
+ @Override
+ public void onConfigurationChange(Configuration conf) {
+ final int newMaxStoreFileRefCount = getMaxStoreFileRefCount(conf);
+ final int newRegionsRecoveryInterval = getRegionsRecoveryChoreInterval(conf);
+
+ if (prevMaxStoreFileRefCount == newMaxStoreFileRefCount
+ && prevRegionsRecoveryInterval == newRegionsRecoveryInterval) {
+ // no need to re-schedule the chore with updated config
+ // as there is no change in desired configs
+ return;
+ }
+
+ LOG.info("Config Reload for RegionsRecovery Chore. prevMaxStoreFileRefCount: {}," +
+ " newMaxStoreFileRefCount: {}, prevRegionsRecoveryInterval: {}, " +
+ "newRegionsRecoveryInterval: {}", prevMaxStoreFileRefCount, newMaxStoreFileRefCount,
+ prevRegionsRecoveryInterval, newRegionsRecoveryInterval);
+
+ RegionsRecoveryChore regionsRecoveryChore = new RegionsRecoveryChore(this.hMaster,
+ conf, this.hMaster);
+ ChoreService choreService = this.hMaster.getChoreService();
+
+ // Regions Reopen based on very high storeFileRefCount is considered enabled
+ // only if hbase.regions.recovery.store.file.ref.count has value > 0
+
+ synchronized (this) {
+ if (newMaxStoreFileRefCount > 0) {
+ // reschedule the chore
+ // provide mayInterruptIfRunning - false to take care of completion
+ // of in progress task if any
+ choreService.cancelChore(regionsRecoveryChore, false);
+ choreService.scheduleChore(regionsRecoveryChore);
+ } else {
+ choreService.cancelChore(regionsRecoveryChore, false);
+ }
+ this.prevMaxStoreFileRefCount = newMaxStoreFileRefCount;
+ this.prevRegionsRecoveryInterval = newRegionsRecoveryInterval;
+ }
+ }
+
+ private int getMaxStoreFileRefCount(Configuration configuration) {
+ return configuration.getInt(
+ HConstants.STORE_FILE_REF_COUNT_THRESHOLD,
+ HConstants.DEFAULT_STORE_FILE_REF_COUNT_THRESHOLD);
+ }
+
+ private int getRegionsRecoveryChoreInterval(Configuration configuration) {
+ return configuration.getInt(
+ HConstants.REGIONS_RECOVERY_INTERVAL,
+ HConstants.DEFAULT_REGIONS_RECOVERY_INTERVAL);
+ }
+
+}
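The degenerate equals/hashCode pair added to RegionsRecoveryChore exists so this manager can cancel whatever chore instance was scheduled earlier just by constructing a new one. A toy demonstration of that cancel-by-equality pattern (the assumption being that ChoreService tracks scheduled chores in a map keyed by the chore object):

    import java.util.HashMap;
    import java.util.Map;

    class ToyChore {
      @Override public boolean equals(Object o) { return o != null && getClass() == o.getClass(); }
      @Override public int hashCode() { return 31; }
    }

    public class CancelByEqualityDemo {
      public static void main(String[] args) {
        Map<ToyChore, String> scheduled = new HashMap<>();
        scheduled.put(new ToyChore(), "old schedule");
        // a brand-new instance still addresses the old entry
        System.out.println(scheduled.remove(new ToyChore())); // prints "old schedule"
      }
    }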
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/HeterogeneousRegionCountCostFunction.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/HeterogeneousRegionCountCostFunction.java
new file mode 100644
index 000000000000..1a6265586b42
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/HeterogeneousRegionCountCostFunction.java
@@ -0,0 +1,285 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.hbase.master.balancer;
+
+import java.io.BufferedReader;
+import java.io.FileReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.regex.Pattern;
+import java.util.regex.PatternSyntaxException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.ServerName;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This is an optional cost function designed to allow region count skew across RegionServers. A
+ * rule file is loaded from the local FS or HDFS before balancing. It contains lines of rules. A
+ * rule is composed of a regexp for hostname, and a limit. For example, we could have:
+ *
+ * rs[0-9] 200
+ * rs1[0-9] 50
+ *
+ * RegionServers with a hostname matching the first rule will have a limit of 200, and the others
+ * 50. If there is no match, a default is set. The cost function tries to fill all RegionServers
+ * linearly, meaning that if the global usage is at 50%, then all RegionServers should hold half of
+ * their capacity in terms of regions. In order to use this CostFunction, you need to set the
+ * following options:
+ *
+ * - hbase.master.balancer.stochastic.additionalCostFunctions
+ * - hbase.master.balancer.heterogeneousRegionCountRulesFile
+ * - hbase.master.balancer.heterogeneousRegionCountDefault
+ *
+ * The rule file can be located on the local FS or HDFS, depending on the prefix (file:// or
+ * hdfs://).
+ */
+public class HeterogeneousRegionCountCostFunction extends StochasticLoadBalancer.CostFunction {
+
+ /**
+ * Configuration key for the path where the rule file is stored.
+ */
+ static final String HBASE_MASTER_BALANCER_HETEROGENEOUS_RULES_FILE =
+ "hbase.master.balancer.heterogeneousRegionCountRulesFile";
+ private static final Logger LOG =
+ LoggerFactory.getLogger(HeterogeneousRegionCountCostFunction.class);
+ /**
+ * Config key for the default limit applied when a RegionServer matches no rule or the rule
+ * file cannot be found. Defaults to 200.
+ */
+ private static final String HBASE_MASTER_BALANCER_HETEROGENEOUS_RULES_DEFAULT =
+ "hbase.master.balancer.heterogeneousRegionCountDefault";
+ /**
+ * Cost multiplier for the function. Defaults to 500 and can be changed.
+ */
+ private static final String REGION_COUNT_SKEW_COST_KEY =
+ "hbase.master.balancer.stochastic.heterogeneousRegionCountCost";
+ private static final float DEFAULT_REGION_COUNT_SKEW_COST = 500;
+ private final String rulesPath;
+
+ /**
+ * Contains the rules, key is the regexp for ServerName, value is the limit
+ */
+ private final Map<Pattern, Integer> limitPerRule;
+
+ /**
+ * This is a cache used to avoid scanning the whole limitPerRule map when searching for a limit
+ */
+ private final Map<ServerName, Integer> limitPerRS;
+ private final Configuration conf;
+ private int defaultNumberOfRegions;
+
+ /**
+ * Total capacity of regions for the cluster, based on the online RS and their associated rules
+ */
+ private int totalCapacity = 0;
+ double overallUsage;
+
+ public HeterogeneousRegionCountCostFunction(Configuration conf) {
+ super(conf);
+ this.conf = conf;
+ this.limitPerRS = new HashMap<>();
+ this.limitPerRule = new HashMap<>();
+ this.setMultiplier(conf.getFloat(REGION_COUNT_SKEW_COST_KEY, DEFAULT_REGION_COUNT_SKEW_COST));
+ this.rulesPath = conf.get(HBASE_MASTER_BALANCER_HETEROGENEOUS_RULES_FILE);
+ this.defaultNumberOfRegions =
+ conf.getInt(HBASE_MASTER_BALANCER_HETEROGENEOUS_RULES_DEFAULT, 200);
+
+ if (this.defaultNumberOfRegions < 0) {
+ LOG.warn("invalid configuration '" + HBASE_MASTER_BALANCER_HETEROGENEOUS_RULES_DEFAULT
+ + "'. Setting default to 200");
+ this.defaultNumberOfRegions = 200;
+ }
+ if (conf.getFloat(StochasticLoadBalancer.RegionCountSkewCostFunction.REGION_COUNT_SKEW_COST_KEY,
+ StochasticLoadBalancer.RegionCountSkewCostFunction.DEFAULT_REGION_COUNT_SKEW_COST) > 0) {
+ LOG.warn("regionCountCost is not set to 0, "
+ + " this will interfere with the HeterogeneousRegionCountCostFunction!");
+ }
+ }
+
+ /**
+ * Called once per LB invocation to give the cost function a chance to initialize its state
+ * and perform any costly calculation.
+ */
+ @Override
+ void init(final BaseLoadBalancer.Cluster cluster) {
+ this.cluster = cluster;
+ this.loadRules();
+ }
+
+ @Override
+ protected double cost() {
+ double cost = 0;
+ final double targetUsage = ((double) this.cluster.numRegions / (double) this.totalCapacity);
+
+ for (int i = 0; i < this.cluster.numServers; i++) {
+ // retrieve capacity for each RS
+ final ServerName sn = this.cluster.servers[i];
+ double limit;
+ if (this.limitPerRS.containsKey(sn)) {
+ limit = this.limitPerRS.get(sn);
+ } else {
+ limit = defaultNumberOfRegions;
+ }
+ final double nbrRegions = this.cluster.regionsPerServer[i].length;
+ final double usage = nbrRegions / limit;
+ if (usage > targetUsage) {
+ // cost is the number of regions above the local limit
+ final double localCost = (nbrRegions - Math.round(limit * targetUsage)) / limit;
+ cost += localCost;
+ }
+ }
+
+ return cost / (double) this.cluster.numServers;
+ }
+
+ /**
+ * used to load the rule files.
+ */
+ void loadRules() {
+ final List<String> lines = readFile(this.rulesPath);
+ if (null == lines) {
+ LOG.warn("cannot load rules file, keeping latest rules file which has "
+ + this.limitPerRule.size() + " rules");
+ return;
+ }
+
+ LOG.info("loading rules file '" + this.rulesPath + "'");
+ this.limitPerRule.clear();
+ for (final String line : lines) {
+ try {
+ if (line.length() == 0) {
+ continue;
+ }
+ if (line.startsWith("#")) {
+ continue;
+ }
+ final String[] splits = line.split(" ");
+ if (splits.length != 2) {
+ throw new IOException(
+ "line '" + line + "' is malformated, " + "expected [regexp] [limit]. Skipping line");
+ }
+
+ final Pattern pattern = Pattern.compile(splits[0]);
+ final Integer limit = Integer.parseInt(splits[1]);
+ this.limitPerRule.put(pattern, limit);
+ } catch (final IOException | NumberFormatException | PatternSyntaxException e) {
+ LOG.error("error on line: " + e);
+ }
+ }
+ this.rebuildCache();
+ }
+
+ /**
+ * used to read the rule files from either HDFS or local FS
+ */
+ private List<String> readFile(final String filename) {
+ if (null == filename) {
+ return null;
+ }
+ try {
+ if (filename.startsWith("file:")) {
+ return readFileFromLocalFS(filename);
+ }
+ return readFileFromHDFS(filename);
+ } catch (IOException e) {
+ LOG.error("cannot read rules file located at ' " + filename + " ':" + e.getMessage());
+ return null;
+ }
+ }
+
+ /**
+ * used to read the rule files from HDFS
+ */
+ private List<String> readFileFromHDFS(final String filename) throws IOException {
+ final Path path = new Path(filename);
+ final FileSystem fs = FileSystem.get(this.conf);
+ final BufferedReader reader = new BufferedReader(new InputStreamReader(fs.open(path)));
+ return readLines(reader);
+ }
+
+ /**
+ * used to read the rule files from local FS
+ */
+ private List<String> readFileFromLocalFS(final String filename) throws IOException {
+ BufferedReader reader = new BufferedReader(new FileReader(filename));
+ return readLines(reader);
+ }
+
+ private List<String> readLines(BufferedReader reader) throws IOException {
+ final List<String> records = new ArrayList<>();
+ String line;
+ while ((line = reader.readLine()) != null) {
+ records.add(line);
+ }
+ reader.close();
+ return records;
+ }
+
+ /**
+ * Rebuild cache matching ServerNames and their capacity.
+ */
+ private void rebuildCache() {
+ LOG.debug("Rebuilding cache of capacity for each RS");
+ this.limitPerRS.clear();
+ this.totalCapacity = 0;
+ if (null == this.cluster) {
+ return;
+ }
+ for (int i = 0; i < this.cluster.numServers; i++) {
+ final ServerName sn = this.cluster.servers[i];
+ final int capacity = this.findLimitForRS(sn);
+ LOG.debug(sn.getHostname() + " can hold " + capacity + " regions");
+ this.totalCapacity += capacity;
+ }
+ overallUsage = (double) this.cluster.numRegions / (double) this.totalCapacity;
+ LOG.info("Cluster can hold " + this.cluster.numRegions + "/" + this.totalCapacity + " regions ("
+ + Math.round(overallUsage * 100) + "%)");
+ if (overallUsage >= 1) {
+ LOG.warn("Cluster is overused");
+ }
+ }
+
+ /**
+ * Find the limit for a ServerName. If not found then return the default value
+ * @param serverName the server we are looking for
+ * @return the limit
+ */
+ int findLimitForRS(final ServerName serverName) {
+ boolean matched = false;
+ int limit = -1;
+ for (final Map.Entry<Pattern, Integer> entry : this.limitPerRule.entrySet()) {
+ if (entry.getKey().matcher(serverName.getHostname()).matches()) {
+ matched = true;
+ limit = entry.getValue();
+ break;
+ }
+ }
+ if (!matched) {
+ limit = this.defaultNumberOfRegions;
+ }
+ // Feeding cache
+ this.limitPerRS.put(serverName, limit);
+ return limit;
+ }
+
+ int getNumberOfRulesLoaded() {
+ return this.limitPerRule.size();
+ }
+}
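Putting the pieces together, a hedged configuration sketch for enabling this cost function; the rules-file path and host patterns are hypothetical, the key names come from the constants above, and zeroing regionCountCost follows the interference warning in the constructor:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class HeterogeneousBalancerSetup {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.master.balancer.stochastic.additionalCostFunctions",
            "org.apache.hadoop.hbase.master.balancer.HeterogeneousRegionCountCostFunction");
        // rules file holds "<hostname-regexp> <limit>" lines, e.g.:
        //   rs[0-9] 200
        //   rs1[0-9] 50
        conf.set("hbase.master.balancer.heterogeneousRegionCountRulesFile",
            "hdfs:///hbase/balancer-rules.txt"); // hypothetical path
        conf.setInt("hbase.master.balancer.heterogeneousRegionCountDefault", 200);
        // avoid fighting the built-in region count skew function, per the constructor warning
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 0f);
      }
    }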
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
index bdba8c2cd3fc..b14ab4059b98 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
@@ -1199,9 +1199,9 @@ protected double cost() {
* regions on a cluster.
*/
static class RegionCountSkewCostFunction extends CostFunction {
- private static final String REGION_COUNT_SKEW_COST_KEY =
+ static final String REGION_COUNT_SKEW_COST_KEY =
"hbase.master.balancer.stochastic.regionCountCost";
- private static final float DEFAULT_REGION_COUNT_SKEW_COST = 500;
+ static final float DEFAULT_REGION_COUNT_SKEW_COST = 500;
private double[] stats = null;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
index 78be50bd8ae9..50cd98040399 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
@@ -150,7 +150,7 @@ private void initCleanerChain(String confKey) {
for (String className : logCleaners) {
T logCleaner = newFileCleaner(className, conf);
if (logCleaner != null) {
- LOG.debug("initialize cleaner=" + className);
+ LOG.info("initialize cleaner=" + className);
this.cleanersChain.add(logCleaner);
}
}
@@ -254,7 +254,7 @@ private boolean checkAndDeleteFiles(List<FileStatus> files) {
}
Iterable<FileStatus> filteredFiles = cleaner.getDeletableFiles(deletableValidFiles);
-
+
// trace which cleaner is holding on to each file
if (LOG.isTraceEnabled()) {
ImmutableSet<FileStatus> filteredFileSet = ImmutableSet.copyOf(filteredFiles);
@@ -264,10 +264,10 @@ private boolean checkAndDeleteFiles(List files) {
}
}
}
-
+
deletableValidFiles = filteredFiles;
}
-
+
Iterable<FileStatus> filesToDelete = Iterables.concat(invalidFiles, deletableValidFiles);
return deleteFiles(filesToDelete) == files.size();
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
index ed4bd0da8e7e..f3ff124b143c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
@@ -653,7 +653,9 @@ public void rejectedExecution(Runnable runnable, ThreadPoolExecutor pool) {
if (runnable instanceof CompactionRunner) {
CompactionRunner runner = (CompactionRunner)runnable;
LOG.debug("Compaction Rejected: " + runner);
- runner.store.cancelRequestedCompaction(runner.compaction);
+ if (runner.compaction != null) {
+ runner.store.cancelRequestedCompaction(runner.compaction);
+ }
}
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 2ba44292607a..86245841a118 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -369,6 +369,7 @@ public class HRegionServer extends HasThread implements
public static final String REGIONSERVER = "regionserver";
MetricsRegionServer metricsRegionServer;
+ MetricsRegionServerWrapperImpl metricsRegionServerImpl;
MetricsTable metricsTable;
private SpanReceiverHost spanReceiverHost;
@@ -1496,8 +1497,8 @@ protected void handleReportForDutyResponse(final RegionServerStartupResponse c)
this.cacheConfig = new CacheConfig(conf);
this.walFactory = setupWALAndReplication();
// Init in here rather than in constructor after thread name has been set
- this.metricsRegionServer = new MetricsRegionServer(
- new MetricsRegionServerWrapperImpl(this), conf);
+ this.metricsRegionServerImpl = new MetricsRegionServerWrapperImpl(this);
+ this.metricsRegionServer = new MetricsRegionServer(metricsRegionServerImpl, conf);
this.metricsTable = new MetricsTable(new MetricsTableWrapperAggregateImpl(this));
// Now that we have a metrics source, start the pause monitor
this.pauseMonitor = new JvmPauseMonitor(conf, getMetrics().getMetricsSource());
@@ -3114,6 +3115,7 @@ public Region getFromOnlineRegions(final String encodedRegionName) {
@Override
public boolean removeFromOnlineRegions(final Region r, ServerName destination) {
Region toReturn = this.onlineRegions.remove(r.getRegionInfo().getEncodedName());
+ metricsRegionServerImpl.requestsCountCache.remove(r.getRegionInfo().getEncodedName());
if (destination != null) {
long closeSeqNum = r.getMaxFlushedSeqId();
if (closeSeqNum == HConstants.NO_SEQNUM) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 9c0897fd7c71..f2038cf3c880 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -1158,6 +1158,9 @@ private HFileContext createFileContext(Compression.Algorithm compression,
.withDataBlockEncoding(family.getDataBlockEncoding())
.withEncryptionContext(cryptoContext)
.withCreateTime(EnvironmentEdgeManager.currentTime())
+ .withColumnFamily(family.getName())
+ .withTableName(region.getTableDesc().getTableName().getName())
.build();
return hFileContext;
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
index 061267d8d32d..948c255df281 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
@@ -17,8 +17,11 @@
*/
package org.apache.hadoop.hbase.regionserver;
+import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
@@ -84,6 +87,8 @@ class MetricsRegionServerWrapperImpl
private volatile long majorCompactedCellsSize = 0;
private volatile long blockedRequestsCount = 0L;
private volatile long averageRegionSize = 0L;
+ protected final Map<String, ArrayList<Long>> requestsCountCache =
+ new ConcurrentHashMap<String, ArrayList<Long>>();
private CacheStats cacheStats;
private CacheStats l1Stats = null;
@@ -586,9 +591,6 @@ public long getMajorCompactedCellsSize() {
public class RegionServerMetricsWrapperRunnable implements Runnable {
private long lastRan = 0;
- private long lastRequestCount = 0;
- private long lastReadRequestsCount = 0;
- private long lastWriteRequestsCount = 0;
@Override
synchronized public void run() {
@@ -628,7 +630,40 @@ synchronized public void run() {
long tempBlockedRequestsCount = 0L;
int regionCount = 0;
- for (Region r : regionServer.getOnlineRegionsLocalContext()) {
+
+ long currentReadRequestsCount = 0;
+ long currentWriteRequestsCount = 0;
+ long lastReadRequestsCount = 0;
+ long lastWriteRequestsCount = 0;
+ long readRequestsDelta = 0;
+ long writeRequestsDelta = 0;
+ long totalReadRequestsDelta = 0;
+ long totalWriteRequestsDelta = 0;
+ String encodedRegionName;
+ for (Region r : regionServer.getOnlineRegionsLocalContext()) {
+ encodedRegionName = r.getRegionInfo().getEncodedName();
+ currentReadRequestsCount = r.getReadRequestsCount();
+ currentWriteRequestsCount = r.getWriteRequestsCount();
+ if (requestsCountCache.containsKey(encodedRegionName)) {
+ lastReadRequestsCount = requestsCountCache.get(encodedRegionName).get(0);
+ lastWriteRequestsCount = requestsCountCache.get(encodedRegionName).get(1);
+ readRequestsDelta = currentReadRequestsCount - lastReadRequestsCount;
+ writeRequestsDelta = currentWriteRequestsCount - lastWriteRequestsCount;
+ totalReadRequestsDelta += readRequestsDelta;
+ totalWriteRequestsDelta += writeRequestsDelta;
+ // Update cache for our next comparison
+ requestsCountCache.get(encodedRegionName).set(0, currentReadRequestsCount);
+ requestsCountCache.get(encodedRegionName).set(1, currentWriteRequestsCount);
+ } else {
+ // List[0] -> readRequestCount
+ // List[1] -> writeRequestCount
+ ArrayList<Long> requests = new ArrayList<Long>(2);
+ requests.add(currentReadRequestsCount);
+ requests.add(currentWriteRequestsCount);
+ requestsCountCache.put(encodedRegionName, requests);
+ totalReadRequestsDelta += currentReadRequestsCount;
+ totalWriteRequestsDelta += currentWriteRequestsCount;
+ }
tempNumMutationsWithoutWAL += r.getNumMutationsWithoutWAL();
tempDataInMemoryWithoutWAL += r.getDataInMemoryWithoutWAL();
tempReadRequestsCount += r.getReadRequestsCount();
@@ -697,25 +732,16 @@ synchronized public void run() {
// If we've time traveled keep the last requests per second.
if ((currentTime - lastRan) > 0) {
- long currentRequestCount = getTotalRowActionRequestCount();
- requestsPerSecond = (currentRequestCount - lastRequestCount) /
+ requestsPerSecond = (totalReadRequestsDelta + totalWriteRequestsDelta) /
((currentTime - lastRan) / 1000.0);
- lastRequestCount = currentRequestCount;
-
- long intervalReadRequestsCount = tempReadRequestsCount - lastReadRequestsCount;
- long intervalWriteRequestsCount = tempWriteRequestsCount - lastWriteRequestsCount;
- double readRequestsRatePerMilliSecond = ((double)intervalReadRequestsCount/
+ double readRequestsRatePerMilliSecond = ((double)totalReadRequestsDelta/
(double)period);
- double writeRequestsRatePerMilliSecond = ((double)intervalWriteRequestsCount/
+ double writeRequestsRatePerMilliSecond = ((double)totalWriteRequestsDelta/
(double)period);
readRequestsRate = readRequestsRatePerMilliSecond * 1000.0;
writeRequestsRate = writeRequestsRatePerMilliSecond * 1000.0;
-
- lastReadRequestsCount = tempReadRequestsCount;
- lastWriteRequestsCount = tempWriteRequestsCount;
-
}
lastRan = currentTime;
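The switch to per-region deltas above keeps requestsPerSecond and the read/write rates stable when regions open, close, or move between reporting runs. A toy sketch of the bookkeeping, simplified to one counter per region (names hypothetical):

    import java.util.HashMap;
    import java.util.Map;

    public class DeltaSketch {
      private final Map<String, Long> lastSeen = new HashMap<>();

      // a brand-new region contributes its full count; a departed region simply
      // stops contributing instead of dragging the aggregate backwards
      long delta(String region, long current) {
        Long prev = lastSeen.put(region, current); // remember for the next run
        return prev == null ? current : current - prev;
      }

      public static void main(String[] args) {
        DeltaSketch s = new DeltaSketch();
        System.out.println(s.delta("r1", 100)); // 100 (first observation)
        System.out.println(s.delta("r1", 140)); // 40 (advance since last run)
      }
    }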
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index 124da63b9fcb..5acb70922f83 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -18,6 +18,7 @@
*/
package org.apache.hadoop.hbase.replication.regionserver;
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.Service;
@@ -439,14 +440,30 @@ public String getPeerClusterId() {
}
@Override
+ @VisibleForTesting
public Path getCurrentPath() {
- // only for testing
for (ReplicationSourceShipperThread worker : workerThreads.values()) {
if (worker.getCurrentPath() != null) return worker.getCurrentPath();
}
return null;
}
+ @VisibleForTesting
+ public Path getLastLoggedPath() {
+ for (ReplicationSourceShipperThread worker : workerThreads.values()) {
+ return worker.getLastLoggedPath();
+ }
+ return null;
+ }
+
+ @VisibleForTesting
+ public long getLastLoggedPosition() {
+ for (ReplicationSourceShipperThread worker : workerThreads.values()) {
+ return worker.getLastLoggedPosition();
+ }
+ return 0;
+ }
+
private boolean isSourceActive() {
return !this.stopper.isStopped() && this.sourceRunning;
}
@@ -481,8 +498,8 @@ public String getStats() {
for (Map.Entry<String, ReplicationSourceShipperThread> entry : workerThreads.entrySet()) {
String walGroupId = entry.getKey();
ReplicationSourceShipperThread worker = entry.getValue();
- long position = worker.getCurrentPosition();
- Path currentPath = worker.getCurrentPath();
+ long position = worker.getLastLoggedPosition();
+ Path currentPath = worker.getLastLoggedPath();
sb.append("walGroup [").append(walGroupId).append("]: ");
if (currentPath != null) {
sb.append("currently replicating from: ").append(currentPath).append(" at position: ")
@@ -517,7 +534,7 @@ public Map getWalGroupStatus() {
int queueSize = queues.get(walGroupId).size();
replicationDelay =
ReplicationLoad.calculateReplicationDelay(ageOfLastShippedOp, lastTimeStamp, queueSize);
- Path currentPath = worker.getCurrentPath();
+ Path currentPath = worker.getLastLoggedPath();
fileSize = -1;
if (currentPath != null) {
try {
@@ -535,7 +552,7 @@ public Map getWalGroupStatus() {
.withQueueSize(queueSize)
.withWalGroup(walGroupId)
.withCurrentPath(currentPath)
- .withCurrentPosition(worker.getCurrentPosition())
+ .withCurrentPosition(worker.getLastLoggedPosition())
.withFileSize(fileSize)
.withAgeOfLastShippedOp(ageOfLastShippedOp)
.withReplicationDelay(replicationDelay);
@@ -555,7 +572,7 @@ public class ReplicationSourceShipperThread extends Thread {
// Last position in the log that we sent to ZooKeeper
private long lastLoggedPosition = -1;
// Path of the current log
- private volatile Path currentPath;
+ private volatile Path lastLoggedPath;
// Current state of the worker thread
private WorkerState state;
ReplicationSourceWALReaderThread entryReader;
@@ -600,13 +617,11 @@ public void run() {
WALEntryBatch entryBatch = entryReader.take();
shipEdits(entryBatch);
releaseBufferQuota((int) entryBatch.getHeapSize());
- if (replicationQueueInfo.isQueueRecovered() && entryBatch.getWalEntries().isEmpty()
- && entryBatch.getLastSeqIds().isEmpty()) {
- LOG.debug("Finished recovering queue for group " + walGroupId + " of peer "
- + peerClusterZnode);
+ if (!entryBatch.hasMoreEntries()) {
+ LOG.debug("Finished recovering queue for group "
+ + walGroupId + " of peer " + peerClusterZnode);
metrics.incrCompletedRecoveryQueue();
setWorkerState(WorkerState.FINISHED);
- continue;
}
} catch (InterruptedException e) {
LOG.trace("Interrupted while waiting for next replication entry batch", e);
@@ -614,7 +629,7 @@ public void run() {
}
}
- if (replicationQueueInfo.isQueueRecovered() && getWorkerState() == WorkerState.FINISHED) {
+ if (getWorkerState() == WorkerState.FINISHED) {
// use synchronize to make sure one last thread will clean the queue
synchronized (this) {
Threads.sleep(100);// wait a short while for other worker thread to fully exit
@@ -694,15 +709,13 @@ private int getBatchEntrySizeExcludeBulkLoad(WALEntryBatch entryBatch) {
protected void shipEdits(WALEntryBatch entryBatch) {
List<Entry> entries = entryBatch.getWalEntries();
long lastReadPosition = entryBatch.getLastWalPosition();
- currentPath = entryBatch.getLastWalPath();
+ lastLoggedPath = entryBatch.getLastWalPath();
int sleepMultiplier = 0;
if (entries.isEmpty()) {
- if (lastLoggedPosition != lastReadPosition) {
- updateLogPosition(lastReadPosition);
- // if there was nothing to ship and it's not an error
- // set "ageOfLastShippedOp" to to indicate that we're current
- metrics.setAgeOfLastShippedOp(EnvironmentEdgeManager.currentTime(), walGroupId);
- }
+ updateLogPosition(lastReadPosition);
+ // if there was nothing to ship and it's not an error,
+ // set "ageOfLastShippedOp" to now to indicate that we're current
+ metrics.setAgeOfLastShippedOp(EnvironmentEdgeManager.currentTime(), walGroupId);
return;
}
int currentSize = (int) entryBatch.getHeapSize();
@@ -787,8 +800,7 @@ protected void shipEdits(WALEntryBatch entryBatch) {
}
private void updateLogPosition(long lastReadPosition) {
- manager.setPendingShipment(false);
- manager.logPositionAndCleanOldLogs(currentPath, peerClusterZnode, lastReadPosition,
+ manager.logPositionAndCleanOldLogs(lastLoggedPath, peerClusterZnode, lastReadPosition,
this.replicationQueueInfo.isQueueRecovered(), false);
lastLoggedPosition = lastReadPosition;
}
@@ -800,7 +812,7 @@ public void startup() {
public void uncaughtException(final Thread t, final Throwable e) {
RSRpcServices.exitIfOOME(e);
LOG.error("Unexpected exception in ReplicationSourceWorkerThread," + " currentPath="
- + getCurrentPath(), e);
+ + getLastLoggedPath(), e);
stopper.stop("Unexpected exception in ReplicationSourceWorkerThread");
}
};
@@ -941,8 +953,12 @@ public Path getCurrentPath() {
return this.entryReader.getCurrentPath();
}
- public long getCurrentPosition() {
- return this.lastLoggedPosition;
+ public Path getLastLoggedPath() {
+ return lastLoggedPath;
+ }
+
+ public long getLastLoggedPosition() {
+ return lastLoggedPosition;
}
private boolean isWorkerActive() {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
index 6b8b6e273c47..0e3724a5a1ef 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
@@ -123,8 +123,6 @@ public class ReplicationSourceManager implements ReplicationListener {
private AtomicLong totalBufferUsed = new AtomicLong();
- private boolean pendingShipment;
-
/**
* Creates a replication manager and sets the watch on all the other registered region servers
* @param replicationQueues the interface for manipulating replication queues
@@ -191,19 +189,13 @@ public ReplicationSourceManager(final ReplicationQueues replicationQueues,
* @param holdLogInZK if true then the log is retained in ZK
*/
public synchronized void logPositionAndCleanOldLogs(Path log, String id, long position,
- boolean queueRecovered, boolean holdLogInZK) {
- if (!this.pendingShipment) {
- String fileName = log.getName();
- this.replicationQueues.setLogPosition(id, fileName, position);
- if (holdLogInZK) {
- return;
- }
- cleanOldLogs(fileName, id, queueRecovered);
+ boolean queueRecovered, boolean holdLogInZK) {
+ String fileName = log.getName();
+ this.replicationQueues.setLogPosition(id, fileName, position);
+ if (holdLogInZK) {
+ return;
}
- }
-
- public synchronized void setPendingShipment(boolean pendingShipment) {
- this.pendingShipment = pendingShipment;
+ cleanOldLogs(fileName, id, queueRecovered);
}
/**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReaderThread.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReaderThread.java
index 1d94a7a2c815..58555742ecfe 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReaderThread.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReaderThread.java
@@ -21,9 +21,7 @@
import java.io.EOFException;
import java.io.IOException;
import java.util.ArrayList;
-import java.util.HashMap;
import java.util.List;
-import java.util.Map;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.PriorityBlockingQueue;
@@ -69,7 +67,8 @@ public class ReplicationSourceWALReaderThread extends Thread {
// max count of each batch - multiply by number of batches in queue to get total
private int replicationBatchCountCapacity;
// position in the WAL to start reading at
- private long currentPosition;
+ private long lastReadPosition;
+ private Path lastReadPath;
private WALEntryFilter filter;
private long sleepForRetries;
//Indicates whether this particular worker is running
@@ -81,8 +80,6 @@ public class ReplicationSourceWALReaderThread extends Thread {
private AtomicLong totalBufferUsed;
private long totalBufferQuota;
- private ReplicationSourceManager replicationSourceManager;
-
/**
* Creates a reader worker for a given WAL queue. Reads WAL entries off a given queue, batches the
* entries, and puts them on a batch queue.
@@ -101,7 +98,8 @@ public ReplicationSourceWALReaderThread(ReplicationSourceManager manager,
FileSystem fs, Configuration conf, WALEntryFilter filter, MetricsSource metrics) {
this.replicationQueueInfo = replicationQueueInfo;
this.logQueue = logQueue;
- this.currentPosition = startPosition;
+ this.lastReadPath = logQueue.peek();
+ this.lastReadPosition = startPosition;
this.fs = fs;
this.conf = conf;
this.filter = filter;
@@ -111,7 +109,6 @@ public ReplicationSourceWALReaderThread(ReplicationSourceManager manager,
// memory used will be batchSizeCapacity * (nb.batches + 1)
// the +1 is for the current thread reading before placing onto the queue
int batchCount = conf.getInt("replication.source.nb.batches", 1);
- this.replicationSourceManager = manager;
this.totalBufferUsed = manager.getTotalBufferUsed();
this.totalBufferQuota = conf.getLong(HConstants.REPLICATION_SOURCE_TOTAL_BUFFER_KEY,
HConstants.REPLICATION_SOURCE_TOTAL_BUFFER_DFAULT);
@@ -133,61 +130,45 @@ public void run() {
int sleepMultiplier = 1;
while (isReaderRunning()) { // we only loop back here if something fatal happened to our stream
try (WALEntryStream entryStream =
- new WALEntryStream(logQueue, fs, conf, currentPosition, metrics)) {
+ new WALEntryStream(logQueue, fs, conf, lastReadPosition, metrics)) {
while (isReaderRunning()) { // loop here to keep reusing stream while we can
if (!checkQuota()) {
continue;
}
- WALEntryBatch batch = null;
- while (entryStream.hasNext()) {
- if (batch == null) {
- batch = new WALEntryBatch(replicationBatchCountCapacity, entryStream.getCurrentPath());
- }
+ WALEntryBatch batch = new WALEntryBatch(replicationBatchCountCapacity);
+ boolean hasNext;
+ while ((hasNext = entryStream.hasNext()) == true) {
Entry entry = entryStream.next();
entry = filterEntry(entry);
if (entry != null) {
WALEdit edit = entry.getEdit();
if (edit != null && !edit.isEmpty()) {
long entrySize = getEntrySizeIncludeBulkLoad(entry);
- long entrySizeExlucdeBulkLoad = getEntrySizeExcludeBulkLoad(entry);
+ long entrySizeExcludeBulkLoad = getEntrySizeExcludeBulkLoad(entry);
batch.addEntry(entry);
- replicationSourceManager.setPendingShipment(true);
updateBatchStats(batch, entry, entryStream.getPosition(), entrySize);
- boolean totalBufferTooLarge = acquireBufferQuota(entrySizeExlucdeBulkLoad);
+ boolean totalBufferTooLarge = acquireBufferQuota(entrySizeExcludeBulkLoad);
// Stop if too many entries or too big
if (totalBufferTooLarge || batch.getHeapSize() >= replicationBatchSizeCapacity
|| batch.getNbEntries() >= replicationBatchCountCapacity) {
break;
}
}
- } else {
- replicationSourceManager.logPositionAndCleanOldLogs(entryStream.getCurrentPath(),
- this.replicationQueueInfo.getPeerClusterZnode(),
- entryStream.getPosition(),
- this.replicationQueueInfo.isQueueRecovered(), false);
}
}
- if (batch != null && (!batch.getLastSeqIds().isEmpty() || batch.getNbEntries() > 0)) {
- if (LOG.isTraceEnabled()) {
- LOG.trace(String.format("Read %s WAL entries eligible for replication",
- batch.getNbEntries()));
- }
- entryBatchQueue.put(batch);
+
+ updateBatch(entryStream, batch, hasNext);
+ if (isShippable(batch)) {
sleepMultiplier = 1;
- } else { // got no entries and didn't advance position in WAL
- LOG.trace("Didn't read any new entries from WAL");
- if (replicationQueueInfo.isQueueRecovered()) {
- // we're done with queue recovery, shut ourself down
+ entryBatchQueue.put(batch);
+ if (!batch.hasMoreEntries()) {
+ // we're done with queue recovery, shut ourselves down
setReaderRunning(false);
- // shuts down shipper thread immediately
- entryBatchQueue.put(batch != null ? batch
- : new WALEntryBatch(replicationBatchCountCapacity, entryStream.getCurrentPath()));
- } else {
- Thread.sleep(sleepForRetries);
}
+ } else {
+ Thread.sleep(sleepForRetries);
}
- currentPosition = entryStream.getPosition();
- entryStream.reset(); // reuse stream
+ resetStream(entryStream);
}
} catch (IOException | WALEntryStreamRuntimeException e) { // stream related
if (sleepMultiplier < maxRetriesMultiplier) {
@@ -205,6 +186,38 @@ public void run() {
}
}
+ private void updateBatch(WALEntryStream entryStream, WALEntryBatch batch, boolean moreData) {
+ logMessage(batch);
+ batch.updatePosition(entryStream);
+ batch.setMoreEntries(!replicationQueueInfo.isQueueRecovered() || moreData);
+ }
+
+ private void logMessage(WALEntryBatch batch) {
+ if (LOG.isTraceEnabled()) {
+ if (batch.isEmpty()) {
+ LOG.trace("Didn't read any new entries from WAL");
+ } else {
+ LOG.trace(String.format("Read %s WAL entries eligible for replication",
+ batch.getNbEntries()));
+ }
+ }
+ }
+
+ private boolean isShippable(WALEntryBatch batch) {
+ return !batch.isEmpty() || checkIfWALRolled(batch) || !batch.hasMoreEntries();
+ }
+
+ private boolean checkIfWALRolled(WALEntryBatch batch) {
+ return lastReadPath == null && batch.lastWalPath != null
+ || lastReadPath != null && !lastReadPath.equals(batch.lastWalPath);
+ }
+
+ private void resetStream(WALEntryStream stream) throws IOException {
+ lastReadPosition = stream.getPosition();
+ lastReadPath = stream.getCurrentPath();
+ stream.reset(); // reuse stream
+ }
+
// if we get an EOF due to a zero-length log, and there are other logs in queue
// (highly likely we've closed the current log), we've hit the max retries, and autorecovery is
// enabled, then dump the log
@@ -214,8 +227,8 @@ private void handleEofException(Exception e) {
try {
if (fs.getFileStatus(logQueue.peek()).getLen() == 0) {
LOG.warn("Forcing removal of 0 length log in queue: " + logQueue.peek());
- logQueue.remove();
- currentPosition = 0;
+ lastReadPath = logQueue.remove();
+ lastReadPosition = 0;
}
} catch (IOException ioe) {
LOG.warn("Couldn't get file length information about log " + logQueue.peek());
@@ -224,12 +237,6 @@ private void handleEofException(Exception e) {
}
public Path getCurrentPath() {
- // if we've read some WAL entries, get the Path we read from
- WALEntryBatch batchQueueHead = entryBatchQueue.peek();
- if (batchQueueHead != null) {
- return batchQueueHead.lastWalPath;
- }
- // otherwise, we must be currently reading from the head of the log queue
return logQueue.peek();
}
@@ -380,6 +387,10 @@ public void setReaderRunning(boolean readerRunning) {
this.isReaderRunning = readerRunning;
}
+ public long getLastReadPosition() {
+ return this.lastReadPosition;
+ }
+
/**
* Holds a batch of WAL entries to replicate, along with some statistics
*
@@ -396,17 +407,14 @@ static class WALEntryBatch {
private int nbHFiles = 0;
// heap size of data we need to replicate
private long heapSize = 0;
- // save the last sequenceid for each region if the table has serial-replication scope
- private Map<String, Long> lastSeqIds = new HashMap<>();
+ // whether the WALs this batch was read from still have more entries to read
+ private boolean moreEntries = true;
/**
- * @param walEntries
- * @param lastWalPath Path of the WAL the last entry in this batch was read from
- * @param lastWalPosition Position in the WAL the last entry in this batch was read from
+ * @param maxNbEntries the number of entries a batch can have
*/
- private WALEntryBatch(int maxNbEntries, Path lastWalPath) {
+ private WALEntryBatch(int maxNbEntries) {
this.walEntries = new ArrayList<>(maxNbEntries);
- this.lastWalPath = lastWalPath;
}
public void addEntry(Entry entry) {
@@ -466,13 +474,6 @@ public long getHeapSize() {
return heapSize;
}
- /**
- * @return the last sequenceid for each region if the table has serial-replication scope
- */
- public Map<String, Long> getLastSeqIds() {
- return lastSeqIds;
- }
-
private void incrementNbRowKeys(int increment) {
nbRowKeys += increment;
}
@@ -484,5 +485,22 @@ private void incrementNbHFiles(int increment) {
private void incrementHeapSize(long increment) {
heapSize += increment;
}
+
+ public boolean isEmpty() {
+ return walEntries.isEmpty();
+ }
+
+ public void updatePosition(WALEntryStream entryStream) {
+ lastWalPath = entryStream.getCurrentPath();
+ lastWalPosition = entryStream.getPosition();
+ }
+
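+    // Only ever false for recovered queues whose WALs have been read to the end.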
+ public boolean hasMoreEntries() {
+ return moreEntries;
+ }
+
+ public void setMoreEntries(boolean moreEntries) {
+ this.moreEntries = moreEntries;
+ }
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LossyCounting.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LossyCounting.java
index 95e974933b82..8663503571a9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LossyCounting.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LossyCounting.java
@@ -20,6 +20,8 @@
package org.apache.hadoop.hbase.util;
import com.google.common.annotations.VisibleForTesting;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
@@ -72,7 +74,8 @@ public LossyCounting(double errorRate, String name, LossyCountingListener listen
this.data = new ConcurrentHashMap<>();
this.listener = listener;
calculateCurrentTerm();
- executor = Executors.newSingleThreadExecutor();
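+    // Named daemon thread: an outstanding sweep must never block JVM shutdown.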
+ executor = Executors.newSingleThreadExecutor(
+ new ThreadFactoryBuilder().setDaemon(true).setNameFormat("lossy-count-%d").build());
}
public LossyCounting(String name, LossyCountingListener listener) {
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
index 93fd3a81679a..3d5e2303bc8b 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
@@ -159,10 +159,7 @@
if ( fqtn != null ) {
table = new HTable(conf, fqtn);
if (table.getTableDescriptor().getRegionReplication() > 1) {
- tableHeader = "Table Regions
NameRegion Server | Start KeyEnd KeyLocality | Requests | ReplicaID |
";
withReplica = true;
- } else {
- tableHeader = "Table Regions
NameRegion Server | Start KeyEnd KeyLocality | Requests |
";
}
if ( !readOnly && action != null ) {
%>
@@ -213,70 +210,168 @@ if ( fqtn != null ) {
<%
if(fqtn.equals(TableName.META_TABLE_NAME.getNameAsString())) {
%>
-<%= tableHeader %>
-
-<%
- // NOTE: Presumes meta with one or more replicas
- for (int j = 0; j < numMetaReplicas; j++) {
- HRegionInfo meta = RegionReplicaUtil.getRegionInfoForReplica(
- HRegionInfo.FIRST_META_REGIONINFO, j);
- ServerName metaLocation = metaTableLocator.waitMetaRegionLocation(master.getZooKeeper(), j, 1);
- for (int i = 0; i < 1; i++) {
- String url = "";
- String readReq = "N/A";
- String writeReq = "N/A";
- String fileSize = "N/A";
- String fileCount = "N/A";
- String memSize = "N/A";
- float locality = 0.0f;
+<h2>Table Regions</h2>
+<%-- meta table region listing and compaction-stats markup elided --%>
<%} else {
Admin admin = master.getConnection().getAdmin();
try { %>
@@ -363,6 +458,9 @@ if ( fqtn != null ) {
long totalSize = 0;
long totalStoreFileCount = 0;
long totalMemSize = 0;
+ long totalCompactingKVs = 0;
+ long totalCompactedKVs = 0;
+ String percentDone = "";
String urlRegionServer = null;
Map<ServerName, Integer> regDistribution = new TreeMap<ServerName, Integer>();
Map<ServerName, Integer> primaryRegDistribution = new TreeMap<ServerName, Integer>();
@@ -388,6 +486,8 @@ if ( fqtn != null ) {
totalStoreFileCount += regionload.getStorefiles();
totalMemSize += regionload.getMemStoreSizeMB();
totalStoreFileSizeMB += regionload.getStorefileSizeMB();
+ totalCompactingKVs += regionload.getTotalCompactingKVs();
+ totalCompactedKVs += regionload.getCurrentCompactedKVs();
} else {
RegionLoad load0 = new RegionLoad(ClusterStatusProtos.RegionLoad.newBuilder().setRegionSpecifier(HBaseProtos.RegionSpecifier.newBuilder().setValue(ByteString.copyFrom(regionInfo.getRegionName())).build()).build());
regionsToLoad.put(regionInfo, load0);
@@ -401,121 +501,209 @@ if ( fqtn != null ) {
regionsToLoad.put(regionInfo, load0);
}
}
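+      // Overall compaction progress, aggregated over every region of the table.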
+ if (totalCompactingKVs > 0) {
+ percentDone = String.format("%.2f", 100 *
+ ((float) totalCompactedKVs / totalCompactingKVs)) + "%";
+ }
if(regions != null && regions.size() > 0) { %>
<h2>Table Regions</h2>
-<table id="tableRegionTable" class="tablesorter table table-striped" style="table-layout: fixed; word-wrap: break-word;">
-<thead>
-<tr>
-<th style="width:22%">Name(<%= String.format("%,1d", regions.size())%>)</th>
-<th>Region Server</th>
-<th>ReadRequests (<%= String.format("%,1d", totalReadReq)%>)</th>
-<th>WriteRequests (<%= String.format("%,1d", totalWriteReq)%>)</th>
-<th>StorefileSize (<%= StringUtils.byteDesc(totalSize*1024l*1024)%>)</th>
-<th>Num.Storefiles (<%= String.format("%,1d", totalStoreFileCount)%>)</th>
-<th>MemSize (<%= StringUtils.byteDesc(totalMemSize*1024l*1024)%>)</th>
-<th>Locality</th>
-<th>Start Key</th>
-<th>End Key</th>
-<%
-  if (withReplica) {
-%>
-<th>ReplicaID</th>
-<%
-  }
-%>
-</tr>
-</thead>
-<tbody>
+<table id="tableRegionTable" class="tablesorter table table-striped" style="table-layout: fixed; word-wrap: break-word;">
+  <thead>
+    <tr>
+      <th style="width:22%">Name(<%= String.format("%,1d", regions.size())%>)</th>
+      <th>Region Server</th>
+      <th>ReadRequests (<%= String.format("%,1d", totalReadReq)%>)</th>
+      <th>WriteRequests (<%= String.format("%,1d", totalWriteReq)%>)</th>
+      <th>StorefileSize (<%= StringUtils.byteDesc(totalSize*1024l*1024)%>)</th>
+      <th>Num.Storefiles (<%= String.format("%,1d", totalStoreFileCount)%>)</th>
+      <th>MemSize (<%= StringUtils.byteDesc(totalMemSize*1024l*1024)%>)</th>
+      <th>Locality</th>
+      <th>Start Key</th>
+      <th>End Key</th>
+      <%
+        if (withReplica) {
+      %>
+      <th>ReplicaID</th>
+      <%
+        }
+      %>
+    </tr>
+  </thead>
+  <tbody>
+ <%
List<Map.Entry<HRegionInfo, RegionLoad>> entryList = new ArrayList<Map.Entry<HRegionInfo, RegionLoad>>(regionsToLoad.entrySet());
+ numRegions = regions.size();
+ int numRegionsRendered = 0;
+ // render all regions
+ if (numRegionsToRender < 0) {
+ numRegionsToRender = numRegions;
+ }
+ for (Map.Entry<HRegionInfo, RegionLoad> hriEntry : entryList) {
+ HRegionInfo regionInfo = hriEntry.getKey();
+ ServerName addr = regions.get(regionInfo);
+ RegionLoad load = hriEntry.getValue();
+ String readReq = "N/A";
+ String writeReq = "N/A";
+ String regionSize = "N/A";
+ String fileCount = "N/A";
+ String memSize = "N/A";
+ float locality = 0.0f;
+ if(load != null) {
+ readReq = String.format("%,1d", load.getReadRequestsCount());
+ writeReq = String.format("%,1d", load.getWriteRequestsCount());
+ regionSize = StringUtils.byteDesc(load.getStorefileSizeMB()*1024l*1024);
+ fileCount = String.format("%,1d", load.getStorefiles());
+ memSize = StringUtils.byteDesc(load.getMemStoreSizeMB()*1024l*1024);
+ locality = load.getDataLocality();
+ }
-<%
- List<Map.Entry<HRegionInfo, RegionLoad>> entryList = new ArrayList<Map.Entry<HRegionInfo, RegionLoad>>(regionsToLoad.entrySet());
- numRegions = regions.size();
- int numRegionsRendered = 0;
- // render all regions
- if (numRegionsToRender < 0) {
- numRegionsToRender = numRegions;
- }
- for (Map.Entry<HRegionInfo, RegionLoad> hriEntry : entryList) {
- HRegionInfo regionInfo = hriEntry.getKey();
- ServerName addr = regions.get(regionInfo);
- RegionLoad load = hriEntry.getValue();
- String readReq = "N/A";
- String writeReq = "N/A";
- String regionSize = "N/A";
- String fileCount = "N/A";
- String memSize = "N/A";
- float locality = 0.0f;
- if(load != null) {
- readReq = String.format("%,1d", load.getReadRequestsCount());
- writeReq = String.format("%,1d", load.getWriteRequestsCount());
- regionSize = StringUtils.byteDesc(load.getStorefileSizeMB()*1024l*1024);
- fileCount = String.format("%,1d", load.getStorefiles());
- memSize = StringUtils.byteDesc(load.getMemStoreSizeMB()*1024l*1024);
- locality = load.getDataLocality();
- }
+ if (addr != null) {
+ ServerLoad sl = master.getServerManager().getLoad(addr);
+ // This port might be wrong if RS actually ended up using something else.
+ urlRegionServer =
+ "//" + URLEncoder.encode(addr.getHostname()) + ":" + master.getRegionServerInfoPort(addr) + "/";
+ if(sl != null) {
+ Integer i = regDistribution.get(addr);
+ if (null == i) i = Integer.valueOf(0);
+ regDistribution.put(addr, i + 1);
+ if (withReplica && RegionReplicaUtil.isDefaultReplica(regionInfo.getReplicaId())) {
+ i = primaryRegDistribution.get(addr);
+ if (null == i) i = Integer.valueOf(0);
+ primaryRegDistribution.put(addr, i+1);
+ }
+ }
+ }
+ if (numRegionsRendered < numRegionsToRender) {
+ numRegionsRendered++;
+ %>
+      <tr>
+        <td><%= escapeXml(Bytes.toStringBinary(regionInfo.getRegionName())) %></td>
+        <%
+          if (urlRegionServer != null) {
+        %>
+        <td>
+          <a href="<%= urlRegionServer %>"><%= StringEscapeUtils.escapeHtml(addr.getHostname().toString()) + ":" + master.getRegionServerInfoPort(addr) %></a>
+        </td>
+        <%
+          } else {
+        %>
+        <td>not deployed</td>
+        <%
+          }
+        %>
+        <td><%= readReq%></td>
+        <td><%= writeReq%></td>
+        <td><%= regionSize%></td>
+        <td><%= fileCount%></td>
+        <td><%= memSize%></td>
+        <td><%= locality%></td>
+        <td><%= escapeXml(Bytes.toStringBinary(regionInfo.getStartKey()))%></td>
+        <td><%= escapeXml(Bytes.toStringBinary(regionInfo.getEndKey()))%></td>
+        <%
+          if (withReplica) {
+        %>
+        <td><%= regionInfo.getReplicaId() %></td>
+        <%
+          }
+        %>
+      </tr>
+    <% } %>
+  <% } %>
+  </tbody>
+</table>
+<% if (numRegions > numRegionsRendered) {
+     String allRegionsUrl = "?name=" + URLEncoder.encode(fqtn,"UTF-8") + "&numRegions=all";
+%>
+  <p>This table has <%= numRegions %> regions in total, in order to improve the page load time,
+     only <%= numRegionsRendered %> regions are displayed here, click
+     <a href="<%= allRegionsUrl %>">here</a> to see all regions.</p>
+<% } %>
+<table id="tableCompactStatsTable" class="tablesorter table table-striped" style="table-layout: fixed; word-wrap: break-word;">
+  <thead>
+    <tr>
+      <th style="width:22%">Name(<%= String.format("%,1d", regions.size())%>)</th>
+      <th>Region Server</th>
+      <th>Num. Compacting KVs (<%= String.format("%,1d", totalCompactingKVs)%>)</th>
+      <th>Num. Compacted KVs (<%= String.format("%,1d", totalCompactedKVs)%>)</th>
+      <th>Remaining KVs (<%= String.format("%,1d", totalCompactingKVs - totalCompactedKVs)%>)</th>
+      <th>Compaction Progress (<%= percentDone %>)</th>
+    </tr>
+  </thead>
+  <tbody>
+ <%
+ numRegionsRendered = 0;
+ for (Map.Entry<HRegionInfo, RegionLoad> hriEntry : entryList) {
+ HRegionInfo regionInfo = hriEntry.getKey();
+ ServerName addr = regions.get(regionInfo);
+ RegionLoad load = hriEntry.getValue();
+ long compactingKVs = 0;
+ long compactedKVs = 0;
+ String compactionProgress = "";
+ if(load != null) {
+ compactingKVs = load.getTotalCompactingKVs();
+ compactedKVs = load.getCurrentCompactedKVs();
+ if (compactingKVs > 0) {
+ compactionProgress = String.format("%.2f", 100 * ((float)
+ compactedKVs / compactingKVs)) + "%";
+ }
+ }
- if (addr != null) {
- ServerLoad sl = master.getServerManager().getLoad(addr);
- // This port might be wrong if RS actually ended up using something else.
- urlRegionServer =
- "//" + URLEncoder.encode(addr.getHostname()) + ":" + master.getRegionServerInfoPort(addr) + "/";
- if(sl != null) {
- Integer i = regDistribution.get(addr);
- if (null == i) i = Integer.valueOf(0);
- regDistribution.put(addr, i + 1);
- if (withReplica && RegionReplicaUtil.isDefaultReplica(regionInfo.getReplicaId())) {
- i = primaryRegDistribution.get(addr);
- if (null == i) i = Integer.valueOf(0);
- primaryRegDistribution.put(addr, i+1);
- }
- }
- }
- if (numRegionsRendered < numRegionsToRender) {
- numRegionsRendered++;
-%>
-<tr>
-  <td><%= escapeXml(Bytes.toStringBinary(regionInfo.getRegionName())) %></td>
-  <%
-  if (urlRegionServer != null) {
-  %>
-  <td>
-    <a href="<%= urlRegionServer %>"><%= StringEscapeUtils.escapeHtml(addr.getHostname().toString()) + ":" + master.getRegionServerInfoPort(addr) %></a>
-  </td>
-  <%
-  } else {
-  %>
-  <td>not deployed</td>
-  <%
-  }
-  %>
-  <td><%= readReq%></td>
-  <td><%= writeReq%></td>
-  <td><%= regionSize%></td>
-  <td><%= fileCount%></td>
-  <td><%= memSize%></td>
-  <td><%= locality%></td>
-  <td><%= escapeXml(Bytes.toStringBinary(regionInfo.getStartKey()))%></td>
-  <td><%= escapeXml(Bytes.toStringBinary(regionInfo.getEndKey()))%></td>
-  <%
-  if (withReplica) {
-  %>
-  <td><%= regionInfo.getReplicaId() %></td>
-  <%
-  }
-  %>
-</tr>
-<% } %>
-<% } %>
-</tbody>
-</table>
-<% if (numRegions > numRegionsRendered) {
-  String allRegionsUrl = "?name=" + URLEncoder.encode(fqtn,"UTF-8") + "&numRegions=all";
-%>
-<p>This table has <%= numRegions %> regions in total, in order to improve the page load time,
-   only <%= numRegionsRendered %> regions are displayed here, click
-   <a href="<%= allRegionsUrl %>">here</a> to see all regions.</p>
-<% } %>
+ if (addr != null) {
+ // This port might be wrong if RS actually ended up using something else.
+ urlRegionServer =
+ "//" + URLEncoder.encode(addr.getHostname()) + ":" + master.getRegionServerInfoPort(addr) + "/";
+ }
+ if (numRegionsRendered < numRegionsToRender) {
+ numRegionsRendered++;
+ %>
+      <tr>
+        <td><%= escapeXml(Bytes.toStringBinary(regionInfo.getRegionName())) %></td>
+        <%
+          if (urlRegionServer != null) {
+        %>
+        <td>
+          <a href="<%= urlRegionServer %>"><%= StringEscapeUtils.escapeHtml(addr.getHostname().toString()) + ":" + master.getRegionServerInfoPort(addr) %></a>
+        </td>
+        <%
+          } else {
+        %>
+        <td>not deployed</td>
+        <%
+          }
+        %>
+        <td><%= String.format("%,1d", compactingKVs)%></td>
+        <td><%= String.format("%,1d", compactedKVs)%></td>
+        <td><%= String.format("%,1d", compactingKVs - compactedKVs)%></td>
+        <td><%= compactionProgress%></td>
+      </tr>
+    <% } %>
+  <% } %>
+  </tbody>
+</table>
+<% if (numRegions > numRegionsRendered) {
+     String allRegionsUrl = "?name=" + URLEncoder.encode(fqtn,"UTF-8") + "&numRegions=all";
+%>
+  <p>This table has <%= numRegions %> regions in total, in order to improve the page load time,
+     only <%= numRegionsRendered %> regions are displayed here, click
+     <a href="<%= allRegionsUrl %>">here</a> to see all regions.</p>
+<% } %>
Regions by Region Server
<%
if (withReplica) {
@@ -652,6 +840,8 @@ $(document).ready(function()
$("#regionServerTable").tablesorter();
$("#regionServerDetailsTable").tablesorter();
$("#tableRegionTable").tablesorter();
+ $("#tableCompactStatsTable").tablesorter();
+ $("#metaTableCompactStatsTable").tablesorter();
}
);
diff --git a/hbase-server/src/main/resources/hbase-webapps/regionserver/storeFile.jsp b/hbase-server/src/main/resources/hbase-webapps/regionserver/storeFile.jsp
index 05d8783ed406..0b7117c5535f 100644
--- a/hbase-server/src/main/resources/hbase-webapps/regionserver/storeFile.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/regionserver/storeFile.jsp
@@ -21,16 +21,18 @@
import="java.io.ByteArrayOutputStream"
import="java.io.PrintStream"
import="org.apache.hadoop.conf.Configuration"
+ import="org.apache.hadoop.fs.FileSystem"
import="org.apache.hadoop.fs.Path"
import="org.apache.hadoop.hbase.HBaseConfiguration"
import="org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter"
import="org.apache.hadoop.hbase.regionserver.HRegionServer"
- import="org.apache.hadoop.hbase.regionserver.StoreFile"
+ import="org.apache.hadoop.hbase.regionserver.StoreFileInfo"
%>
<%
String storeFile = request.getParameter("name");
HRegionServer rs = (HRegionServer) getServletContext().getAttribute(HRegionServer.REGIONSERVER);
Configuration conf = rs.getConfiguration();
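+  // Filesystem handle for resolving the requested store file path.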
+ FileSystem fs = FileSystem.get(conf);
%>
+      <profile>
+        <id>apache-release</id>
+        <build>
+          <plugins>
+            <plugin>
+              <groupId>org.sonatype.plugins</groupId>
+              <artifactId>nexus-staging-maven-plugin</artifactId>
+              <version>1.6.8</version>
+              <extensions>true</extensions>
+              <configuration>
+                <nexusUrl>https://repository.apache.org/</nexusUrl>
+                <serverId>apache.releases.https</serverId>
+              </configuration>
+            </plugin>
+          </plugins>
+        </build>
+      </profile>
+      <profile>
        <id>release</id>