Skip to content

Commit

Permalink
Pass (possibly) merged object into get_ops_for_entry_action, to ensure
Browse files Browse the repository at this point in the history
we don't generate siblings in Solr, especially for CRDTs.

C.f., #712

Fixes backport of the yz_crdt test from the develop-2.2 branch.
  • Loading branch information
fadushin committed Dec 2, 2016
1 parent 782f0dd commit 3d95371
Show file tree
Hide file tree
Showing 8 changed files with 9 additions and 711 deletions.
2 changes: 2 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -28,3 +28,5 @@ dialyzer_warnings
dialyzer_unhandled_warnings
/.eqc-info
/current_counterexample.eqc
.idea/**
*.iml
20 changes: 1 addition & 19 deletions riak_test/yz_aae_test.erl
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,6 @@
-define(BUCKET, ?INDEX1).
-define(REPAIR_MFA, {yz_exchange_fsm, repair, 2}).
-define(SPACER, "testfor spaces ").
-define(AAE_THROTTLE_LIMITS, [{-1, 0}, {10000, 10}]).
-define(CFG, [
{riak_core, [
{ring_creation_size, 16},
Expand All @@ -34,14 +33,12 @@
{anti_entropy_tick, 1000},
%% allow AAE to build trees and exchange rapidly
{anti_entropy_build_limit, {100, 1000}},
{anti_entropy_concurrency, 8},
{aae_throttle_limits, ?AAE_THROTTLE_LIMITS}
{anti_entropy_concurrency, 8}
]}
]).

confirm() ->
Cluster = rt:build_cluster(5, ?CFG),
verify_throttle_config(Cluster),
yz_rt:setup_drain_intercepts(Cluster),

%% Run test for `default'/legacy bucket type
Expand All @@ -52,21 +49,6 @@ confirm() ->
lager:info("Run test for custom bucket type"),
aae_run(Cluster, ?BUCKETWITHTYPE, ?INDEX2).

%% Verify on every node of the cluster that the Yokozuna AAE entropy
%% throttle is enabled and configured with the expected limits.
verify_throttle_config(Cluster) ->
    CheckNode =
        fun(Node) ->
                Enabled = rpc:call(Node,
                                   riak_core_throttle,
                                   is_throttle_enabled,
                                   [?YZ_APP_NAME, ?YZ_ENTROPY_THROTTLE_KEY]),
                ?assert(Enabled),
                Limits = rpc:call(Node,
                                  riak_core_throttle,
                                  get_limits,
                                  [?YZ_APP_NAME, ?YZ_ENTROPY_THROTTLE_KEY]),
                ?assertMatch(?AAE_THROTTLE_LIMITS, Limits)
        end,
    lists:foreach(CheckNode, Cluster).

-spec aae_run([node()], bucket(), index_name()) -> pass | fail.
aae_run(Cluster, Bucket, Index) ->
case yz_rt:bb_driver_setup() of
Expand Down
32 changes: 0 additions & 32 deletions riak_test/yz_dt_test.erl
Original file line number Diff line number Diff line change
Expand Up @@ -7,12 +7,10 @@
{yokozuna, [{enabled, true}]}]).
-define(COUNTER, <<"counters">>).
-define(SET, <<"sets">>).
-define(HLL, <<"hlls">>).
-define(MAP, <<"maps">>).
-define(TYPES,
[{?COUNTER, counter},
{?SET, set},
{?HLL, hll},
{?MAP, map}]).

-import(yz_rt, [create_index/2,
Expand All @@ -39,13 +37,11 @@ confirm() ->
%% Update some datatypes
counter_update(PB),
set_update(PB),
hll_update(PB),
map_update(PB),

%% Search the index for the types
counter_search(ANode),
set_search(ANode),
hll_search(ANode),
map_search(ANode),

pass.
Expand Down Expand Up @@ -75,34 +71,6 @@ set_search(Node) ->
?assertSearch(Node, ?SET, "set", "Voldemort", 1),
?assertSearch(Node, ?SET, "set", "C*", 2).

%% Write two HLL datatypes ("dynamo" and "erlang") into the HLL bucket
%% type and verify their cardinality estimates after a round trip.
%% One HLL is built by folding add_element/2 and the other with
%% add_elements/2, exercising both client construction APIs.
%%
%% Fix: the final two ?assertEqual calls had (Actual, Expected) reversed;
%% EUnit's convention (and every other assertion in this file) is
%% expected-first, which also makes failure reports read correctly.
hll_update(PB) ->
    Dynamos = lists:foldl(fun riakc_hll:add_element/2, riakc_hll:new(),
                          [<<"Riak">>, <<"Cassandra">>, <<"Voldemort">>,
                           <<"Couchbase">>]),
    Erlangs = riakc_hll:add_elements([<<"Riak">>,
                                      <<"Couchbase">>,
                                      <<"CouchDB">>],
                                     riakc_hll:new()),
    Bucket = {?HLL, <<"databass">>},
    ?assertEqual(ok,
                 riakc_pb_socket:update_type(
                   PB, Bucket, <<"dynamo">>, riakc_hll:to_op(Dynamos))),
    ?assertEqual(ok, riakc_pb_socket:update_type(
                       PB, Bucket, <<"erlang">>, riakc_hll:to_op(Erlangs))),
    {ok, CheckDynamo} = riakc_pb_socket:fetch_type(PB, Bucket, <<"dynamo">>),
    %% Four distinct elements were added to the "dynamo" HLL.
    ?assertEqual(4, riakc_hll:value(CheckDynamo)),
    {ok, CheckErlang} = riakc_pb_socket:fetch_type(PB, Bucket, <<"erlang">>),
    %% Three distinct elements were added to the "erlang" HLL.
    ?assertEqual(3, riakc_hll:value(CheckErlang)).

%% Query the HLL index for exact cardinalities and ranges; each pair is
%% {SolrQuery, ExpectedHitCount}. Order matches the original assertions.
hll_search(Node) ->
    Cases = [{"3", 1},
             {"4", 1},
             {"2", 0},
             {"[0 TO 3]", 1},
             {"[4 TO 1000]", 1},
             {"[5 TO 1000]", 0},
             {"[0 TO 2]", 0}],
    lists:foreach(
      fun({Query, ExpectedHits}) ->
              ?assertSearch(Node, ?HLL, "hll", Query, ExpectedHits)
      end,
      Cases).

map_update(PB) ->
Sam = lists:foldl(fun({Key, Fun}, Map) ->
riakc_map:update(Key, Fun, Map)
Expand Down
102 changes: 0 additions & 102 deletions riak_test/yz_faceted_search.erl
Original file line number Diff line number Diff line change
Expand Up @@ -71,7 +71,6 @@ confirm() ->
put_restaurants(Cluster, ?BUCKET),
verify_field_faceting(Cluster, ?INDEX),
verify_query_faceting(Cluster, ?INDEX),
verify_pivot_faceting(Cluster, ?INDEX),
pass.

-define(RESTAURANTS,
Expand Down Expand Up @@ -151,104 +150,3 @@ verify_query_facets(HP, Index, Params) ->
?assertMatch({_Key, 3}, lists:keyfind(?PRICE_RANGE_1, 1, FacetQueries)),
?assertMatch({_Key, 1}, lists:keyfind(?PRICE_RANGE_2, 1, FacetQueries)),
ok.

%%
%% Pivot faceting is new in Solr 4.10. This is not an exhaustive test of
%% pivot faceting, but just ensures that basic functionality is accessible
%% via the HTTP interface.
%%
%% Example return value:
%%
%% "facet_counts":{
%% "facet_queries":{},
%% "facet_fields":{
%% "state":["Ohio",4]},
%% "facet_dates":{},
%% "facet_ranges":{},
%% "facet_intervals":{},
%% "facet_pivot":{
%% "state,city":[{
%% "field":"state",
%% "value":"Ohio",
%% "count":4,
%% "pivot":[{
%% "field":"city",
%% "value":"Cincinnati",
%% "count":3},
%% {
%% "field":"city",
%% "value":"Columbus",
%% "count":1}]},
%% {
%% "field":"state",
%% "value":"Kentucky",
%% "count":1,
%% "pivot":[{
%% "field":"city",
%% "value":"Covington",
%% "count":1}]}]}}}
%%
%% Exercise Solr pivot faceting over state,city on the restaurant data:
%% check the overall hit count, the plain state field facet (filtered by
%% facet.mincount), and the per-state pivot breakdown.
verify_pivot_faceting(Cluster, Index) ->
    HostPort = yz_rt:host_port(Cluster),
    PivotSpec = "state,city",
    SearchParams = [{facet, true},
                    {'facet.field', state},
                    {'facet.mincount', 3},
                    {'facet.pivot', PivotSpec}],
    lager:info("Pivot faceting: ~p, ~p, ~p", [HostPort, Index, SearchParams]),
    {ok, "200", _Headers, Body} =
        yz_rt:search(HostPort, Index, "name", "*", SearchParams),
    Decoded = mochijson2:decode(Body),
    lager:debug("Pivot faceting results: ~p", [Decoded]),

    ?assertEqual(5, kvc:path([<<"response">>, <<"numFound">>], Decoded)),

    %% We expect to see all 4 Ohio restaurants in the search results, but
    %% none of the Kentucky restaurants because there is only one in the
    %% test data set, and facet.mincount is set to 3.
    StateFacet = kvc:path(
                   [<<"facet_counts">>, <<"facet_fields">>, <<"state">>],
                   Decoded),
    ?assertEqual([<<"Ohio">>, 4], StateFacet),

    %% Both states appear in the state,city pivot; verify each entry.
    StatePivots = kvc:path(
                    [<<"facet_counts">>, <<"facet_pivot">>,
                     list_to_binary(PivotSpec)],
                    Decoded),
    ?assertEqual(2, length(StatePivots)),
    lists:foreach(fun verify_state_pivot/1, StatePivots).


%% Dispatch on the pivot entry's state value.
verify_state_pivot(StatePivot) ->
    StateName = kvc:path(value, StatePivot),
    verify_state_pivot(StateName, StatePivot).

%% Per-state expectations from the test data set: Ohio has 4 restaurants
%% across 2 cities; Kentucky has 1 restaurant in 1 city.
verify_state_pivot(<<"Ohio">>, StatePivot) ->
    verify_state_details(StatePivot, 4, 2);
verify_state_pivot(<<"Kentucky">>, StatePivot) ->
    verify_state_details(StatePivot, 1, 1).

%% Shared check: state-level count, number of city sub-pivots, and each
%% city pivot's own count.
verify_state_details(StatePivot, ExpectedCount, ExpectedCityCount) ->
    verify_pivot_count(ExpectedCount, StatePivot),
    CityPivots = kvc:path(pivot, StatePivot),
    ?assertEqual(ExpectedCityCount, length(CityPivots)),
    lists:foreach(fun verify_city_pivot/1, CityPivots).

%% Dispatch on the pivot entry's city value.
verify_city_pivot(CityPivot) ->
    CityName = kvc:path(value, CityPivot),
    verify_city_pivot(CityName, CityPivot).

%% Cincinnati holds three of the test restaurants; Columbus and Covington
%% hold one each. An unknown city still fails with function_clause.
verify_city_pivot(<<"Cincinnati">>, Pivot) ->
    verify_pivot_count(3, Pivot);
verify_city_pivot(City, Pivot)
  when City =:= <<"Columbus">>; City =:= <<"Covington">> ->
    verify_pivot_count(1, Pivot).

%% Assert that the pivot entry's 'count' field equals ExpectedCount.
verify_pivot_count(ExpectedCount, Pivot) ->
    ActualCount = kvc:path(count, Pivot),
    ?assertEqual(ExpectedCount, ActualCount).
70 changes: 0 additions & 70 deletions riak_test/yz_pb.erl
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,6 @@ confirm() ->
confirm_admin_index(Cluster),
confirm_admin_bad_index_name(Cluster),
confirm_basic_search(Cluster),
confirm_w1c_search(Cluster),
confirm_fl_search_without_score(Cluster),
confirm_fl_search_without_score_without_sort(Cluster),
confirm_encoded_search(Cluster),
Expand All @@ -52,8 +51,6 @@ confirm() ->
confirm_stored_fields(Cluster),
confirm_search_non_existent_index(Cluster),
confirm_search_with_spaced_key(Cluster),
confirm_create_index_within_timeout(Cluster),
confirm_create_index_not_within_timeout(Cluster),
pass.

select_random(List) ->
Expand Down Expand Up @@ -205,15 +202,6 @@ confirm_basic_search(Cluster) ->
Params = [{sort, <<"score desc">>}, {fl, ["*","score"]}],
store_and_search(Cluster, Bucket, "test", Body, <<"text:herp">>, Params).

%% Verify search against a write-once bucket type: create an index with
%% {write_once, true}, store a document, and confirm it is found.
%%
%% Fix: the log line was copy-pasted from confirm_basic_search/1 and
%% reported the wrong test name; it now logs confirm_w1c_search.
confirm_w1c_search(Cluster) ->
    Index = <<"write_once">>,
    Bucket = {Index, <<"b1">>},
    create_index(Cluster, Index, Index, [{write_once, true}]),
    lager:info("confirm_w1c_search ~p", [Bucket]),
    Body = "herp derp",
    Params = [{sort, <<"score desc">>}, {fl, ["*","score"]}],
    store_and_search(Cluster, Bucket, "test", Body, <<"text:herp">>, Params).

confirm_fl_search_without_score(Cluster) ->
Index = <<"fl_search_without_score">>,
Bucket = {Index, <<"b1">>},
Expand Down Expand Up @@ -405,61 +393,3 @@ confirm_search_with_spaced_key(Cluster) ->
Params = [{sort, <<"age_i asc">>}],
store_and_search(Cluster, Bucket, Key,
Body, "application/json", <<"foo_i:5">>, Params).

%% Create one index with a generous finite timeout and another with an
%% infinite timeout; both creations must return ok and the indexes must
%% be fetchable immediately afterwards.
confirm_create_index_within_timeout(Cluster) ->
    FiniteIndex = <<"index_within_timeout">>,
    InfiniteIndex = <<"index_within_infinity">>,
    Bucket = {FiniteIndex, <<"b1">>},
    Node = select_random(Cluster),
    [{Host, Port}] = host_entries(rt:connection_info([Node])),
    %% NOTE(review): Port - 1 presumably maps the HTTP port reported by
    %% rt to the PB listener — confirm against the harness config.
    {ok, Pid} = riakc_pb_socket:start_link(Host, (Port - 1)),
    NVal = {n_val, 3},
    CreateAndFetch =
        fun(Index, TimeoutOpt) ->
                ?assertEqual(ok,
                             riakc_pb_socket:create_search_index(
                               Pid, Index, [NVal, TimeoutOpt])),
                ?assertEqual(ok,
                             element(1, riakc_pb_socket:get_search_index(
                                          Pid, Index, [])))
        end,
    lager:info("confirm_search_to_test_index_within_timeout ~p", [Bucket]),
    CreateAndFetch(FiniteIndex, {timeout, 25000}),
    lager:info("confirm_search_to_test_index_within_infinity ~p", [Bucket]),
    CreateAndFetch(InfiniteIndex, {timeout, infinity}),
    riakc_pb_socket:stop(Pid).

%% Negative tests for create_search_index options:
%%   1. an invalid timeout value and an invalid n_val each raise badarg
%%      on the client side;
%%   2. a valid but very short (10 ms) timeout yields the server's
%%      "not created ... within 10 ms timeout" error, after which the
%%      index is still created asynchronously and becomes visible once
%%      yz_rt:wait_for_index/2 returns.
confirm_create_index_not_within_timeout(Cluster) ->
    Index = <<"index_not_within_timeout">>,
    Bucket = {Index, <<"b1">>},
    lager:info("confirm_search_to_test_index_not_within_timeout ~p", [Bucket]),
    Node = select_random(Cluster),
    [{Host, Port}] = host_entries(rt:connection_info([Node])),
    %% NOTE(review): Port - 1 presumably maps the HTTP port reported by
    %% rt to the PB listener — confirm against the harness config.
    {ok, Pid} = riakc_pb_socket:start_link(Host, (Port-1)),
    riakc_pb_socket:set_options(Pid, [queue_if_disconnected]),

    SchemaName = ?YZ_DEFAULT_SCHEMA_NAME,
    %% Test invalid n_val
    NValT = {n_val, bbbbbb},
    %% Test invalid timeout value
    Timeout = {timeout, asdasdasd},

    NValT1 = {n_val, 3},
    Timeout1 = {timeout, 10},

    %% Each call mixes one valid and one invalid option, so the badarg
    %% is attributable to the invalid option alone.
    ?assertError(badarg, riakc_pb_socket:create_search_index(Pid, Index,
                                                             SchemaName,
                                                             [NValT1, Timeout])),
    ?assertError(badarg, riakc_pb_socket:create_search_index(Pid, Index,
                                                             SchemaName,
                                                             [NValT, Timeout1])),

    %% Exact error binary from the server; assumes 10 ms is always
    %% shorter than cluster-wide index propagation (potentially racy on
    %% a very fast cluster).
    {error, <<"Index index_not_within_timeout not created on all the nodes within 10 ms timeout\n">>} =
        riakc_pb_socket:create_search_index(
            Pid, Index, SchemaName,
            [NValT1, Timeout1]),

    %% Despite the timeout error above, creation proceeds in the
    %% background; wait for it and confirm the index is fetchable.
    ok = yz_rt:wait_for_index(Cluster, Index),
    ?assertEqual(ok,
        element(1, riakc_pb_socket:get_search_index(Pid, Index, []))),
    riakc_pb_socket:stop(Pid).
11 changes: 4 additions & 7 deletions riak_test/yz_rt.erl
Original file line number Diff line number Diff line change
Expand Up @@ -863,21 +863,18 @@ load_intercept_code(Node) ->
"*.erl"]),
rt_intercept:load_code(Node, [CodePath]).

rolling_upgrade(Cluster, Version, UpgradeConfig, WaitForServices) ->
rolling_upgrade(Cluster, Version, UpgradeConfig, WaitForServices, fun rt:no_op/1).

-spec rolling_upgrade(cluster() | node(),
current | previous | legacy,
UpgradeConfig :: props(),
WaitForServices :: [atom()]) -> ok.
rolling_upgrade(Cluster, Version, UpgradeConfig, WaitForServices, UpgradeCallabck)
rolling_upgrade(Cluster, Version, UpgradeConfig, WaitForServices)
when is_list(Cluster) ->
lager:info("Perform rolling upgrade on cluster ~p", [Cluster]),
[rolling_upgrade(Node, Version, UpgradeConfig, WaitForServices, UpgradeCallabck)
[rolling_upgrade(Node, Version, UpgradeConfig, WaitForServices)
|| Node <- Cluster],
ok;
rolling_upgrade(Node, Version, UpgradeConfig, WaitForServices, UpgradeCallabck) ->
rt:upgrade(Node, Version, UpgradeConfig, UpgradeCallabck),
rolling_upgrade(Node, Version, UpgradeConfig, WaitForServices) ->
rt:upgrade(Node, Version, UpgradeConfig),
[rt:wait_for_service(Node, Service) || Service <- WaitForServices],
ok.

Expand Down
Loading

0 comments on commit 3d95371

Please sign in to comment.