-module(couch_replicator_small_max_request_size_target).

-include_lib("couch/include/couch_eunit.hrl").
-include_lib("couch/include/couch_db.hrl").

-import(couch_replicator_test_helper, [
    db_url/1,
    replicate/1,
    compare_dbs/3
]).

-define(TIMEOUT_EUNIT, 360).

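% Create a temporary database (closed right away) and return its name.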
setup() ->
    DbName = ?tempdb(),
    {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
    ok = couch_db:close(Db),
    DbName.

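% setup(remote) tags a fresh database as {remote, DbName}. setup({A, B}) starts
% the test node with the replicator application, lowers chttpd's
% max_http_request_size to 10000 bytes, and creates the source and target
% databases.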
setup(remote) ->
    {remote, setup()};

setup({A, B}) ->
    Ctx = test_util:start_couch([couch_replicator]),
    config:set("chttpd", "max_http_request_size", "10000", false),
    Source = setup(A),
    Target = setup(B),
    {Ctx, {Source, Target}}.

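% teardown/1 deletes a single database; teardown/2 also stops the replicator
% application and the test node started in setup/1.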
teardown({remote, DbName}) ->
    teardown(DbName);
teardown(DbName) ->
    ok = couch_server:delete(DbName, [?ADMIN_CTX]),
    ok.

teardown(_, {Ctx, {Source, Target}}) ->
    teardown(Source),
    teardown(Target),
    ok = application:stop(couch_replicator),
    ok = test_util:stop_couch(Ctx).

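% Main EUnit generator: run each scenario once per {Source, Target} pair via
% foreachx, so every case gets its own setup/teardown.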
reduce_max_request_size_test_() ->
    Pairs = [{remote, remote}],
    {
        "Replicate docs when target has a small max_http_request_size",
        {
            foreachx,
            fun setup/1, fun teardown/2,
            [{Pair, fun should_replicate_all_docs/2}
             || Pair <- Pairs]
            ++ [{Pair, fun should_replicate_one/2}
             || Pair <- Pairs]
            % Disabled. See issue 574. Sometimes PUTs with a doc and
            % attachment which exceed the maximum request size are simply
            % closed instead of returning a 413 response. That makes these
            % tests flaky.
            ++ [{Pair, fun should_replicate_one_with_attachment/2}
             || Pair <- Pairs]
        }
    }.

% Test documents which individually are below max_http_request_size, but
% whose combined batch size is greater than max_http_request_size. The
% replicator should automatically split the batch into smaller batches and
% POST those separately.
should_replicate_all_docs({From, To}, {_Ctx, {Source, Target}}) ->
    {lists:flatten(io_lib:format("~p -> ~p", [From, To])),
     {inorder, [should_populate_source(Source),
                should_replicate(Source, Target),
                should_compare_databases(Source, Target, [])]}}.

% If a document is too large to POST as a single request, that document is
% skipped, but the replication overall still makes progress and does not crash.
should_replicate_one({From, To}, {_Ctx, {Source, Target}}) ->
    {lists:flatten(io_lib:format("~p -> ~p", [From, To])),
     {inorder, [should_populate_source_one_large_one_small(Source),
                should_replicate(Source, Target),
                should_compare_databases(Source, Target, [<<"doc0">>])]}}.

% If a document has an attachment larger than 64 * 1024 bytes, the replicator
% switches to sending individual documents directly and skips bulk_docs.
% Test that case separately.
% See the note in the main test function for why this was disabled.
should_replicate_one_with_attachment({From, To}, {_Ctx, {Source, Target}}) ->
    {lists:flatten(io_lib:format("~p -> ~p", [From, To])),
     {inorder, [should_populate_source_one_large_attachment(Source),
                should_populate_source(Source),
                should_replicate(Source, Target),
                should_compare_databases(Source, Target, [<<"doc0">>])]}}.

should_populate_source({remote, Source}) ->
    should_populate_source(Source);

should_populate_source(Source) ->
    {timeout, ?TIMEOUT_EUNIT, ?_test(add_docs(Source, 5, 3000, 0))}.


should_populate_source_one_large_one_small({remote, Source}) ->
    should_populate_source_one_large_one_small(Source);

should_populate_source_one_large_one_small(Source) ->
    {timeout, ?TIMEOUT_EUNIT, ?_test(one_large_one_small(Source, 12000, 3000))}.


should_populate_source_one_large_attachment({remote, Source}) ->
    should_populate_source_one_large_attachment(Source);

should_populate_source_one_large_attachment(Source) ->
    {timeout, ?TIMEOUT_EUNIT, ?_test(one_large_attachment(Source, 70000, 70000))}.

should_replicate({remote, Source}, Target) ->
    should_replicate(db_url(Source), Target);

should_replicate(Source, {remote, Target}) ->
    should_replicate(Source, db_url(Target));

should_replicate(Source, Target) ->
    {timeout, ?TIMEOUT_EUNIT, ?_test(replicate(Source, Target))}.


should_compare_databases({remote, Source}, Target, ExceptIds) ->
    should_compare_databases(Source, Target, ExceptIds);

should_compare_databases(Source, {remote, Target}, ExceptIds) ->
    should_compare_databases(Source, Target, ExceptIds);

should_compare_databases(Source, Target, ExceptIds) ->
    {timeout, ?TIMEOUT_EUNIT, ?_test(compare_dbs(Source, Target, ExceptIds))}.

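% Build a binary blob of Size bytes (all "x"), used as filler for document
% bodies and attachment data.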
binary_chunk(Size) when is_integer(Size), Size > 0 ->
    << <<"x">> || _ <- lists:seq(1, Size) >>.

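% Add DocCount documents named doc1..docN, each with a DocSize-byte body and,
% when AttSize > 0, an AttSize-byte attachment.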
add_docs(DbName, DocCount, DocSize, AttSize) ->
    [begin
        DocId = iolist_to_binary(["doc", integer_to_list(Id)]),
        add_doc(DbName, DocId, DocSize, AttSize)
    end || Id <- lists:seq(1, DocCount)],
    ok.

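% doc0 gets a body large enough to exceed the lowered request size limit on
% its own; doc1 is small enough to replicate normally.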
one_large_one_small(DbName, Large, Small) ->
    add_doc(DbName, <<"doc0">>, Large, 0),
    add_doc(DbName, <<"doc1">>, Small, 0).

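% doc0 gets both a large body and a large attachment, so the attachment
% exceeds the 64 * 1024 byte threshold mentioned above.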
one_large_attachment(DbName, Size, AttSize) ->
    add_doc(DbName, <<"doc0">>, Size, AttSize).

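% Write a single document with a Size-byte body and, optionally, an
% AttSize-byte attachment.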
add_doc(DbName, DocId, Size, AttSize) when is_binary(DocId) ->
    {ok, Db} = couch_db:open_int(DbName, []),
    Doc0 = #doc{id = DocId, body = {[{<<"x">>, binary_chunk(Size)}]}},
    Doc = Doc0#doc{atts = atts(AttSize)},
    {ok, _} = couch_db:update_doc(Db, Doc, []),
    couch_db:close(Db).

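% Return an attachment list: empty for Size = 0, otherwise a single attachment
% of the given length whose data is generated on demand.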
atts(0) ->
    [];

atts(Size) ->
    [couch_att:new([
        {name, <<"att1">>},
        {type, <<"app/binary">>},
        {att_len, Size},
        {data, fun(Bytes) -> binary_chunk(Bytes) end}
    ])].

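% Trigger a replication from Source to Target with a single worker process so
% the batch size stays predictable.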
replicate(Source, Target) ->
    replicate({[
        {<<"source">>, Source},
        {<<"target">>, Target},
        {<<"worker_processes">>, "1"} % This makes batch_size predictable
    ]}).