// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "components/sync/engine_impl/cycle/data_type_tracker.h"

#include <algorithm>
#include <utility>

#include "base/check.h"
#include "base/notreached.h"

namespace syncer {
14 
namespace {

// Expands to a switch case that returns the enumerator's name as a string
// literal. Used by WaitInterval::GetModeString() below and #undef'd after.
#define ENUM_CASE(x) \
  case x:            \
    return #x;       \
    break;

}  // namespace
23 
WaitInterval()24 WaitInterval::WaitInterval() : mode(UNKNOWN) {}
25 
// Constructs a wait interval with the given blocking mode and duration.
WaitInterval::WaitInterval(BlockingMode mode, base::TimeDelta length)
    : mode(mode), length(length) {}
28 
~WaitInterval()29 WaitInterval::~WaitInterval() {}
30 
GetModeString(BlockingMode mode)31 const char* WaitInterval::GetModeString(BlockingMode mode) {
32   switch (mode) {
33     ENUM_CASE(UNKNOWN);
34     ENUM_CASE(EXPONENTIAL_BACKOFF);
35     ENUM_CASE(THROTTLED);
36     ENUM_CASE(EXPONENTIAL_BACKOFF_RETRYING);
37   }
38   NOTREACHED();
39   return "";
40 }
41 
#undef ENUM_CASE
43 
// Creates a tracker with no pending local or remote activity.
// |initial_payload_buffer_size| bounds how many pending invalidations may be
// queued before the oldest are dropped (see RecordRemoteInvalidation()).
DataTypeTracker::DataTypeTracker(size_t initial_payload_buffer_size)
    : local_nudge_count_(0),
      local_refresh_request_count_(0),
      payload_buffer_size_(initial_payload_buffer_size),
      initial_sync_required_(false),
      sync_required_to_resolve_conflict_(false) {}
50 
~DataTypeTracker()51 DataTypeTracker::~DataTypeTracker() {}
52 
RecordLocalChange()53 base::TimeDelta DataTypeTracker::RecordLocalChange() {
54   local_nudge_count_++;
55   return nudge_delay_;
56 }
57 
RecordLocalRefreshRequest()58 void DataTypeTracker::RecordLocalRefreshRequest() {
59   local_refresh_request_count_++;
60 }
61 
RecordRemoteInvalidation(std::unique_ptr<InvalidationInterface> incoming)62 void DataTypeTracker::RecordRemoteInvalidation(
63     std::unique_ptr<InvalidationInterface> incoming) {
64   DCHECK(incoming);
65 
66   // Merge the incoming invalidation into our list of pending invalidations.
67   //
68   // We won't use STL algorithms here because our concept of equality doesn't
69   // quite fit the expectations of set_intersection.  In particular, two
70   // invalidations can be equal according to the SingleObjectInvalidationSet's
71   // rules (ie. have equal versions), but still have different AckHandle values
72   // and need to be acknowledged separately.
73   //
74   // The invalidations service can only track one outsanding invalidation per
75   // type and version, so the acknowledgement here should be redundant.  We'll
76   // acknowledge them anyway since it should do no harm, and makes this code a
77   // bit easier to test.
78   //
79   // Overlaps should be extremely rare for most invalidations.  They can happen
80   // for unknown version invalidations, though.
81 
82   auto it = pending_invalidations_.begin();
83 
84   // Find the lower bound.
85   while (it != pending_invalidations_.end() &&
86          InvalidationInterface::LessThanByVersion(**it, *incoming)) {
87     it++;
88   }
89 
90   if (it != pending_invalidations_.end() &&
91       !InvalidationInterface::LessThanByVersion(*incoming, **it) &&
92       !InvalidationInterface::LessThanByVersion(**it, *incoming)) {
93     // Incoming overlaps with existing.  Either both are unknown versions
94     // (likely) or these two have the same version number (very unlikely).
95     // Acknowledge and overwrite existing.
96 
97     // Insert before the existing and get iterator to inserted.
98     auto it2 = pending_invalidations_.insert(it, std::move(incoming));
99 
100     // Increment that iterator to the old one, then acknowledge and remove it.
101     ++it2;
102     (*it2)->Acknowledge();
103     pending_invalidations_.erase(it2);
104   } else {
105     // The incoming has a version not in the pending_invalidations_ list.
106     // Add it to the list at the proper position.
107     pending_invalidations_.insert(it, std::move(incoming));
108   }
109 
110   // The incoming invalidation may have caused us to exceed our buffer size.
111   // Trim some items from our list, if necessary.
112   while (pending_invalidations_.size() > payload_buffer_size_) {
113     last_dropped_invalidation_ = std::move(pending_invalidations_.front());
114     last_dropped_invalidation_->Drop();
115     pending_invalidations_.erase(pending_invalidations_.begin());
116   }
117 }
118 
// Marks that this type still needs its initial (first-time) sync.
void DataTypeTracker::RecordInitialSyncRequired() {
  initial_sync_required_ = true;
}
122 
// Marks that a commit hit a conflict that requires another sync cycle to
// resolve.
void DataTypeTracker::RecordCommitConflict() {
  sync_required_to_resolve_conflict_ = true;
}
126 
RecordSuccessfulSyncCycle()127 void DataTypeTracker::RecordSuccessfulSyncCycle() {
128   // If we were blocked, then we would have been excluded from this cycle's
129   // GetUpdates and Commit actions.  Our state remains unchanged.
130   if (IsBlocked()) {
131     return;
132   }
133 
134   // Reset throttling and backoff state.
135   unblock_time_ = base::TimeTicks();
136   wait_interval_.reset();
137 
138   local_nudge_count_ = 0;
139   local_refresh_request_count_ = 0;
140 
141   // TODO(rlarocque): If we want this to be correct even if we should happen to
142   // crash before writing all our state, we should wait until the results of
143   // this sync cycle have been written to disk before updating the invalidations
144   // state.  See crbug.com/324996.
145   for (auto it = pending_invalidations_.begin();
146        it != pending_invalidations_.end(); ++it) {
147     (*it)->Acknowledge();
148   }
149   pending_invalidations_.clear();
150 
151   if (last_dropped_invalidation_) {
152     last_dropped_invalidation_->Acknowledge();
153     last_dropped_invalidation_.reset();
154   }
155 
156   // The initial sync should generally have happened as part of a "configure"
157   // sync cycle, before this method gets called (i.e. after a successful
158   // "normal" sync cycle). However, in some cases the initial sync might not
159   // have happened, e.g. if this one data type got blocked or throttled during
160   // the configure cycle. For those cases, also clear |initial_sync_required_|
161   // here.
162   initial_sync_required_ = false;
163 
164   sync_required_to_resolve_conflict_ = false;
165 }
166 
// Clears the initial-sync-required flag after a configure cycle completes.
void DataTypeTracker::RecordInitialSyncDone() {
  // If we were blocked during the initial sync cycle, then the initial sync is
  // not actually done. Our state remains unchanged.
  if (IsBlocked()) {
    return;
  }
  initial_sync_required_ = false;
}
175 
// This limit will take effect on all future invalidations received.
// Note: the pending list is not trimmed here; trimming happens lazily inside
// RecordRemoteInvalidation().
void DataTypeTracker::UpdatePayloadBufferSize(size_t new_size) {
  payload_buffer_size_ = new_size;
}
180 
IsSyncRequired() const181 bool DataTypeTracker::IsSyncRequired() const {
182   return !IsBlocked() && (HasLocalChangePending() || IsGetUpdatesRequired());
183 }
184 
IsGetUpdatesRequired() const185 bool DataTypeTracker::IsGetUpdatesRequired() const {
186   // TODO(crbug.com/926184): Maybe this shouldn't check IsInitialSyncRequired():
187   // The initial sync is done in a configuration cycle, while this method
188   // refers to normal cycles.
189   return !IsBlocked() &&
190          (HasRefreshRequestPending() || HasPendingInvalidation() ||
191           IsInitialSyncRequired() || IsSyncRequiredToResolveConflict());
192 }
193 
// True if at least one local change was recorded since the last successful
// sync cycle.
bool DataTypeTracker::HasLocalChangePending() const {
  return local_nudge_count_ > 0;
}
197 
// True if at least one local refresh request was recorded since the last
// successful sync cycle.
bool DataTypeTracker::HasRefreshRequestPending() const {
  return local_refresh_request_count_ > 0;
}
201 
// True if any invalidation is queued, or one was dropped and not yet
// acknowledged.
bool DataTypeTracker::HasPendingInvalidation() const {
  return !pending_invalidations_.empty() || last_dropped_invalidation_;
}
205 
// True if this type still needs its initial (first-time) sync.
bool DataTypeTracker::IsInitialSyncRequired() const {
  return initial_sync_required_;
}
209 
// True if a commit conflict was recorded and a sync cycle is needed to
// resolve it.
bool DataTypeTracker::IsSyncRequiredToResolveConflict() const {
  return sync_required_to_resolve_conflict_;
}
213 
SetLegacyNotificationHint(sync_pb::DataTypeProgressMarker * progress) const214 void DataTypeTracker::SetLegacyNotificationHint(
215     sync_pb::DataTypeProgressMarker* progress) const {
216   DCHECK(!IsBlocked())
217       << "We should not make requests if the type is throttled or backed off.";
218 
219   if (!pending_invalidations_.empty() &&
220       !pending_invalidations_.back()->IsUnknownVersion()) {
221     // The old-style source info can contain only one hint per type.  We grab
222     // the most recent, to mimic the old coalescing behaviour.
223     progress->set_notification_hint(
224         pending_invalidations_.back()->GetPayload());
225   } else if (HasLocalChangePending()) {
226     // The old-style source info sent up an empty string (as opposed to
227     // nothing at all) when the type was locally nudged, but had not received
228     // any invalidations.
229     progress->set_notification_hint(std::string());
230   }
231 }
232 
FillGetUpdatesTriggersMessage(sync_pb::GetUpdateTriggers * msg) const233 void DataTypeTracker::FillGetUpdatesTriggersMessage(
234     sync_pb::GetUpdateTriggers* msg) const {
235   // Fill the list of payloads, if applicable.  The payloads must be ordered
236   // oldest to newest, so we insert them in the same order as we've been storing
237   // them internally.
238   for (auto it = pending_invalidations_.begin();
239        it != pending_invalidations_.end(); ++it) {
240     if (!(*it)->IsUnknownVersion()) {
241       msg->add_notification_hint((*it)->GetPayload());
242     }
243   }
244 
245   msg->set_server_dropped_hints(
246       !pending_invalidations_.empty() &&
247       (*pending_invalidations_.begin())->IsUnknownVersion());
248   msg->set_client_dropped_hints(!!last_dropped_invalidation_);
249   msg->set_local_modification_nudges(local_nudge_count_);
250   msg->set_datatype_refresh_nudges(local_refresh_request_count_);
251   msg->set_initial_sync_in_progress(initial_sync_required_);
252   msg->set_sync_for_resolve_conflict_in_progress(
253       sync_required_to_resolve_conflict_);
254 }
255 
// A type is blocked while throttled or in exponential backoff; the
// EXPONENTIAL_BACKOFF_RETRYING mode deliberately does NOT count as blocked.
bool DataTypeTracker::IsBlocked() const {
  return wait_interval_.get() &&
         (wait_interval_->mode == WaitInterval::THROTTLED ||
          wait_interval_->mode == WaitInterval::EXPONENTIAL_BACKOFF);
}
261 
// Time remaining until the current throttle/backoff period expires, clamped
// to zero. Must only be called while IsBlocked().
base::TimeDelta DataTypeTracker::GetTimeUntilUnblock() const {
  DCHECK(IsBlocked());
  return std::max(base::TimeDelta::FromSeconds(0),
                  unblock_time_ - base::TimeTicks::Now());
}
267 
// Length of the last backoff interval. Only valid in the
// EXPONENTIAL_BACKOFF_RETRYING state; any other state is a programming error
// and yields zero.
base::TimeDelta DataTypeTracker::GetLastBackoffInterval() const {
  if (GetBlockingMode() != WaitInterval::EXPONENTIAL_BACKOFF_RETRYING) {
    NOTREACHED();
    return base::TimeDelta::FromSeconds(0);
  }
  return wait_interval_->length;
}
275 
// Throttles this type for |duration| starting at |now|. An existing unblock
// time is only ever extended, never shortened.
void DataTypeTracker::ThrottleType(base::TimeDelta duration,
                                   base::TimeTicks now) {
  unblock_time_ = std::max(unblock_time_, now + duration);
  wait_interval_ =
      std::make_unique<WaitInterval>(WaitInterval::THROTTLED, duration);
}
282 
// Puts this type into exponential backoff for |duration| starting at |now|.
// An existing unblock time is only ever extended, never shortened.
void DataTypeTracker::BackOffType(base::TimeDelta duration,
                                  base::TimeTicks now) {
  unblock_time_ = std::max(unblock_time_, now + duration);
  wait_interval_ = std::make_unique<WaitInterval>(
      WaitInterval::EXPONENTIAL_BACKOFF, duration);
}
289 
// Advances the throttle/backoff state machine once the unblock time passes:
// an expired exponential backoff moves to (or stays in)
// EXPONENTIAL_BACKOFF_RETRYING, while an expired throttle is cleared
// entirely.
void DataTypeTracker::UpdateThrottleOrBackoffState() {
  if (base::TimeTicks::Now() >= unblock_time_) {
    if (wait_interval_.get() &&
        (wait_interval_->mode == WaitInterval::EXPONENTIAL_BACKOFF ||
         wait_interval_->mode == WaitInterval::EXPONENTIAL_BACKOFF_RETRYING)) {
      wait_interval_->mode = WaitInterval::EXPONENTIAL_BACKOFF_RETRYING;
    } else {
      // Throttle expired (or no interval): unblock completely.
      unblock_time_ = base::TimeTicks();
      wait_interval_.reset();
    }
  }
}
302 
// Sets the delay reported for future local nudges (see RecordLocalChange()).
void DataTypeTracker::UpdateLocalNudgeDelay(base::TimeDelta delay) {
  nudge_delay_ = delay;
}
306 
GetBlockingMode() const307 WaitInterval::BlockingMode DataTypeTracker::GetBlockingMode() const {
308   if (!wait_interval_) {
309     return WaitInterval::UNKNOWN;
310   }
311   return wait_interval_->mode;
312 }
313 
}  // namespace syncer