1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "components/omnibox/browser/scored_history_match.h"
6 
7 #include <math.h>
8 
9 #include <algorithm>
10 #include <utility>
11 #include <vector>
12 
13 #include "base/check_op.h"
14 #include "base/macros.h"
15 #include "base/no_destructor.h"
16 #include "base/numerics/safe_conversions.h"
17 #include "base/strings/string_number_conversions.h"
18 #include "base/strings/string_split.h"
19 #include "base/strings/string_util.h"
20 #include "base/strings/utf_string_conversions.h"
21 #include "components/bookmarks/browser/bookmark_utils.h"
22 #include "components/omnibox/browser/autocomplete_match.h"
23 #include "components/omnibox/browser/history_url_provider.h"
24 #include "components/omnibox/browser/omnibox_field_trial.h"
25 #include "components/omnibox/browser/url_prefix.h"
26 #include "components/omnibox/common/omnibox_features.h"
27 #include "net/base/registry_controlled_domains/registry_controlled_domain.h"
28 
29 namespace {
30 
31 // The number of days of recency scores to precompute.
32 const int kDaysToPrecomputeRecencyScoresFor = 366;
33 
34 // The number of raw term score buckets use; raw term scores greater this are
35 // capped at the score of the largest bucket.
36 const int kMaxRawTermScore = 30;
37 
38 // Pre-computed information to speed up calculating recency scores.
39 // |days_ago_to_recency_score| is a simple array mapping how long ago a page was
40 // visited (in days) to the recency score we should assign it.  This allows easy
41 // lookups of scores without requiring math. This is initialized by
42 // InitDaysAgoToRecencyScoreArray called by
43 // ScoredHistoryMatch::Init().
44 float days_ago_to_recency_score[kDaysToPrecomputeRecencyScoresFor];
45 
46 // Pre-computed information to speed up calculating topicality scores.
47 // |raw_term_score_to_topicality_score| is a simple array mapping how raw terms
48 // scores (a weighted sum of the number of hits for the term, weighted by how
49 // important the hit is: hostname, path, etc.) to the topicality score we should
50 // assign it.  This allows easy lookups of scores without requiring math. This
51 // is initialized by InitRawTermScoreToTopicalityScoreArray() called from
52 // ScoredHistoryMatch::Init().
53 float raw_term_score_to_topicality_score[kMaxRawTermScore];
54 
55 // Precalculates raw_term_score_to_topicality_score, used in
56 // GetTopicalityScore().
InitRawTermScoreToTopicalityScoreArray()57 void InitRawTermScoreToTopicalityScoreArray() {
58   for (int term_score = 0; term_score < kMaxRawTermScore; ++term_score) {
59     float topicality_score;
60     if (term_score < 10) {
61       // If the term scores less than 10 points (no full-credit hit, or
62       // no combination of hits that score that well), then the topicality
63       // score is linear in the term score.
64       topicality_score = 0.1 * term_score;
65     } else {
66       // For term scores of at least ten points, pass them through a log
67       // function so a score of 10 points gets a 1.0 (to meet up exactly
68       // with the linear component) and increases logarithmically until
69       // maxing out at 30 points, with computes to a score around 2.1.
70       topicality_score = (1.0 + 2.25 * log10(0.1 * term_score));
71     }
72     raw_term_score_to_topicality_score[term_score] = topicality_score;
73   }
74 }
75 
// Pre-calculates days_ago_to_recency_score, used in GetRecencyScore().
// Scores are piecewise linear in |days_ago|, computed with integer math
// and then normalized to [0.0, 1.0].  The DCHECK at the bottom verifies
// the resulting table is monotonically non-increasing.
void InitDaysAgoToRecencyScoreArray() {
  for (int days_ago = 0; days_ago < kDaysToPrecomputeRecencyScoresFor;
       days_ago++) {
    int unnormalized_recency_score;
    if (days_ago <= 4) {
      // Anything visited within the last four days gets full credit.
      unnormalized_recency_score = 100;
    } else if (days_ago <= 14) {
      // Linearly extrapolate between 4 and 14 days so 14 days has a score
      // of 70.
      unnormalized_recency_score = 70 + (14 - days_ago) * (100 - 70) / (14 - 4);
    } else if (days_ago <= 31) {
      // Linearly extrapolate between 14 and 31 days so 31 days has a score
      // of 50.
      unnormalized_recency_score = 50 + (31 - days_ago) * (70 - 50) / (31 - 14);
    } else if (days_ago <= 90) {
      // Linearly extrapolate between 30 and 90 days so 90 days has a score
      // of 30.
      unnormalized_recency_score = 30 + (90 - days_ago) * (50 - 30) / (90 - 30);
    } else {
      // Linearly extrapolate between 90 and 365 days so 365 days has a score
      // of 10.
      // NOTE(review): this segment's endpoints are 20 (at 90 days) and 10
      // (at 365 days), so the table jumps from 0.30 at 90 days to about
      // 0.20 at 91 days.  Monotonicity still holds (the DCHECK below
      // passes), but confirm the discontinuity is intended.
      unnormalized_recency_score =
          10 + (365 - days_ago) * (20 - 10) / (365 - 90);
    }
    days_ago_to_recency_score[days_ago] = unnormalized_recency_score / 100.0;
    if (days_ago > 0) {
      DCHECK_LE(days_ago_to_recency_score[days_ago],
                days_ago_to_recency_score[days_ago - 1]);
    }
  }
}
108 
109 }  // namespace
110 
// static
// Class-level scoring parameters.  All of these are populated exactly once
// by ScoredHistoryMatch::Init() from OmniboxFieldTrial values; the two
// override pointers below default to null and, when set (e.g. by tests),
// take precedence over the field-trial-derived tables.
bool ScoredHistoryMatch::also_do_hup_like_scoring_;
float ScoredHistoryMatch::bookmark_value_;
float ScoredHistoryMatch::typed_value_;
size_t ScoredHistoryMatch::max_visits_to_score_;
bool ScoredHistoryMatch::allow_tld_matches_;
bool ScoredHistoryMatch::allow_scheme_matches_;
size_t ScoredHistoryMatch::num_title_words_to_allow_;
float ScoredHistoryMatch::topicality_threshold_;
ScoredHistoryMatch::ScoreMaxRelevances*
    ScoredHistoryMatch::relevance_buckets_override_ = nullptr;
OmniboxFieldTrial::NumMatchesScores*
    ScoredHistoryMatch::matches_to_specificity_override_ = nullptr;
124 
// Default constructor: delegates to the scoring constructor with empty /
// neutral arguments (empty row, no visits, no search terms, not
// bookmarked, one matching page, far-future |now|).  The delegate runs
// ScoredHistoryMatch::Init(), so even default-constructed instances leave
// the class-level parameters initialized.
ScoredHistoryMatch::ScoredHistoryMatch()
    : ScoredHistoryMatch(history::URLRow(),
                         VisitInfoVector(),
                         base::string16(),
                         String16Vector(),
                         WordStarts(),
                         RowWordStarts(),
                         false,
                         1,
                         base::Time::Max()) {}
135 
// Scores |row| against the user's search terms, populating |raw_score| and
// the match-position metadata used for highlighting and inline
// autocompletion.  |lower_string| is the (lowercased) input string;
// |terms_vector| its individual terms; |terms_to_word_starts_offsets|
// gives, per term, the offset within the term where its first word begins;
// |word_starts| holds precomputed word-start offsets for the row's URL and
// title; |num_matching_pages| feeds the document-specificity score; |now|
// anchors recency computation.  If any term fails to match both URL and
// title, the constructor returns early with raw_score == 0.
ScoredHistoryMatch::ScoredHistoryMatch(
    const history::URLRow& row,
    const VisitInfoVector& visits,
    const base::string16& lower_string,
    const String16Vector& terms_vector,
    const WordStarts& terms_to_word_starts_offsets,
    const RowWordStarts& word_starts,
    bool is_url_bookmarked,
    size_t num_matching_pages,
    base::Time now)
    : raw_score(0) {
  // Initialize HistoryMatch fields. TODO(tommycli): Merge these two classes.
  url_info = row;
  input_location = 0;
  match_in_scheme = false;
  match_in_subdomain = false;
  innermost_match = false;

  // NOTE: Call Init() before doing any validity checking to ensure that the
  // class is always initialized after an instance has been constructed. In
  // particular, this ensures that the class is initialized after an instance
  // has been constructed via the no-args constructor.
  ScoredHistoryMatch::Init();

  // Figure out where each search term appears in the URL and/or page title
  // so that we can score as well as provide autocomplete highlighting.
  base::OffsetAdjuster::Adjustments adjustments;
  GURL gurl = row.url();
  base::string16 cleaned_up_url_for_matching =
      bookmarks::CleanUpUrlForMatching(gurl, &adjustments);
  base::string16 title = bookmarks::CleanUpTitleForMatching(row.title());
  int term_num = 0;
  for (const auto& term : terms_vector) {
    TermMatches url_term_matches =
        MatchTermInString(term, cleaned_up_url_for_matching, term_num);
    TermMatches title_term_matches = MatchTermInString(term, title, term_num);
    if (url_term_matches.empty() && title_term_matches.empty()) {
      // A term was not found in either URL or title - reject.
      return;
    }
    url_matches.insert(url_matches.end(), url_term_matches.begin(),
                       url_term_matches.end());
    title_matches.insert(title_matches.end(), title_term_matches.begin(),
                         title_term_matches.end());
    ++term_num;
  }

  // Sort matches by offset, which is needed for GetTopicalityScore() to
  // function correctly.
  url_matches = SortMatches(url_matches);
  title_matches = SortMatches(title_matches);

  // We can likely inline autocomplete a match if:
  //  1) there is only one search term
  //  2) AND the match begins immediately after one of the prefixes in
  //     URLPrefix such as http://www and https:// (note that one of these
  //     is the empty prefix, for cases where the user has typed the scheme)
  //  3) AND the search string does not end in whitespace (making it look to
  //     the IMUI as though there is a single search term when actually there
  //     is a second, empty term).
  // |best_inlineable_prefix| stores the inlineable prefix computed in
  // clause (2) or NULL if no such prefix exists.  (The URL is not inlineable.)
  // Note that using the best prefix here means that when multiple
  // prefixes match, we'll choose to inline following the longest one.
  // For a URL like "http://www.washingtonmutual.com", this means
  // typing "w" will inline "ashington..." instead of "ww.washington...".
  // We cannot be sure about inlining at this stage because this test depends
  // on the cleaned up URL, which is not necessarily the same as the URL string
  // used in HistoryQuick provider to construct the match.  For instance, the
  // cleaned up URL has special characters unescaped so as to allow matches
  // with them.  This aren't unescaped when HistoryQuick displays the URL;
  // hence a match in the URL that involves matching the unescaped special
  // characters may not be able to be inlined given how HistoryQuick displays
  // it.  This happens in HistoryQuickProvider::QuickMatchToACMatch().
  bool likely_can_inline = false;
  if (!url_matches.empty() && (terms_vector.size() == 1) &&
      !base::IsUnicodeWhitespace(*lower_string.rbegin())) {
    const base::string16 gurl_spec = base::UTF8ToUTF16(gurl.spec());
    const URLPrefix* best_inlineable_prefix =
        URLPrefix::BestURLPrefix(gurl_spec, terms_vector[0]);
    if (best_inlineable_prefix) {
      // When inline autocompleting this match, we're going to use the part of
      // the URL following the end of the matching text.  However, it's possible
      // that FormatUrl(), when formatting this suggestion for display,
      // mucks with the text.  We need to ensure that the text we're thinking
      // about highlighting isn't in the middle of a mucked sequence.  In
      // particular, for the omnibox input of "x" or "xn", we may get a match
      // in a punycoded domain name such as http://www.xn--blahblah.com/.
      // When FormatUrl() processes the xn--blahblah part of the hostname, it'll
      // transform the whole thing into a series of unicode characters.  It's
      // impossible to give the user an inline autocompletion of the text
      // following "x" or "xn" in this case because those characters no longer
      // exist in the displayed URL string.
      size_t offset =
        best_inlineable_prefix->prefix.length() + terms_vector[0].length();
      base::OffsetAdjuster::UnadjustOffset(adjustments, &offset);
      if (offset != base::string16::npos) {
        // Initialize innermost_match.
        // The idea here is that matches that occur in the scheme or
        // "www." are worse than matches which don't.  For the URLs
        // "http://www.google.com" and "http://wellsfargo.com", we want
        // the omnibox input "w" to cause the latter URL to rank higher
        // than the former.  Note that this is not the same as checking
        // whether one match's inlinable prefix has more components than
        // the other match's, since in this example, both matches would
        // have an inlinable prefix of "http://", which is one component.
        //
        // Instead, we look for the overall best (i.e., most components)
        // prefix of the current URL, and then check whether the inlinable
        // prefix has that many components.  If it does, this is an
        // "innermost" match, and should be boosted.  In the example
        // above, the best prefixes for the two URLs have two and one
        // components respectively, while the inlinable prefixes each
        // have one component; this means the first match is not innermost
        // and the second match is innermost, resulting in us boosting the
        // second match.
        //
        // Now, the code that implements this.
        // The deepest prefix for this URL regardless of where the match is.
        const URLPrefix* best_prefix =
            URLPrefix::BestURLPrefix(gurl_spec, base::string16());
        DCHECK(best_prefix);
        // If the URL is likely to be inlineable, we must have a match.  Note
        // the prefix that makes it inlineable may be empty.
        likely_can_inline = true;
        innermost_match = (best_inlineable_prefix->num_components ==
                           best_prefix->num_components);
      }
    }
  }

  // Combine the three component scores into the final raw score, saturating
  // rather than overflowing on extreme values.
  const float topicality_score =
      GetTopicalityScore(terms_vector.size(), gurl, adjustments,
                         terms_to_word_starts_offsets, word_starts);
  const float frequency_score = GetFrequency(now, is_url_bookmarked, visits);
  const float specificity_score =
      GetDocumentSpecificityScore(num_matching_pages);
  raw_score = base::saturated_cast<int>(GetFinalRelevancyScore(
      topicality_score, frequency_score, specificity_score));

  if (also_do_hup_like_scoring_ && likely_can_inline) {
    // HistoryURL-provider-like scoring gives any match that is
    // capable of being inlined a certain minimum score.  Some of these
    // are given a higher score that lets them be shown in inline.
    // This test here derives from the test in
    // HistoryURLProvider::PromoteMatchForInlineAutocomplete().
    const bool promote_to_inline =
        (row.typed_count() > 1) || (IsHostOnly() && (row.typed_count() == 1));
    int hup_like_score =
        promote_to_inline
            ? HistoryURLProvider::kScoreForBestInlineableResult
            : HistoryURLProvider::kBaseScoreForNonInlineableResult;

    // Also, if the user types the hostname of a host with a typed
    // visit, then everything from that host get given inlineable scores
    // (because the URL-that-you-typed will go first and everything
    // else will be assigned one minus the previous score, as coded
    // at the end of HistoryURLProvider::DoAutocomplete().
    if (base::UTF8ToUTF16(gurl.host()) == terms_vector[0])
      hup_like_score = HistoryURLProvider::kScoreForBestInlineableResult;

    // HistoryURLProvider has the function PromoteOrCreateShorterSuggestion()
    // that's meant to promote prefixes of the best match (if they've
    // been visited enough related to the best match) or
    // create/promote host-only suggestions (even if they've never
    // been typed).  The code is complicated and we don't try to
    // duplicate the logic here.  Instead, we handle a simple case: in
    // low-typed-count ranges, give host-only matches (i.e.,
    // http://www.foo.com/ vs. http://www.foo.com/bar.html) a boost so
    // that the host-only match outscores all the other matches that
    // would normally have the same base score.  This behavior is not
    // identical to what happens in HistoryURLProvider even in these
    // low typed count ranges--sometimes it will create/promote when
    // this test does not (indeed, we cannot create matches like HUP
    // can) and vice versa--but the underlying philosophy is similar.
    if (!promote_to_inline && IsHostOnly())
      hup_like_score++;

    // All the other logic to goes into hup-like-scoring happens in
    // the tie-breaker case of MatchScoreGreater().

    // Incorporate hup_like_score into raw_score.
    raw_score = std::max(raw_score, hup_like_score);
  }

  // Remove overlapping term matches before exposing them for highlighting.
  url_matches = DeoverlapMatches(url_matches);
  title_matches = DeoverlapMatches(title_matches);

  // Now that we're done processing this entry, correct the offsets of the
  // matches in |url_matches| so they point to offsets in the original URL
  // spec, not the cleaned-up URL string that we used for matching.
  std::vector<size_t> offsets = OffsetsFromTermMatches(url_matches);
  base::OffsetAdjuster::UnadjustOffsets(adjustments, &offsets);
  url_matches = ReplaceOffsetsInTermMatches(url_matches, offsets);

  // Now that url_matches contains the unadjusted offsets referring to the
  // original URL, we can calculate which components the matches are for.
  std::vector<AutocompleteMatch::MatchPosition> match_positions;
  for (auto& url_match : url_matches) {
    match_positions.push_back(
        std::make_pair(url_match.offset, url_match.offset + url_match.length));
  }
  AutocompleteMatch::GetMatchComponents(gurl, match_positions, &match_in_scheme,
                                        &match_in_subdomain);
}
341 
// Copy, move, assignment, and destruction are all member-wise defaults;
// ScoredHistoryMatch owns no resources beyond its value members.
ScoredHistoryMatch::ScoredHistoryMatch(const ScoredHistoryMatch& other) =
    default;
ScoredHistoryMatch::ScoredHistoryMatch(ScoredHistoryMatch&& other) = default;
ScoredHistoryMatch& ScoredHistoryMatch::operator=(
    const ScoredHistoryMatch& other) = default;
ScoredHistoryMatch& ScoredHistoryMatch::operator=(ScoredHistoryMatch&& other) =
    default;
ScoredHistoryMatch::~ScoredHistoryMatch() = default;
350 
351 // Comparison function for sorting ScoredMatches by their scores with
352 // intelligent tie-breaking.
MatchScoreGreater(const ScoredHistoryMatch & m1,const ScoredHistoryMatch & m2)353 bool ScoredHistoryMatch::MatchScoreGreater(const ScoredHistoryMatch& m1,
354                                            const ScoredHistoryMatch& m2) {
355   if (m1.raw_score != m2.raw_score)
356     return m1.raw_score > m2.raw_score;
357 
358   // This tie-breaking logic is inspired by / largely copied from the
359   // ordering logic in history_url_provider.cc CompareHistoryMatch().
360 
361   // A URL that has been typed at all is better than one that has never been
362   // typed.  (Note "!"s on each side.)
363   if (!m1.url_info.typed_count() != !m2.url_info.typed_count())
364     return m1.url_info.typed_count() > m2.url_info.typed_count();
365 
366   // Innermost matches (matches after any scheme or "www.") are better than
367   // non-innermost matches.
368   if (m1.innermost_match != m2.innermost_match)
369     return m1.innermost_match;
370 
371   // URLs that have been typed more often are better.
372   if (m1.url_info.typed_count() != m2.url_info.typed_count())
373     return m1.url_info.typed_count() > m2.url_info.typed_count();
374 
375   // For URLs that have each been typed once, a host (alone) is better
376   // than a page inside.
377   if (m1.url_info.typed_count() == 1) {
378     if (m1.IsHostOnly() != m2.IsHostOnly())
379       return m1.IsHostOnly();
380   }
381 
382   // URLs that have been visited more often are better.
383   if (m1.url_info.visit_count() != m2.url_info.visit_count())
384     return m1.url_info.visit_count() > m2.url_info.visit_count();
385 
386   // URLs that have been visited more recently are better.
387   return m1.url_info.last_visit() > m2.url_info.last_visit();
388 }
389 
390 // static
FilterTermMatchesByWordStarts(const TermMatches & term_matches,const WordStarts & terms_to_word_starts_offsets,const WordStarts & word_starts,size_t start_pos,size_t end_pos,bool allow_midword_continuations)391 TermMatches ScoredHistoryMatch::FilterTermMatchesByWordStarts(
392     const TermMatches& term_matches,
393     const WordStarts& terms_to_word_starts_offsets,
394     const WordStarts& word_starts,
395     size_t start_pos,
396     size_t end_pos,
397     bool allow_midword_continuations) {
398   // Return early if no filtering is needed.
399   if (start_pos == std::string::npos)
400     return term_matches;
401   TermMatches filtered_matches;
402   auto next_word_starts = word_starts.begin();
403   auto end_word_starts = word_starts.end();
404   size_t last_end = 0;
405   for (const auto& term_match : term_matches) {
406     const size_t term_offset =
407         terms_to_word_starts_offsets[term_match.term_num];
408     // Advance next_word_starts until it's >= the position of the term we're
409     // considering (adjusted for where the word begins within the term).
410     while ((next_word_starts != end_word_starts) &&
411            (*next_word_starts < (term_match.offset + term_offset)))
412       ++next_word_starts;
413     // Add the match if it's (1) before the position we start filtering at, (2)
414     // after the position we stop filtering at (assuming we have a position
415     // to stop filtering at), (3) at a word boundary, (4) void of words (e.g.
416     // the term '-' contains no words), or, (5) if allow_midword_continuations
417     // is true, continues where the previous match left off (e.g. inputs such
418     // as 'stack overflow' will match text such as 'stackoverflow').
419     if (term_match.offset < start_pos ||
420         (end_pos != std::string::npos && term_match.offset >= end_pos) ||
421         (next_word_starts != end_word_starts &&
422          *next_word_starts == term_match.offset + term_offset) ||
423         term_offset == term_match.length ||
424         (allow_midword_continuations && term_match.offset == last_end)) {
425       last_end = term_match.offset + term_match.length;
426       filtered_matches.push_back(term_match);
427     }
428   }
429   return filtered_matches;
430 }
431 
432 // static
Init()433 void ScoredHistoryMatch::Init() {
434   static bool initialized = false;
435 
436   if (initialized)
437     return;
438 
439   initialized = true;
440   also_do_hup_like_scoring_ = OmniboxFieldTrial::HQPAlsoDoHUPLikeScoring();
441   bookmark_value_ = OmniboxFieldTrial::HQPBookmarkValue();
442   typed_value_ = OmniboxFieldTrial::HQPTypedValue();
443   max_visits_to_score_ = OmniboxFieldTrial::HQPMaxVisitsToScore();
444   allow_tld_matches_ = OmniboxFieldTrial::HQPAllowMatchInTLDValue();
445   allow_scheme_matches_ = OmniboxFieldTrial::HQPAllowMatchInSchemeValue();
446   num_title_words_to_allow_ = OmniboxFieldTrial::HQPNumTitleWordsToAllow();
447   topicality_threshold_ =
448       OmniboxFieldTrial::HQPExperimentalTopicalityThreshold();
449 
450   InitRawTermScoreToTopicalityScoreArray();
451   InitDaysAgoToRecencyScoreArray();
452 }
453 
// Computes the topicality component of the score: how well the search terms
// hit the "topical" parts of the URL and title.  Also filters |url_matches|
// and |title_matches| (member state) down to matches usable for scoring and
// highlighting.  Returns 0.0 if the averaged per-term score falls below
// |topicality_threshold_|.
float ScoredHistoryMatch::GetTopicalityScore(
    const int num_terms,
    const GURL& url,
    const base::OffsetAdjuster::Adjustments& adjustments,
    const WordStarts& terms_to_word_starts_offsets,
    const RowWordStarts& word_starts) {
  // A vector that accumulates per-term scores.  The strongest match--a
  // match in the hostname at a word boundary--is worth 10 points.
  // Everything else is less.  In general, a match that's not at a word
  // boundary is worth about 1/4th or 1/5th of a match at the word boundary
  // in the same part of the URL/title.
  DCHECK_GT(num_terms, 0);
  std::vector<int> term_scores(num_terms, 0);
  auto next_word_starts = word_starts.url_word_starts_.begin();
  auto end_word_starts = word_starts.url_word_starts_.end();

  // Locate the component boundaries (host, path, query) within the raw spec.
  const url::Parsed& parsed = url.parsed_for_possibly_invalid_spec();
  size_t host_pos = parsed.CountCharactersBefore(url::Parsed::HOST, true);
  size_t path_pos = parsed.CountCharactersBefore(url::Parsed::PATH, true);
  size_t query_pos = parsed.CountCharactersBefore(url::Parsed::QUERY, true);
  size_t last_part_of_host_pos =
      url.possibly_invalid_spec().rfind('.', path_pos);

  // |word_starts| and |url_matches| both contain offsets for the cleaned up
  // URL used for matching, so we have to follow those adjustments.
  base::OffsetAdjuster::AdjustOffset(adjustments, &host_pos);
  base::OffsetAdjuster::AdjustOffset(adjustments, &path_pos);
  base::OffsetAdjuster::AdjustOffset(adjustments, &query_pos);
  base::OffsetAdjuster::AdjustOffset(adjustments, &last_part_of_host_pos);

  // Loop through all URL matches and score them appropriately.
  // First, filter all matches not at a word boundary and in the path (or
  // later).
  url_matches = FilterTermMatchesByWordStarts(
      url_matches, terms_to_word_starts_offsets, word_starts.url_word_starts_,
      path_pos, std::string::npos, true);
  if (url.has_scheme()) {
    // Also filter matches not at a word boundary and in the scheme.
    url_matches = FilterTermMatchesByWordStarts(
        url_matches, terms_to_word_starts_offsets, word_starts.url_word_starts_,
        0, host_pos, true);
  }
  for (const auto& url_match : url_matches) {
    // Calculate the offset in the URL string where the meaningful (word) part
    // of the term starts.  This takes into account times when a term starts
    // with punctuation such as "/foo".
    const size_t term_word_offset =
        url_match.offset + terms_to_word_starts_offsets[url_match.term_num];
    // Advance next_word_starts until it's >= the position of the term we're
    // considering (adjusted for where the word begins within the term).
    while ((next_word_starts != end_word_starts) &&
           (*next_word_starts < term_word_offset)) {
      ++next_word_starts;
    }
    const bool at_word_boundary = (next_word_starts != end_word_starts) &&
                                  (*next_word_starts == term_word_offset);
    // Award points based on which URL component the match falls in;
    // components nearer the host are worth more.
    if (term_word_offset >= query_pos) {
      // The match is in the query or ref component.
      term_scores[url_match.term_num] += 5;
    } else if (term_word_offset >= path_pos) {
      // The match is in the path component.
      term_scores[url_match.term_num] += 8;
    } else if (term_word_offset >= host_pos) {
      if (term_word_offset < last_part_of_host_pos) {
        // Either there are no dots in the hostname or this match isn't
        // the last dotted component.
        term_scores[url_match.term_num] += at_word_boundary ? 10 : 2;
      } else {
        // The match is in the last part of a dotted hostname (usually this
        // is the top-level domain .com, .net, etc.).
        if (allow_tld_matches_)
          term_scores[url_match.term_num] += at_word_boundary ? 10 : 0;
      }
    } else {
      // The match is in the protocol (a.k.a. scheme).
      // Matches not at a word boundary should have been filtered already.
      if (allow_scheme_matches_)
        term_scores[url_match.term_num] += 10;
    }
  }
  // Now do the analogous loop over all matches in the title.
  next_word_starts = word_starts.title_word_starts_.begin();
  end_word_starts = word_starts.title_word_starts_.end();
  size_t word_num = 0;
  title_matches = FilterTermMatchesByWordStarts(
      title_matches, terms_to_word_starts_offsets,
      word_starts.title_word_starts_, 0, std::string::npos, true);
  for (const auto& title_match : title_matches) {
    // Calculate the offset in the title string where the meaningful (word) part
    // of the term starts.  This takes into account times when a term starts
    // with punctuation such as "/foo".
    const size_t term_word_offset =
        title_match.offset + terms_to_word_starts_offsets[title_match.term_num];
    // Advance next_word_starts until it's >= the position of the term we're
    // considering (adjusted for where the word begins within the term).
    while ((next_word_starts != end_word_starts) &&
           (*next_word_starts < term_word_offset)) {
      ++next_word_starts;
      ++word_num;
    }
    if (word_num >= num_title_words_to_allow_)
      break;  // Only score matches within the first
              // |num_title_words_to_allow_| title words.
    term_scores[title_match.term_num] += 8;
  }
  // TODO(mpearson): Restore logic for penalizing out-of-order matches.
  // (Perhaps discount them by 0.8?)
  // TODO(mpearson): Consider: if the earliest match occurs late in the string,
  // should we discount it?
  // TODO(mpearson): Consider: do we want to score based on how much of the
  // input string the input covers?  (I'm leaning toward no.)

  // Compute the topicality_score as the sum of transformed term_scores,
  // using the precomputed lookup table (raw scores are capped at the
  // largest bucket).
  float topicality_score = 0;
  for (int term_score : term_scores) {
    topicality_score += raw_term_score_to_topicality_score[std::min(
        term_score, kMaxRawTermScore - 1)];
  }
  // TODO(mpearson): If there are multiple terms, consider taking the
  // geometric mean of per-term scores rather than the arithmetic mean.

  const float final_topicality_score = topicality_score / num_terms;

  // Demote the URL if the topicality score is less than threshold.
  if (final_topicality_score < topicality_threshold_)
    return 0.0;

  return final_topicality_score;
}
582 
GetRecencyScore(int last_visit_days_ago) const583 float ScoredHistoryMatch::GetRecencyScore(int last_visit_days_ago) const {
584   // Lookup the score in days_ago_to_recency_score, treating
585   // everything older than what we've precomputed as the oldest thing
586   // we've precomputed.  The std::max is to protect against corruption
587   // in the database (in case last_visit_days_ago is negative).
588   return days_ago_to_recency_score[std::max(
589       std::min(last_visit_days_ago, kDaysToPrecomputeRecencyScoresFor - 1), 0)];
590 }
591 
GetFrequency(const base::Time & now,const bool bookmarked,const VisitInfoVector & visits) const592 float ScoredHistoryMatch::GetFrequency(const base::Time& now,
593                                        const bool bookmarked,
594                                        const VisitInfoVector& visits) const {
595   // Compute the weighted sum of |value_of_transition| over the last at most
596   // |max_visits_to_score_| visits, where each visit is weighted using
597   // GetRecencyScore() based on how many days ago it happened.
598   //
599   // Here are example frequency scores, assuming |max_visits_to_score_| is 10.
600   // - a single typed visit more than three months ago, no other visits -> 0.45
601   //   ( = 1.5 typed visit score * 0.3 recency score)
602   // - a single visit recently -> 1.0
603   //   ( = 1.0 visit score * 1.0 recency score)
604   // - a single typed visit recently -> 1.5
605   //   ( = 1.5 typed visit score * 1.0 recency score)
606   // - 10+ visits, once every three days, no typed visits -> about 7.5
607   //   ( 10+ visits, averaging about 0.75 recency score each)
608   // - 10+ typed visits, once a week -> about 7.5
609   //   ( 10+ visits, average of 1.5 typed visit score * 0.5 recency score)
610   // - 10+ visit, once every day, no typed visits -> about 9.0
611   //   ( 10+ visits, average about 0.9 recency score each)
612   // - 10+ typed visit, once every three days -> about 11
613   //   ( 10+ visits, averaging about 1.5 typed visit *  0.75 recency score each)
614   // - 10+ typed visits today -> 15
615   //   ( 10+ visits, each worth 1.5 typed visit score)
616   float summed_visit_points = 0;
617   auto visits_end =
618       visits.begin() + std::min(visits.size(), max_visits_to_score_);
619   // Visits should be in newest to oldest order.
620   DCHECK(std::adjacent_find(
621              visits.begin(), visits_end,
622              [](const history::VisitInfo& a, const history::VisitInfo& b) {
623                return a.first < b.first;
624              }) == visits_end);
625   for (auto i = visits.begin(); i != visits_end; ++i) {
626     const bool is_page_transition_typed =
627         ui::PageTransitionCoreTypeIs(i->second, ui::PAGE_TRANSITION_TYPED);
628     float value_of_transition = is_page_transition_typed ? typed_value_ : 1;
629     if (bookmarked)
630       value_of_transition = std::max(value_of_transition, bookmark_value_);
631     const float bucket_weight = GetRecencyScore((now - i->first).InDays());
632     summed_visit_points += (value_of_transition * bucket_weight);
633   }
634   return summed_visit_points;
635 }
636 
GetDocumentSpecificityScore(size_t num_matching_pages) const637 float ScoredHistoryMatch::GetDocumentSpecificityScore(
638     size_t num_matching_pages) const {
639   // A mapping from the number of matching pages to their associated document
640   // specificity scores.  See omnibox_field_trial.h for more details.
641   static base::NoDestructor<OmniboxFieldTrial::NumMatchesScores>
642       default_matches_to_specificity(OmniboxFieldTrial::HQPNumMatchesScores());
643   OmniboxFieldTrial::NumMatchesScores* matches_to_specificity =
644       matches_to_specificity_override_ ? matches_to_specificity_override_
645                                        : default_matches_to_specificity.get();
646 
647   // The floating point value below must be less than the lowest score the
648   // server would send down.
649   OmniboxFieldTrial::NumMatchesScores::const_iterator it = std::upper_bound(
650       matches_to_specificity->begin(), matches_to_specificity->end(),
651       std::pair<size_t, double>{num_matching_pages, -1});
652   return (it != matches_to_specificity->end()) ? it->second : 1.0;
653 }
654 
655 // static
GetFinalRelevancyScore(float topicality_score,float frequency_score,float specificity_score)656 float ScoredHistoryMatch::GetFinalRelevancyScore(float topicality_score,
657                                                  float frequency_score,
658                                                  float specificity_score) {
659   // |relevance_buckets| gives a mapping from intemerdiate score to the final
660   // relevance score.
661   static base::NoDestructor<ScoreMaxRelevances> default_relevance_buckets(
662       GetHQPBuckets());
663   ScoreMaxRelevances* relevance_buckets = relevance_buckets_override_
664                                               ? relevance_buckets_override_
665                                               : default_relevance_buckets.get();
666   DCHECK(!relevance_buckets->empty());
667   DCHECK_EQ(0.0, (*relevance_buckets)[0].first);
668 
669   if (topicality_score == 0)
670     return 0;
671 
672   // Compute an intermediate score by multiplying the topicality, specificity,
673   // and frequency scores, then map it to the range [0, 1399].  For typical
674   // ranges, remember:
675   // * topicality score is usually around 1.0; typical range is [0.5, 2.5].
676   //   1.0 is the value assigned when a single-term input matches in the
677   //   hostname.  For more details, see GetTopicalityScore().
678   // * specificity score is usually 1.0; typical range is [1.0, 3.0].
679   //   1.0 is the default value when the user's input matches many documents.
680   //   For more details, see GetDocumentSpecificityScore().
681   // * frequency score has a much wider range depending on the number of
682   //   visits; typical range is [0.3, 15.0].  For more details, see
683   //   GetFrequency().
684   //
685   // The default scoring buckets: "0.0:550,1:625,9.0:1300,90.0:1399"
686   // will linearly interpolate the scores between:
687   //      0.0 to 1.0  --> 550 to 625
688   //      1.0 to 9.0  --> 625 to 1300
689   //      9.0 to 90.0 --> 1300 to 1399
690   //      >= 90.0     --> 1399
691   //
692   // The score maxes out at 1399 (i.e., cannot beat a good inlineable result
693   // from HistoryURL provider).
694   const float intermediate_score =
695       topicality_score * frequency_score * specificity_score;
696 
697   // Find the threshold where intermediate score is greater than bucket.
698   size_t i = 1;
699   for (; i < relevance_buckets->size(); ++i) {
700     const ScoreMaxRelevance& bucket = (*relevance_buckets)[i];
701     if (intermediate_score >= bucket.first) {
702       continue;
703     }
704     const ScoreMaxRelevance& previous_bucket = (*relevance_buckets)[i - 1];
705     const float slope = ((bucket.second - previous_bucket.second) /
706                          (bucket.first - previous_bucket.first));
707     return (previous_bucket.second +
708             (slope * (intermediate_score - previous_bucket.first)));
709   }
710   // It will reach this stage when the score is > highest bucket score.
711   // Return the highest bucket score.
712   return (*relevance_buckets)[i - 1].second;
713 }
714 
715 // static
716 std::vector<ScoredHistoryMatch::ScoreMaxRelevance>
GetHQPBuckets()717 ScoredHistoryMatch::GetHQPBuckets() {
718   std::string relevance_buckets_str =
719       OmniboxFieldTrial::HQPExperimentalScoringBuckets();
720   static constexpr char kDefaultHQPBuckets[] =
721       "0.0:550,1:625,9.0:1300,90.0:1399";
722   if (relevance_buckets_str.empty())
723     relevance_buckets_str = kDefaultHQPBuckets;
724   return GetHQPBucketsFromString(relevance_buckets_str);
725 }
726 
727 // static
728 ScoredHistoryMatch::ScoreMaxRelevances
GetHQPBucketsFromString(const std::string & buckets_str)729 ScoredHistoryMatch::GetHQPBucketsFromString(const std::string& buckets_str) {
730   DCHECK(!buckets_str.empty());
731   base::StringPairs kv_pairs;
732   if (!base::SplitStringIntoKeyValuePairs(buckets_str, ':', ',', &kv_pairs))
733     return ScoreMaxRelevances();
734   ScoreMaxRelevances hqp_buckets;
735   for (base::StringPairs::const_iterator it = kv_pairs.begin();
736        it != kv_pairs.end(); ++it) {
737     ScoreMaxRelevance bucket;
738     bool is_valid_intermediate_score =
739         base::StringToDouble(it->first, &bucket.first);
740     DCHECK(is_valid_intermediate_score);
741     bool is_valid_hqp_score = base::StringToInt(it->second, &bucket.second);
742     DCHECK(is_valid_hqp_score);
743     hqp_buckets.push_back(bucket);
744   }
745   return hqp_buckets;
746 }
747