1 #include "cache.h"
2 #include "refs.h"
3 #include "object-store.h"
4 #include "cache-tree.h"
5 #include "mergesort.h"
6 #include "diff.h"
7 #include "diffcore.h"
8 #include "tag.h"
9 #include "blame.h"
10 #include "alloc.h"
11 #include "commit-slab.h"
12 #include "bloom.h"
13 #include "commit-graph.h"
14 
/*
 * Per-commit slab mapping each commit to the head of its singly-linked
 * list of blame_origin "suspects" (accessed via get_blame_suspects()
 * and set_blame_suspects()).
 */
define_commit_slab(blame_suspects, struct blame_origin *);
static struct blame_suspects blame_suspects;
17 
get_blame_suspects(struct commit * commit)18 struct blame_origin *get_blame_suspects(struct commit *commit)
19 {
20 	struct blame_origin **result;
21 
22 	result = blame_suspects_peek(&blame_suspects, commit);
23 
24 	return result ? *result : NULL;
25 }
26 
/* Install 'origin' as the head of 'commit's suspect chain in the slab. */
static void set_blame_suspects(struct commit *commit, struct blame_origin *origin)
{
	*blame_suspects_at(&blame_suspects, commit) = origin;
}
31 
blame_origin_decref(struct blame_origin * o)32 void blame_origin_decref(struct blame_origin *o)
33 {
34 	if (o && --o->refcnt <= 0) {
35 		struct blame_origin *p, *l = NULL;
36 		if (o->previous)
37 			blame_origin_decref(o->previous);
38 		free(o->file.ptr);
39 		/* Should be present exactly once in commit chain */
40 		for (p = get_blame_suspects(o->commit); p; l = p, p = p->next) {
41 			if (p == o) {
42 				if (l)
43 					l->next = p->next;
44 				else
45 					set_blame_suspects(o->commit, p->next);
46 				free(o);
47 				return;
48 			}
49 		}
50 		die("internal error in blame_origin_decref");
51 	}
52 }
53 
54 /*
55  * Given a commit and a path in it, create a new origin structure.
56  * The callers that add blame to the scoreboard should use
57  * get_origin() to obtain shared, refcounted copy instead of calling
58  * this function directly.
59  */
static struct blame_origin *make_origin(struct commit *commit, const char *path)
{
	struct blame_origin *o;
	/* Allocate the origin with 'path' copied into its flex-array tail. */
	FLEX_ALLOC_STR(o, path, path);
	o->commit = commit;
	o->refcnt = 1;
	/* Prepend the new origin to the commit's suspect list. */
	o->next = get_blame_suspects(commit);
	set_blame_suspects(commit, o);
	return o;
}
70 
71 /*
72  * Locate an existing origin or create a new one.
73  * This moves the origin to front position in the commit util list.
74  */
get_origin(struct commit * commit,const char * path)75 static struct blame_origin *get_origin(struct commit *commit, const char *path)
76 {
77 	struct blame_origin *o, *l;
78 
79 	for (o = get_blame_suspects(commit), l = NULL; o; l = o, o = o->next) {
80 		if (!strcmp(o->path, path)) {
81 			/* bump to front */
82 			if (l) {
83 				l->next = o->next;
84 				o->next = get_blame_suspects(commit);
85 				set_blame_suspects(commit, o);
86 			}
87 			return blame_origin_incref(o);
88 		}
89 	}
90 	return make_origin(commit, path);
91 }
92 
93 
94 
/*
 * Die unless 'path' is a valid path to blame: it must resolve to a blob
 * in at least one parent of the fake working-tree commit, or appear in
 * the index (possibly as an unmerged entry).
 */
static void verify_working_tree_path(struct repository *r,
				     struct commit *work_tree, const char *path)
{
	struct commit_list *parents;
	int pos;

	/* Accept the path if any parent commit has it as a blob. */
	for (parents = work_tree->parents; parents; parents = parents->next) {
		const struct object_id *commit_oid = &parents->item->object.oid;
		struct object_id blob_oid;
		unsigned short mode;

		if (!get_tree_entry(r, commit_oid, path, &blob_oid, &mode) &&
		    oid_object_info(r, &blob_oid, NULL) == OBJ_BLOB)
			return;
	}

	/*
	 * Fall back to the index. A negative return from index_name_pos()
	 * encodes the insertion position as -1 - pos; an unmerged entry
	 * with the same name would sit exactly there.
	 */
	pos = index_name_pos(r->index, path, strlen(path));
	if (pos >= 0)
		; /* path is in the index */
	else if (-1 - pos < r->index->cache_nr &&
		 !strcmp(r->index->cache[-1 - pos]->name, path))
		; /* path is in the index, unmerged */
	else
		die("no such path '%s' in HEAD", path);
}
120 
append_parent(struct repository * r,struct commit_list ** tail,const struct object_id * oid)121 static struct commit_list **append_parent(struct repository *r,
122 					  struct commit_list **tail,
123 					  const struct object_id *oid)
124 {
125 	struct commit *parent;
126 
127 	parent = lookup_commit_reference(r, oid);
128 	if (!parent)
129 		die("no such commit %s", oid_to_hex(oid));
130 	return &commit_list_insert(parent, tail)->next;
131 }
132 
append_merge_parents(struct repository * r,struct commit_list ** tail)133 static void append_merge_parents(struct repository *r,
134 				 struct commit_list **tail)
135 {
136 	int merge_head;
137 	struct strbuf line = STRBUF_INIT;
138 
139 	merge_head = open(git_path_merge_head(r), O_RDONLY);
140 	if (merge_head < 0) {
141 		if (errno == ENOENT)
142 			return;
143 		die("cannot open '%s' for reading",
144 		    git_path_merge_head(r));
145 	}
146 
147 	while (!strbuf_getwholeline_fd(&line, merge_head, '\n')) {
148 		struct object_id oid;
149 		if (get_oid_hex(line.buf, &oid))
150 			die("unknown line in '%s': %s",
151 			    git_path_merge_head(r), line.buf);
152 		tail = append_parent(r, tail, &oid);
153 	}
154 	close(merge_head);
155 	strbuf_release(&line);
156 }
157 
158 /*
159  * This isn't as simple as passing sb->buf and sb->len, because we
160  * want to transfer ownership of the buffer to the commit (so we
161  * must use detach).
162  */
/*
 * Hand the contents of 'sb' over to commit 'c' as its commit buffer.
 * Ownership of the buffer moves to the commit, hence the detach.
 */
static void set_commit_buffer_from_strbuf(struct repository *r,
					  struct commit *c,
					  struct strbuf *sb)
{
	size_t detached_len;
	void *detached = strbuf_detach(sb, &detached_len);

	set_commit_buffer(r, c, detached, detached_len);
}
171 
172 /*
173  * Prepare a dummy commit that represents the work tree (or staged) item.
174  * Note that annotating work tree item never works in the reverse.
175  */
/*
 * r: the repository; opt: diff options (allow_textconv is honoured);
 * path: the path being blamed; contents_from: optional file to take the
 * contents from instead of the working tree ("-" means standard input).
 * Returns a newly allocated in-core commit whose sole interesting
 * content is the blame origin created for 'path'.
 */
static struct commit *fake_working_tree_commit(struct repository *r,
					       struct diff_options *opt,
					       const char *path,
					       const char *contents_from)
{
	struct commit *commit;
	struct blame_origin *origin;
	struct commit_list **parent_tail, *parent;
	struct object_id head_oid;
	struct strbuf buf = STRBUF_INIT;
	const char *ident;
	time_t now;
	int len;
	struct cache_entry *ce;
	unsigned mode;
	struct strbuf msg = STRBUF_INIT;

	repo_read_index(r);
	time(&now);
	commit = alloc_commit_node(r);
	commit->object.parsed = 1;
	commit->date = now;
	parent_tail = &commit->parents;

	if (!resolve_ref_unsafe("HEAD", RESOLVE_REF_READING, &head_oid, NULL))
		die("no such ref: HEAD");

	/* HEAD plus any MERGE_HEAD commits become the fake commit's parents. */
	parent_tail = append_parent(r, parent_tail, &head_oid);
	append_merge_parents(r, parent_tail);
	verify_working_tree_path(r, commit, path);

	origin = make_origin(commit, path);

	/* Synthesize a commit buffer; the tree oid is a dummy all-zeros. */
	ident = fmt_ident("Not Committed Yet", "not.committed.yet",
			WANT_BLANK_IDENT, NULL, 0);
	strbuf_addstr(&msg, "tree 0000000000000000000000000000000000000000\n");
	for (parent = commit->parents; parent; parent = parent->next)
		strbuf_addf(&msg, "parent %s\n",
			    oid_to_hex(&parent->item->object.oid));
	strbuf_addf(&msg,
		    "author %s\n"
		    "committer %s\n\n"
		    "Version of %s from %s\n",
		    ident, ident, path,
		    (!contents_from ? path :
		     (!strcmp(contents_from, "-") ? "standard input" : contents_from)));
	set_commit_buffer_from_strbuf(r, commit, &msg);

	/* Read the blob contents from a file unless contents_from is "-". */
	if (!contents_from || strcmp("-", contents_from)) {
		struct stat st;
		const char *read_from;
		char *buf_ptr;
		unsigned long buf_len;

		if (contents_from) {
			if (stat(contents_from, &st) < 0)
				die_errno("Cannot stat '%s'", contents_from);
			read_from = contents_from;
		}
		else {
			if (lstat(path, &st) < 0)
				die_errno("Cannot lstat '%s'", path);
			read_from = path;
		}
		mode = canon_mode(st.st_mode);

		switch (st.st_mode & S_IFMT) {
		case S_IFREG:
			if (opt->flags.allow_textconv &&
			    textconv_object(r, read_from, mode, null_oid(), 0, &buf_ptr, &buf_len))
				strbuf_attach(&buf, buf_ptr, buf_len, buf_len + 1);
			else if (strbuf_read_file(&buf, read_from, st.st_size) != st.st_size)
				die_errno("cannot open or read '%s'", read_from);
			break;
		case S_IFLNK:
			if (strbuf_readlink(&buf, read_from, st.st_size) < 0)
				die_errno("cannot readlink '%s'", read_from);
			break;
		default:
			die("unsupported file type %s", read_from);
		}
	}
	else {
		/* Reading from stdin */
		mode = 0;
		if (strbuf_read(&buf, 0, 0) < 0)
			die_errno("failed to read from stdin");
	}
	convert_to_git(r->index, path, buf.buf, buf.len, &buf, 0);
	origin->file.ptr = buf.buf;
	origin->file.size = buf.len;
	/* Register the contents as an in-core blob; sets origin->blob_oid. */
	pretend_object_file(buf.buf, buf.len, OBJ_BLOB, &origin->blob_oid);

	/*
	 * Read the current index, replace the path entry with
	 * origin->blob_oid without mucking with its mode or type
	 * bits; we are not going to write this index out -- we just
	 * want to run "diff-index --cached".
	 */
	discard_index(r->index);
	repo_read_index(r);

	len = strlen(path);
	if (!mode) {
		/* stdin case: recover the mode from the index if present. */
		int pos = index_name_pos(r->index, path, len);
		if (0 <= pos)
			mode = r->index->cache[pos]->ce_mode;
		else
			/* Let's not bother reading from HEAD tree */
			mode = S_IFREG | 0644;
	}
	ce = make_empty_cache_entry(r->index, len);
	oidcpy(&ce->oid, &origin->blob_oid);
	memcpy(ce->name, path, len);
	ce->ce_flags = create_ce_flags(0);
	ce->ce_namelen = len;
	ce->ce_mode = create_ce_mode(mode);
	add_index_entry(r->index, ce,
			ADD_CACHE_OK_TO_ADD | ADD_CACHE_OK_TO_REPLACE);

	cache_tree_invalidate_path(r->index, path);

	return commit;
}
300 
301 
302 
/*
 * Run xdiff over the two files, invoking 'hunk_func' (with 'cb_data')
 * for each hunk. Returns the xdi_diff() result.
 */
static int diff_hunks(mmfile_t *file_a, mmfile_t *file_b,
		      xdl_emit_hunk_consume_func_t hunk_func, void *cb_data, int xdl_opts)
{
	xpparam_t params = {0};
	xdemitconf_t conf = {0};
	xdemitcb_t cb = {NULL};

	params.flags = xdl_opts;
	conf.hunk_func = hunk_func;
	cb.priv = cb_data;
	return xdi_diff(file_a, file_b, &params, &conf, &cb);
}
315 
/*
 * Return a pointer just past the next newline in [start, end), or 'end'
 * when the range contains no newline.
 */
static const char *get_next_line(const char *start, const char *end)
{
	const char *newline = memchr(start, '\n', end - start);

	if (newline)
		return newline + 1;
	return end;
}
322 
/*
 * Fill '*line_starts' with the byte offset of each line in 'buf' plus a
 * final sentinel equal to 'len'; the array is allocated here and owned
 * by the caller. Returns the number of lines.
 */
static int find_line_starts(int **line_starts, const char *buf,
			    unsigned long len)
{
	const char *end = buf + len;
	const char *scan;
	int *offsets;
	int count = 0;

	/* First pass: count the lines so we can size the array. */
	for (scan = buf; scan < end; scan = get_next_line(scan, end))
		count++;

	ALLOC_ARRAY(*line_starts, count + 1);
	offsets = *line_starts;

	/* Second pass: record each line's starting offset. */
	for (scan = buf; scan < end; scan = get_next_line(scan, end))
		*offsets++ = scan - buf;

	/* Sentinel: one past the end of the last line. */
	*offsets = len;

	return count;
}
344 
/* Forward declaration; the full definition follows `struct fingerprint`. */
struct fingerprint_entry;
346 
347 /* A fingerprint is intended to loosely represent a string, such that two
348  * fingerprints can be quickly compared to give an indication of the similarity
349  * of the strings that they represent.
350  *
351  * A fingerprint is represented as a multiset of the lower-cased byte pairs in
352  * the string that it represents. Whitespace is added at each end of the
353  * string. Whitespace pairs are ignored. Whitespace is converted to '\0'.
354  * For example, the string "Darth   Radar" will be converted to the following
355  * fingerprint:
356  * {"\0d", "da", "da", "ar", "ar", "rt", "th", "h\0", "\0r", "ra", "ad", "r\0"}
357  *
358  * The similarity between two fingerprints is the size of the intersection of
359  * their multisets, including repeated elements. See fingerprint_similarity for
360  * examples.
361  *
362  * For ease of implementation, the fingerprint is implemented as a map
363  * of byte pairs to the count of that byte pair in the string, instead of
364  * allowing repeated elements in a set.
365  */
struct fingerprint {
	/* Maps a byte-pair hash to its fingerprint_entry. */
	struct hashmap map;
	/* As we know the maximum number of entries in advance, it's
	 * convenient to store the entries in a single array instead of having
	 * the hashmap manage the memory.
	 */
	struct fingerprint_entry *entries;
};
374 
375 /* A byte pair in a fingerprint. Stores the number of times the byte pair
376  * occurs in the string that the fingerprint represents.
377  */
struct fingerprint_entry {
	/* The hashmap entry - the hash represents the byte pair in its
	 * entirety so we don't need to store the byte pair separately.
	 */
	struct hashmap_entry entry;
	/* The number of times the byte pair occurs in the string that the
	 * fingerprint represents. Always >= 1 for entries in the map.
	 */
	int count;
};
388 
389 /* See `struct fingerprint` for an explanation of what a fingerprint is.
390  * \param result the fingerprint of the string is stored here. This must be
391  * 		 freed later using free_fingerprint.
392  * \param line_begin the start of the string
393  * \param line_end the end of the string
394  */
static void get_fingerprint(struct fingerprint *result,
			    const char *line_begin,
			    const char *line_end)
{
	/* c0/c1 hold the previous and current normalized byte of the
	 * rolling byte pair; c0 starts as the virtual leading whitespace. */
	unsigned int hash, c0 = 0, c1;
	const char *p;
	/* A line of N bytes yields at most N + 1 byte pairs (including the
	 * virtual whitespace at each end), so preallocate that many slots. */
	int max_map_entry_count = 1 + line_end - line_begin;
	struct fingerprint_entry *entry = xcalloc(max_map_entry_count,
		sizeof(struct fingerprint_entry));
	struct fingerprint_entry *found_entry;

	hashmap_init(&result->map, NULL, NULL, max_map_entry_count);
	result->entries = entry;
	for (p = line_begin; p <= line_end; ++p, c0 = c1) {
		/* Always terminate the string with whitespace.
		 * Normalise whitespace to 0, and normalise letters to
		 * lower case. This won't work for multibyte characters but at
		 * worst will match some unrelated characters.
		 */
		if ((p == line_end) || isspace(*p))
			c1 = 0;
		else
			c1 = tolower(*p);
		/* The "hash" encodes the byte pair (c0, c1) exactly. */
		hash = c0 | (c1 << 8);
		/* Ignore whitespace pairs */
		if (hash == 0)
			continue;
		hashmap_entry_init(&entry->entry, hash);

		found_entry = hashmap_get_entry(&result->map, entry,
						/* member name */ entry, NULL);
		if (found_entry) {
			found_entry->count += 1;
		} else {
			/* First occurrence of this pair: claim the next
			 * preallocated slot. Slots are only ever freed as a
			 * whole via free_fingerprint(). */
			entry->count = 1;
			hashmap_add(&result->map, &entry->entry);
			++entry;
		}
	}
}
435 
/* Release the hashmap table, then the single backing array holding all
 * entries; entries must not be freed individually by the hashmap. */
static void free_fingerprint(struct fingerprint *f)
{
	hashmap_clear(&f->map);
	free(f->entries);
}
441 
442 /* Calculates the similarity between two fingerprints as the size of the
443  * intersection of their multisets, including repeated elements. See
444  * `struct fingerprint` for an explanation of the fingerprint representation.
445  * The similarity between "cat mat" and "father rather" is 2 because "at" is
446  * present twice in both strings while the similarity between "tim" and "mit"
447  * is 0.
448  */
fingerprint_similarity(struct fingerprint * a,struct fingerprint * b)449 static int fingerprint_similarity(struct fingerprint *a, struct fingerprint *b)
450 {
451 	int intersection = 0;
452 	struct hashmap_iter iter;
453 	const struct fingerprint_entry *entry_a, *entry_b;
454 
455 	hashmap_for_each_entry(&b->map, &iter, entry_b,
456 				entry /* member name */) {
457 		entry_a = hashmap_get_entry(&a->map, entry_b, entry, NULL);
458 		if (entry_a) {
459 			intersection += entry_a->count < entry_b->count ?
460 					entry_a->count : entry_b->count;
461 		}
462 	}
463 	return intersection;
464 }
465 
466 /* Subtracts byte-pair elements in B from A, modifying A in place.
467  */
fingerprint_subtract(struct fingerprint * a,struct fingerprint * b)468 static void fingerprint_subtract(struct fingerprint *a, struct fingerprint *b)
469 {
470 	struct hashmap_iter iter;
471 	struct fingerprint_entry *entry_a;
472 	const struct fingerprint_entry *entry_b;
473 
474 	hashmap_iter_init(&b->map, &iter);
475 
476 	hashmap_for_each_entry(&b->map, &iter, entry_b,
477 				entry /* member name */) {
478 		entry_a = hashmap_get_entry(&a->map, entry_b, entry, NULL);
479 		if (entry_a) {
480 			if (entry_a->count <= entry_b->count)
481 				hashmap_remove(&a->map, &entry_b->entry, NULL);
482 			else
483 				entry_a->count -= entry_b->count;
484 		}
485 	}
486 }
487 
488 /* Calculate fingerprints for a series of lines.
489  * Puts the fingerprints in the fingerprints array, which must have been
490  * preallocated to allow storing line_count elements.
491  */
get_line_fingerprints(struct fingerprint * fingerprints,const char * content,const int * line_starts,long first_line,long line_count)492 static void get_line_fingerprints(struct fingerprint *fingerprints,
493 				  const char *content, const int *line_starts,
494 				  long first_line, long line_count)
495 {
496 	int i;
497 	const char *linestart, *lineend;
498 
499 	line_starts += first_line;
500 	for (i = 0; i < line_count; ++i) {
501 		linestart = content + line_starts[i];
502 		lineend = content + line_starts[i + 1];
503 		get_fingerprint(fingerprints + i, linestart, lineend);
504 	}
505 }
506 
free_line_fingerprints(struct fingerprint * fingerprints,int nr_fingerprints)507 static void free_line_fingerprints(struct fingerprint *fingerprints,
508 				   int nr_fingerprints)
509 {
510 	int i;
511 
512 	for (i = 0; i < nr_fingerprints; i++)
513 		free_fingerprint(&fingerprints[i]);
514 }
515 
516 /* This contains the data necessary to linearly map a line number in one half
517  * of a diff chunk to the line in the other half of the diff chunk that is
518  * closest in terms of its position as a fraction of the length of the chunk.
519  */
struct line_number_mapping {
	/* Start line and length of the destination (mapped-to) range. */
	int destination_start, destination_length,
	/* Start line and length of the source (mapped-from) range. */
		source_start, source_length;
};
524 
525 /* Given a line number in one range, offset and scale it to map it onto the
526  * other range.
527  * Essentially this mapping is a simple linear equation but the calculation is
528  * more complicated to allow performing it with integer operations.
529  * Another complication is that if a line could map onto many lines in the
530  * destination range then we want to choose the line at the center of those
531  * possibilities.
532  * Example: if the chunk is 2 lines long in A and 10 lines long in B then the
533  * first 5 lines in B will map onto the first line in the A chunk, while the
534  * last 5 lines will all map onto the second line in the A chunk.
535  * Example: if the chunk is 10 lines long in A and 2 lines long in B then line
536  * 0 in B will map onto line 2 in A, and line 1 in B will map onto line 7 in A.
537  */
map_line_number(int line_number,const struct line_number_mapping * mapping)538 static int map_line_number(int line_number,
539 	const struct line_number_mapping *mapping)
540 {
541 	return ((line_number - mapping->source_start) * 2 + 1) *
542 	       mapping->destination_length /
543 	       (mapping->source_length * 2) +
544 	       mapping->destination_start;
545 }
546 
547 /* Get a pointer to the element storing the similarity between a line in A
548  * and a line in B.
549  *
550  * The similarities are stored in a 2-dimensional array. Each "row" in the
551  * array contains the similarities for a line in B. The similarities stored in
552  * a row are the similarities between the line in B and the nearby lines in A.
553  * To keep the length of each row the same, it is padded out with values of -1
554  * where the search range extends beyond the lines in A.
555  * For example, if max_search_distance_a is 2 and the two sides of a diff chunk
556  * look like this:
557  * a | m
558  * b | n
559  * c | o
560  * d | p
561  * e | q
562  * Then the similarity array will contain:
563  * [-1, -1, am, bm, cm,
564  *  -1, an, bn, cn, dn,
565  *  ao, bo, co, do, eo,
566  *  bp, cp, dp, ep, -1,
567  *  cq, dq, eq, -1, -1]
568  * Where similarities are denoted either by -1 for invalid, or the
569  * concatenation of the two lines in the diff being compared.
570  *
571  * \param similarities array of similarities between lines in A and B
572  * \param line_a the index of the line in A, in the same frame of reference as
573  *	closest_line_a.
574  * \param local_line_b the index of the line in B, relative to the first line
575  *		       in B that similarities represents.
576  * \param closest_line_a the index of the line in A that is deemed to be
577  *			 closest to local_line_b. This must be in the same
578  *			 frame of reference as line_a. This value defines
579  *			 where similarities is centered for the line in B.
580  * \param max_search_distance_a maximum distance in lines from the closest line
581  * 				in A for other lines in A for which
582  * 				similarities may be calculated.
583  */
/*
 * Locate the similarity slot for (line_a, local_line_b): rows of width
 * 2 * max_search_distance_a + 1 are indexed by local_line_b, and the
 * column is line_a's offset from closest_line_a shifted to be >= 0.
 */
static int *get_similarity(int *similarities,
			   int line_a, int local_line_b,
			   int closest_line_a, int max_search_distance_a)
{
	int row_width = max_search_distance_a * 2 + 1;
	int column = line_a - closest_line_a + max_search_distance_a;

	assert(abs(line_a - closest_line_a) <=
	       max_search_distance_a);
	return similarities + local_line_b * row_width + column;
}
594 
/* Sentinels stored in the per-line 'certainties' array: a line known to
 * match nothing, and a line whose certainty is yet to be computed. */
#define CERTAIN_NOTHING_MATCHES -2
#define CERTAINTY_NOT_CALCULATED -1
597 
598 /* Given a line in B, first calculate its similarities with nearby lines in A
599  * if not already calculated, then identify the most similar and second most
600  * similar lines. The "certainty" is calculated based on those two
601  * similarities.
602  *
603  * \param start_a the index of the first line of the chunk in A
604  * \param length_a the length in lines of the chunk in A
605  * \param local_line_b the index of the line in B, relative to the first line
606  * 		       in the chunk.
607  * \param fingerprints_a array of fingerprints for the chunk in A
608  * \param fingerprints_b array of fingerprints for the chunk in B
609  * \param similarities 2-dimensional array of similarities between lines in A
610  * 		       and B. See get_similarity() for more details.
611  * \param certainties array of values indicating how strongly a line in B is
612  * 		      matched with some line in A.
613  * \param second_best_result array of absolute indices in A for the second
614  * 			     closest match of a line in B.
615  * \param result array of absolute indices in A for the closest match of a line
616  * 		 in B.
617  * \param max_search_distance_a maximum distance in lines from the closest line
618  * 				in A for other lines in A for which
619  * 				similarities may be calculated.
620  * \param map_line_number_in_b_to_a parameter to map_line_number().
621  */
static void find_best_line_matches(
	int start_a,
	int length_a,
	int start_b,
	int local_line_b,
	struct fingerprint *fingerprints_a,
	struct fingerprint *fingerprints_b,
	int *similarities,
	int *certainties,
	int *second_best_result,
	int *result,
	const int max_search_distance_a,
	const struct line_number_mapping *map_line_number_in_b_to_a)
{

	int i, search_start, search_end, closest_local_line_a, *similarity,
		best_similarity = 0, second_best_similarity = 0,
		best_similarity_index = 0, second_best_similarity_index = 0;

	/* certainty has already been calculated so no need to redo the work */
	if (certainties[local_line_b] != CERTAINTY_NOT_CALCULATED)
		return;

	closest_local_line_a = map_line_number(
		local_line_b + start_b, map_line_number_in_b_to_a) - start_a;

	/* Clamp the search window around the closest line to chunk A's
	 * bounds. */
	search_start = closest_local_line_a - max_search_distance_a;
	if (search_start < 0)
		search_start = 0;

	search_end = closest_local_line_a + max_search_distance_a + 1;
	if (search_end > length_a)
		search_end = length_a;

	for (i = search_start; i < search_end; ++i) {
		similarity = get_similarity(similarities,
					    i, local_line_b,
					    closest_local_line_a,
					    max_search_distance_a);
		if (*similarity == -1) {
			/* The distance from the closest line is bounded by
			 * max_search_distance_a (small in practice), so it
			 * stays well below the 1000 used for scaling; assert
			 * just in case.
			 */
			assert(abs(i - closest_local_line_a) < 1000);
			/* scale the similarity by (1000 - distance from
			 * closest line) to act as a tie break between lines
			 * that otherwise are equally similar.
			 */
			*similarity = fingerprint_similarity(
				fingerprints_b + local_line_b,
				fingerprints_a + i) *
				(1000 - abs(i - closest_local_line_a));
		}
		if (*similarity > best_similarity) {
			second_best_similarity = best_similarity;
			second_best_similarity_index = best_similarity_index;
			best_similarity = *similarity;
			best_similarity_index = i;
		} else if (*similarity > second_best_similarity) {
			second_best_similarity = *similarity;
			second_best_similarity_index = i;
		}
	}

	if (best_similarity == 0) {
		/* this line definitely doesn't match with anything. Mark it
		 * with this special value so it doesn't get invalidated and
		 * won't be recalculated.
		 */
		certainties[local_line_b] = CERTAIN_NOTHING_MATCHES;
		result[local_line_b] = -1;
	} else {
		/* Calculate the certainty with which this line matches.
		 * If the line matches well with two lines then that reduces
		 * the certainty. However we still want to prioritise matching
		 * a line that matches very well with two lines over matching a
		 * line that matches poorly with one line, hence doubling
		 * best_similarity.
		 * This means that if we have
		 * line X that matches only one line with a score of 3,
		 * line Y that matches two lines equally with a score of 5,
		 * and line Z that matches only one line with a score of 2,
		 * then the lines in order of certainty are X, Y, Z.
		 */
		certainties[local_line_b] = best_similarity * 2 -
			second_best_similarity;

		/* We keep both the best and second best results to allow us to
		 * check at a later stage of the matching process whether the
		 * result needs to be invalidated.
		 */
		result[local_line_b] = start_a + best_similarity_index;
		second_best_result[local_line_b] =
			start_a + second_best_similarity_index;
	}
}
718 
719 /*
720  * This finds the line that we can match with the most confidence, and
721  * uses it as a partition. It then calls itself on the lines on either side of
722  * that partition. In this way we avoid lines appearing out of order, and
723  * retain a sensible line ordering.
724  * \param start_a index of the first line in A with which lines in B may be
725  * 		  compared.
726  * \param start_b index of the first line in B for which matching should be
727  * 		  done.
728  * \param length_a number of lines in A with which lines in B may be compared.
729  * \param length_b number of lines in B for which matching should be done.
730  * \param fingerprints_a mutable array of fingerprints in A. The first element
731  * 			 corresponds to the line at start_a.
732  * \param fingerprints_b array of fingerprints in B. The first element
733  * 			 corresponds to the line at start_b.
734  * \param similarities 2-dimensional array of similarities between lines in A
735  * 		       and B. See get_similarity() for more details.
736  * \param certainties array of values indicating how strongly a line in B is
737  * 		      matched with some line in A.
738  * \param second_best_result array of absolute indices in A for the second
739  * 			     closest match of a line in B.
740  * \param result array of absolute indices in A for the closest match of a line
741  * 		 in B.
742  * \param max_search_distance_a maximum distance in lines from the closest line
743  * 			      in A for other lines in A for which
744  * 			      similarities may be calculated.
745  * \param max_search_distance_b an upper bound on the greatest possible
746  * 			      distance between lines in B such that they will
747  *                              both be compared with the same line in A
748  * 			      according to max_search_distance_a.
749  * \param map_line_number_in_b_to_a parameter to map_line_number().
750  */
fuzzy_find_matching_lines_recurse(int start_a,int start_b,int length_a,int length_b,struct fingerprint * fingerprints_a,struct fingerprint * fingerprints_b,int * similarities,int * certainties,int * second_best_result,int * result,int max_search_distance_a,int max_search_distance_b,const struct line_number_mapping * map_line_number_in_b_to_a)751 static void fuzzy_find_matching_lines_recurse(
752 	int start_a, int start_b,
753 	int length_a, int length_b,
754 	struct fingerprint *fingerprints_a,
755 	struct fingerprint *fingerprints_b,
756 	int *similarities,
757 	int *certainties,
758 	int *second_best_result,
759 	int *result,
760 	int max_search_distance_a,
761 	int max_search_distance_b,
762 	const struct line_number_mapping *map_line_number_in_b_to_a)
763 {
764 	int i, invalidate_min, invalidate_max, offset_b,
765 		second_half_start_a, second_half_start_b,
766 		second_half_length_a, second_half_length_b,
767 		most_certain_line_a, most_certain_local_line_b = -1,
768 		most_certain_line_certainty = -1,
769 		closest_local_line_a;
770 
771 	for (i = 0; i < length_b; ++i) {
772 		find_best_line_matches(start_a,
773 				       length_a,
774 				       start_b,
775 				       i,
776 				       fingerprints_a,
777 				       fingerprints_b,
778 				       similarities,
779 				       certainties,
780 				       second_best_result,
781 				       result,
782 				       max_search_distance_a,
783 				       map_line_number_in_b_to_a);
784 
785 		if (certainties[i] > most_certain_line_certainty) {
786 			most_certain_line_certainty = certainties[i];
787 			most_certain_local_line_b = i;
788 		}
789 	}
790 
791 	/* No matches. */
792 	if (most_certain_local_line_b == -1)
793 		return;
794 
795 	most_certain_line_a = result[most_certain_local_line_b];
796 
797 	/*
798 	 * Subtract the most certain line's fingerprint in B from the matched
799 	 * fingerprint in A. This means that other lines in B can't also match
800 	 * the same parts of the line in A.
801 	 */
802 	fingerprint_subtract(fingerprints_a + most_certain_line_a - start_a,
803 			     fingerprints_b + most_certain_local_line_b);
804 
805 	/* Invalidate results that may be affected by the choice of most
806 	 * certain line.
807 	 */
808 	invalidate_min = most_certain_local_line_b - max_search_distance_b;
809 	invalidate_max = most_certain_local_line_b + max_search_distance_b + 1;
810 	if (invalidate_min < 0)
811 		invalidate_min = 0;
812 	if (invalidate_max > length_b)
813 		invalidate_max = length_b;
814 
815 	/* As the fingerprint in A has changed, discard previously calculated
816 	 * similarity values with that fingerprint.
817 	 */
818 	for (i = invalidate_min; i < invalidate_max; ++i) {
819 		closest_local_line_a = map_line_number(
820 			i + start_b, map_line_number_in_b_to_a) - start_a;
821 
822 		/* Check that the lines in A and B are close enough that there
823 		 * is a similarity value for them.
824 		 */
825 		if (abs(most_certain_line_a - start_a - closest_local_line_a) >
826 			max_search_distance_a) {
827 			continue;
828 		}
829 
830 		*get_similarity(similarities, most_certain_line_a - start_a,
831 				i, closest_local_line_a,
832 				max_search_distance_a) = -1;
833 	}
834 
835 	/* More invalidating of results that may be affected by the choice of
836 	 * most certain line.
837 	 * Discard the matches for lines in B that are currently matched with a
838 	 * line in A such that their ordering contradicts the ordering imposed
839 	 * by the choice of most certain line.
840 	 */
841 	for (i = most_certain_local_line_b - 1; i >= invalidate_min; --i) {
842 		/* In this loop we discard results for lines in B that are
843 		 * before most-certain-line-B but are matched with a line in A
844 		 * that is after most-certain-line-A.
845 		 */
846 		if (certainties[i] >= 0 &&
847 		    (result[i] >= most_certain_line_a ||
848 		     second_best_result[i] >= most_certain_line_a)) {
849 			certainties[i] = CERTAINTY_NOT_CALCULATED;
850 		}
851 	}
852 	for (i = most_certain_local_line_b + 1; i < invalidate_max; ++i) {
853 		/* In this loop we discard results for lines in B that are
854 		 * after most-certain-line-B but are matched with a line in A
855 		 * that is before most-certain-line-A.
856 		 */
857 		if (certainties[i] >= 0 &&
858 		    (result[i] <= most_certain_line_a ||
859 		     second_best_result[i] <= most_certain_line_a)) {
860 			certainties[i] = CERTAINTY_NOT_CALCULATED;
861 		}
862 	}
863 
864 	/* Repeat the matching process for lines before the most certain line.
865 	 */
866 	if (most_certain_local_line_b > 0) {
867 		fuzzy_find_matching_lines_recurse(
868 			start_a, start_b,
869 			most_certain_line_a + 1 - start_a,
870 			most_certain_local_line_b,
871 			fingerprints_a, fingerprints_b, similarities,
872 			certainties, second_best_result, result,
873 			max_search_distance_a,
874 			max_search_distance_b,
875 			map_line_number_in_b_to_a);
876 	}
877 	/* Repeat the matching process for lines after the most certain line.
878 	 */
879 	if (most_certain_local_line_b + 1 < length_b) {
880 		second_half_start_a = most_certain_line_a;
881 		offset_b = most_certain_local_line_b + 1;
882 		second_half_start_b = start_b + offset_b;
883 		second_half_length_a =
884 			length_a + start_a - second_half_start_a;
885 		second_half_length_b =
886 			length_b + start_b - second_half_start_b;
887 		fuzzy_find_matching_lines_recurse(
888 			second_half_start_a, second_half_start_b,
889 			second_half_length_a, second_half_length_b,
890 			fingerprints_a + second_half_start_a - start_a,
891 			fingerprints_b + offset_b,
892 			similarities +
893 				offset_b * (max_search_distance_a * 2 + 1),
894 			certainties + offset_b,
895 			second_best_result + offset_b, result + offset_b,
896 			max_search_distance_a,
897 			max_search_distance_b,
898 			map_line_number_in_b_to_a);
899 	}
900 }
901 
902 /* Find the lines in the parent line range that most closely match the lines in
903  * the target line range. This is accomplished by matching fingerprints in each
904  * blame_origin, and choosing the best matches that preserve the line ordering.
905  * See struct fingerprint for details of fingerprint matching, and
906  * fuzzy_find_matching_lines_recurse for details of preserving line ordering.
907  *
908  * The performance is believed to be O(n log n) in the typical case and O(n^2)
909  * in a pathological case, where n is the number of lines in the target range.
910  */
fuzzy_find_matching_lines(struct blame_origin * parent,struct blame_origin * target,int tlno,int parent_slno,int same,int parent_len)911 static int *fuzzy_find_matching_lines(struct blame_origin *parent,
912 				      struct blame_origin *target,
913 				      int tlno, int parent_slno, int same,
914 				      int parent_len)
915 {
916 	/* We use the terminology "A" for the left hand side of the diff AKA
917 	 * parent, and "B" for the right hand side of the diff AKA target. */
918 	int start_a = parent_slno;
919 	int length_a = parent_len;
920 	int start_b = tlno;
921 	int length_b = same - tlno;
922 
923 	struct line_number_mapping map_line_number_in_b_to_a = {
924 		start_a, length_a, start_b, length_b
925 	};
926 
927 	struct fingerprint *fingerprints_a = parent->fingerprints;
928 	struct fingerprint *fingerprints_b = target->fingerprints;
929 
930 	int i, *result, *second_best_result,
931 		*certainties, *similarities, similarity_count;
932 
933 	/*
934 	 * max_search_distance_a means that given a line in B, compare it to
935 	 * the line in A that is closest to its position, and the lines in A
936 	 * that are no greater than max_search_distance_a lines away from the
937 	 * closest line in A.
938 	 *
939 	 * max_search_distance_b is an upper bound on the greatest possible
940 	 * distance between lines in B such that they will both be compared
941 	 * with the same line in A according to max_search_distance_a.
942 	 */
943 	int max_search_distance_a = 10, max_search_distance_b;
944 
945 	if (length_a <= 0)
946 		return NULL;
947 
948 	if (max_search_distance_a >= length_a)
949 		max_search_distance_a = length_a ? length_a - 1 : 0;
950 
951 	max_search_distance_b = ((2 * max_search_distance_a + 1) * length_b
952 				 - 1) / length_a;
953 
954 	CALLOC_ARRAY(result, length_b);
955 	CALLOC_ARRAY(second_best_result, length_b);
956 	CALLOC_ARRAY(certainties, length_b);
957 
958 	/* See get_similarity() for details of similarities. */
959 	similarity_count = length_b * (max_search_distance_a * 2 + 1);
960 	CALLOC_ARRAY(similarities, similarity_count);
961 
962 	for (i = 0; i < length_b; ++i) {
963 		result[i] = -1;
964 		second_best_result[i] = -1;
965 		certainties[i] = CERTAINTY_NOT_CALCULATED;
966 	}
967 
968 	for (i = 0; i < similarity_count; ++i)
969 		similarities[i] = -1;
970 
971 	fuzzy_find_matching_lines_recurse(start_a, start_b,
972 					  length_a, length_b,
973 					  fingerprints_a + start_a,
974 					  fingerprints_b + start_b,
975 					  similarities,
976 					  certainties,
977 					  second_best_result,
978 					  result,
979 					  max_search_distance_a,
980 					  max_search_distance_b,
981 					  &map_line_number_in_b_to_a);
982 
983 	free(similarities);
984 	free(certainties);
985 	free(second_best_result);
986 
987 	return result;
988 }
989 
fill_origin_fingerprints(struct blame_origin * o)990 static void fill_origin_fingerprints(struct blame_origin *o)
991 {
992 	int *line_starts;
993 
994 	if (o->fingerprints)
995 		return;
996 	o->num_lines = find_line_starts(&line_starts, o->file.ptr,
997 					o->file.size);
998 	CALLOC_ARRAY(o->fingerprints, o->num_lines);
999 	get_line_fingerprints(o->fingerprints, o->file.ptr, line_starts,
1000 			      0, o->num_lines);
1001 	free(line_starts);
1002 }
1003 
drop_origin_fingerprints(struct blame_origin * o)1004 static void drop_origin_fingerprints(struct blame_origin *o)
1005 {
1006 	if (o->fingerprints) {
1007 		free_line_fingerprints(o->fingerprints, o->num_lines);
1008 		o->num_lines = 0;
1009 		FREE_AND_NULL(o->fingerprints);
1010 	}
1011 }
1012 
1013 /*
1014  * Given an origin, prepare mmfile_t structure to be used by the
1015  * diff machinery
1016  */
static void fill_origin_blob(struct diff_options *opt,
			     struct blame_origin *o, mmfile_t *file,
			     int *num_read_blob, int fill_fingerprints)
{
	if (!o->file.ptr) {
		enum object_type type;
		unsigned long file_size;

		(*num_read_blob)++;
		/*
		 * Prefer textconv-filtered contents when the caller allows
		 * it; fall back to reading the raw blob otherwise.  (The
		 * original code spelled this as an empty if-branch with the
		 * work in the else; the negated condition is equivalent and
		 * keeps the same short-circuit evaluation.)
		 */
		if (!(opt->flags.allow_textconv &&
		      textconv_object(opt->repo, o->path, o->mode,
				      &o->blob_oid, 1, &file->ptr,
				      &file_size)))
			file->ptr = read_object_file(&o->blob_oid, &type,
						     &file_size);
		file->size = file_size;

		if (!file->ptr)
			die("Cannot read blob %s for path %s",
			    oid_to_hex(&o->blob_oid),
			    o->path);
		/* Cache the contents on the origin for later reuse. */
		o->file = *file;
	} else {
		*file = o->file;
	}
	if (fill_fingerprints)
		fill_origin_fingerprints(o);
}
1046 
/* Release an origin's cached file contents and derived fingerprints. */
static void drop_origin_blob(struct blame_origin *o)
{
	FREE_AND_NULL(o->file.ptr);
	drop_origin_fingerprints(o);
}
1052 
1053 /*
1054  * Any merge of blames happens on lists of blames that arrived via
1055  * different parents in a single suspect.  In this case, we want to
1056  * sort according to the suspect line numbers as opposed to the final
1057  * image line numbers.  The function body is somewhat longish because
1058  * it avoids unnecessary writes.
1059  */
1060 
static struct blame_entry *blame_merge(struct blame_entry *list1,
				       struct blame_entry *list2)
{
	struct blame_entry *p1 = list1, *p2 = list2,
		**tail = &list1;

	/* Trivial cases: one of the lists is empty. */
	if (!p1)
		return p2;
	if (!p2)
		return p1;

	/*
	 * Skip over the leading run of list1 entries that already sort
	 * at-or-before the head of list2; "tail" ends up addressing the
	 * first link that needs rewriting.
	 */
	if (p1->s_lno <= p2->s_lno) {
		do {
			tail = &p1->next;
			if ((p1 = *tail) == NULL) {
				*tail = p2;
				return list1;
			}
		} while (p1->s_lno <= p2->s_lno);
	}
	/*
	 * Alternately splice in one maximal run from list2, then one from
	 * list1.  Whichever list runs out first gets the other appended,
	 * terminating the merge.  Links are only written where the runs
	 * actually interleave, avoiding unnecessary stores.
	 */
	for (;;) {
		*tail = p2;
		do {
			tail = &p2->next;
			if ((p2 = *tail) == NULL)  {
				*tail = p1;
				return list1;
			}
		} while (p1->s_lno > p2->s_lno);
		*tail = p1;
		do {
			tail = &p1->next;
			if ((p1 = *tail) == NULL) {
				*tail = p2;
				return list1;
			}
		} while (p1->s_lno <= p2->s_lno);
	}
}
1100 
get_next_blame(const void * p)1101 static void *get_next_blame(const void *p)
1102 {
1103 	return ((struct blame_entry *)p)->next;
1104 }
1105 
set_next_blame(void * p1,void * p2)1106 static void set_next_blame(void *p1, void *p2)
1107 {
1108 	((struct blame_entry *)p1)->next = p2;
1109 }
1110 
1111 /*
1112  * Final image line numbers are all different, so we don't need a
1113  * three-way comparison here.
1114  */
1115 
compare_blame_final(const void * p1,const void * p2)1116 static int compare_blame_final(const void *p1, const void *p2)
1117 {
1118 	return ((struct blame_entry *)p1)->lno > ((struct blame_entry *)p2)->lno
1119 		? 1 : -1;
1120 }
1121 
compare_blame_suspect(const void * p1,const void * p2)1122 static int compare_blame_suspect(const void *p1, const void *p2)
1123 {
1124 	const struct blame_entry *s1 = p1, *s2 = p2;
1125 	/*
1126 	 * to allow for collating suspects, we sort according to the
1127 	 * respective pointer value as the primary sorting criterion.
1128 	 * The actual relation is pretty unimportant as long as it
1129 	 * establishes a total order.  Comparing as integers gives us
1130 	 * that.
1131 	 */
1132 	if (s1->suspect != s2->suspect)
1133 		return (intptr_t)s1->suspect > (intptr_t)s2->suspect ? 1 : -1;
1134 	if (s1->s_lno == s2->s_lno)
1135 		return 0;
1136 	return s1->s_lno > s2->s_lno ? 1 : -1;
1137 }
1138 
/* Sort the scoreboard's entry list into final-image line order. */
void blame_sort_final(struct blame_scoreboard *sb)
{
	sb->ent = llist_mergesort(sb->ent, get_next_blame, set_next_blame,
				  compare_blame_final);
}
1144 
/*
 * Priority-queue comparator for reverse blame: oldest commit first,
 * by negating the normal commit-date comparison.
 */
static int compare_commits_by_reverse_commit_date(const void *a,
						  const void *b,
						  void *c)
{
	return -compare_commits_by_commit_date(a, b, c);
}
1151 
1152 /*
1153  * For debugging -- origin is refcounted, and this asserts that
1154  * we do not underflow.
1155  */
sanity_check_refcnt(struct blame_scoreboard * sb)1156 static void sanity_check_refcnt(struct blame_scoreboard *sb)
1157 {
1158 	int baa = 0;
1159 	struct blame_entry *ent;
1160 
1161 	for (ent = sb->ent; ent; ent = ent->next) {
1162 		/* Nobody should have zero or negative refcnt */
1163 		if (ent->suspect->refcnt <= 0) {
1164 			fprintf(stderr, "%s in %s has negative refcnt %d\n",
1165 				ent->suspect->path,
1166 				oid_to_hex(&ent->suspect->commit->object.oid),
1167 				ent->suspect->refcnt);
1168 			baa = 1;
1169 		}
1170 	}
1171 	if (baa)
1172 		sb->on_sanity_fail(sb, baa);
1173 }
1174 
1175 /*
1176  * If two blame entries that are next to each other came from
1177  * contiguous lines in the same origin (i.e. <commit, path> pair),
1178  * merge them together.
1179  */
void blame_coalesce(struct blame_scoreboard *sb)
{
	struct blame_entry *ent, *next;

	for (ent = sb->ent; ent && (next = ent->next); ent = next) {
		/*
		 * Mergeable iff same suspect, contiguous in both the
		 * suspect's and the final image's line numbering, and
		 * identical ignored/unblamable flags.
		 */
		if (ent->suspect == next->suspect &&
		    ent->s_lno + ent->num_lines == next->s_lno &&
		    ent->lno + ent->num_lines == next->lno &&
		    ent->ignored == next->ignored &&
		    ent->unblamable == next->unblamable) {
			ent->num_lines += next->num_lines;
			ent->next = next->next;
			blame_origin_decref(next->suspect);
			free(next);
			/* the merged span's cached score is now stale */
			ent->score = 0;
			next = ent; /* again */
		}
	}

	if (sb->debug) /* sanity */
		sanity_check_refcnt(sb);
}
1202 
1203 /*
1204  * Merge the given sorted list of blames into a preexisting origin.
1205  * If there were no previous blames to that commit, it is entered into
1206  * the commit priority queue of the score board.
1207  */
1208 
static void queue_blames(struct blame_scoreboard *sb, struct blame_origin *porigin,
			 struct blame_entry *sorted)
{
	if (porigin->suspects)
		porigin->suspects = blame_merge(porigin->suspects, sorted);
	else {
		struct blame_origin *o;
		/*
		 * If any sibling origin of the same commit already holds
		 * suspects, the commit is already on the priority queue;
		 * attach our list without queueing it a second time.
		 */
		for (o = get_blame_suspects(porigin->commit); o; o = o->next) {
			if (o->suspects) {
				porigin->suspects = sorted;
				return;
			}
		}
		/* First blame for this commit: queue it for processing. */
		porigin->suspects = sorted;
		prio_queue_put(&sb->commits, porigin->commit);
	}
}
1226 
1227 /*
1228  * Fill the blob_sha1 field of an origin if it hasn't, so that later
1229  * call to fill_origin_blob() can use it to locate the data.  blob_sha1
1230  * for an origin is also used to pass the blame for the entire file to
1231  * the parent to detect the case where a child's blob is identical to
1232  * that of its parent's.
1233  *
1234  * This also fills origin->mode for corresponding tree path.
1235  */
fill_blob_sha1_and_mode(struct repository * r,struct blame_origin * origin)1236 static int fill_blob_sha1_and_mode(struct repository *r,
1237 				   struct blame_origin *origin)
1238 {
1239 	if (!is_null_oid(&origin->blob_oid))
1240 		return 0;
1241 	if (get_tree_entry(r, &origin->commit->object.oid, origin->path, &origin->blob_oid, &origin->mode))
1242 		goto error_out;
1243 	if (oid_object_info(r, &origin->blob_oid, NULL) != OBJ_BLOB)
1244 		goto error_out;
1245 	return 0;
1246  error_out:
1247 	oidclr(&origin->blob_oid);
1248 	origin->mode = S_IFINVALID;
1249 	return -1;
1250 }
1251 
struct blame_bloom_data {
	/*
	 * Changed-path Bloom filter keys. These can help prevent
	 * computing diffs against first parents, but we need to
	 * expand the list as code is moved or files are renamed.
	 */
	struct bloom_filter_settings *settings;
	struct bloom_key **keys;	/* owned; each element is xmalloc'd */
	int nr;				/* number of keys in use */
	int alloc;			/* allocated capacity of keys */
};

/* Counters: filter consultations, and definitive "path unchanged" answers. */
static int bloom_count_queries = 0;
static int bloom_count_no = 0;
maybe_changed_path(struct repository * r,struct blame_origin * origin,struct blame_bloom_data * bd)1266 static int maybe_changed_path(struct repository *r,
1267 			      struct blame_origin *origin,
1268 			      struct blame_bloom_data *bd)
1269 {
1270 	int i;
1271 	struct bloom_filter *filter;
1272 
1273 	if (!bd)
1274 		return 1;
1275 
1276 	if (commit_graph_generation(origin->commit) == GENERATION_NUMBER_INFINITY)
1277 		return 1;
1278 
1279 	filter = get_bloom_filter(r, origin->commit);
1280 
1281 	if (!filter)
1282 		return 1;
1283 
1284 	bloom_count_queries++;
1285 	for (i = 0; i < bd->nr; i++) {
1286 		if (bloom_filter_contains(filter,
1287 					  bd->keys[i],
1288 					  bd->settings))
1289 			return 1;
1290 	}
1291 
1292 	bloom_count_no++;
1293 	return 0;
1294 }
1295 
add_bloom_key(struct blame_bloom_data * bd,const char * path)1296 static void add_bloom_key(struct blame_bloom_data *bd,
1297 			  const char *path)
1298 {
1299 	if (!bd)
1300 		return;
1301 
1302 	if (bd->nr >= bd->alloc) {
1303 		bd->alloc *= 2;
1304 		REALLOC_ARRAY(bd->keys, bd->alloc);
1305 	}
1306 
1307 	bd->keys[bd->nr] = xmalloc(sizeof(struct bloom_key));
1308 	fill_bloom_key(path, strlen(path), bd->keys[bd->nr], bd->settings);
1309 	bd->nr++;
1310 }
1311 
1312 /*
1313  * We have an origin -- check if the same path exists in the
1314  * parent and return an origin structure to represent it.
1315  */
static struct blame_origin *find_origin(struct repository *r,
					struct commit *parent,
					struct blame_origin *origin,
					struct blame_bloom_data *bd)
{
	struct blame_origin *porigin;
	struct diff_options diff_opts;
	const char *paths[2];

	/* First check any existing origins */
	for (porigin = get_blame_suspects(parent); porigin; porigin = porigin->next)
		if (!strcmp(porigin->path, origin->path)) {
			/*
			 * The same path between origin and its parent
			 * without renaming -- the most common case.
			 */
			return blame_origin_incref (porigin);
		}

	/* See if the origin->path is different between parent
	 * and origin first.  Most of the time they are the
	 * same and diff-tree is fairly efficient about this.
	 */
	repo_diff_setup(r, &diff_opts);
	diff_opts.flags.recursive = 1;
	diff_opts.detect_rename = 0;
	diff_opts.output_format = DIFF_FORMAT_NO_OUTPUT;
	paths[0] = origin->path;
	paths[1] = NULL;

	/* Limit the diff to the single path we care about. */
	parse_pathspec(&diff_opts.pathspec,
		       PATHSPEC_ALL_MAGIC & ~PATHSPEC_LITERAL,
		       PATHSPEC_LITERAL_PATH, "", paths);
	diff_setup_done(&diff_opts);

	/*
	 * A null oid means we are blaming uncommitted contents: diff
	 * against the index/work tree.  Otherwise, when parent is the
	 * first parent, consult the Bloom filter, which may let us skip
	 * the tree diff entirely.
	 */
	if (is_null_oid(&origin->commit->object.oid))
		do_diff_cache(get_commit_tree_oid(parent), &diff_opts);
	else {
		int compute_diff = 1;
		if (origin->commit->parents &&
		    oideq(&parent->object.oid,
			  &origin->commit->parents->item->object.oid))
			compute_diff = maybe_changed_path(r, origin, bd);

		if (compute_diff)
			diff_tree_oid(get_commit_tree_oid(parent),
				      get_commit_tree_oid(origin->commit),
				      "", &diff_opts);
	}
	diffcore_std(&diff_opts);

	if (!diff_queued_diff.nr) {
		/* The path is the same as parent */
		porigin = get_origin(parent, origin->path);
		oidcpy(&porigin->blob_oid, &origin->blob_oid);
		porigin->mode = origin->mode;
	} else {
		/*
		 * Since origin->path is a pathspec, if the parent
		 * commit had it as a directory, we will see a whole
		 * bunch of deletion of files in the directory that we
		 * do not care about.
		 */
		int i;
		struct diff_filepair *p = NULL;
		for (i = 0; i < diff_queued_diff.nr; i++) {
			const char *name;
			p = diff_queued_diff.queue[i];
			name = p->one->path ? p->one->path : p->two->path;
			if (!strcmp(name, origin->path))
				break;
		}
		if (!p)
			die("internal error in blame::find_origin");
		switch (p->status) {
		default:
			die("internal error in blame::find_origin (%c)",
			    p->status);
		case 'M':
			porigin = get_origin(parent, origin->path);
			oidcpy(&porigin->blob_oid, &p->one->oid);
			porigin->mode = p->one->mode;
			break;
		case 'A':
		case 'T':
			/* Did not exist in parent, or type changed;
			 * porigin stays NULL (set by the initial loop)
			 * and NULL is returned to the caller. */
			break;
		}
	}
	diff_flush(&diff_opts);
	clear_pathspec(&diff_opts.pathspec);
	return porigin;
}
1409 
1410 /*
1411  * We have an origin -- find the path that corresponds to it in its
1412  * parent and return an origin structure to represent it.
1413  */
find_rename(struct repository * r,struct commit * parent,struct blame_origin * origin,struct blame_bloom_data * bd)1414 static struct blame_origin *find_rename(struct repository *r,
1415 					struct commit *parent,
1416 					struct blame_origin *origin,
1417 					struct blame_bloom_data *bd)
1418 {
1419 	struct blame_origin *porigin = NULL;
1420 	struct diff_options diff_opts;
1421 	int i;
1422 
1423 	repo_diff_setup(r, &diff_opts);
1424 	diff_opts.flags.recursive = 1;
1425 	diff_opts.detect_rename = DIFF_DETECT_RENAME;
1426 	diff_opts.output_format = DIFF_FORMAT_NO_OUTPUT;
1427 	diff_opts.single_follow = origin->path;
1428 	diff_setup_done(&diff_opts);
1429 
1430 	if (is_null_oid(&origin->commit->object.oid))
1431 		do_diff_cache(get_commit_tree_oid(parent), &diff_opts);
1432 	else
1433 		diff_tree_oid(get_commit_tree_oid(parent),
1434 			      get_commit_tree_oid(origin->commit),
1435 			      "", &diff_opts);
1436 	diffcore_std(&diff_opts);
1437 
1438 	for (i = 0; i < diff_queued_diff.nr; i++) {
1439 		struct diff_filepair *p = diff_queued_diff.queue[i];
1440 		if ((p->status == 'R' || p->status == 'C') &&
1441 		    !strcmp(p->two->path, origin->path)) {
1442 			add_bloom_key(bd, p->one->path);
1443 			porigin = get_origin(parent, p->one->path);
1444 			oidcpy(&porigin->blob_oid, &p->one->oid);
1445 			porigin->mode = p->one->mode;
1446 			break;
1447 		}
1448 	}
1449 	diff_flush(&diff_opts);
1450 	clear_pathspec(&diff_opts.pathspec);
1451 	return porigin;
1452 }
1453 
1454 /*
1455  * Append a new blame entry to a given output queue.
1456  */
add_blame_entry(struct blame_entry *** queue,const struct blame_entry * src)1457 static void add_blame_entry(struct blame_entry ***queue,
1458 			    const struct blame_entry *src)
1459 {
1460 	struct blame_entry *e = xmalloc(sizeof(*e));
1461 	memcpy(e, src, sizeof(*e));
1462 	blame_origin_incref(e->suspect);
1463 
1464 	e->next = **queue;
1465 	**queue = e;
1466 	*queue = &e->next;
1467 }
1468 
1469 /*
1470  * src typically is on-stack; we want to copy the information in it to
1471  * a malloced blame_entry that gets added to the given queue.  The
1472  * origin of dst loses a refcnt.
1473  */
dup_entry(struct blame_entry *** queue,struct blame_entry * dst,struct blame_entry * src)1474 static void dup_entry(struct blame_entry ***queue,
1475 		      struct blame_entry *dst, struct blame_entry *src)
1476 {
1477 	blame_origin_incref(src->suspect);
1478 	blame_origin_decref(dst->suspect);
1479 	memcpy(dst, src, sizeof(*src));
1480 	dst->next = **queue;
1481 	**queue = dst;
1482 	*queue = &dst->next;
1483 }
1484 
/* Return a pointer to the start of line lno (0-based) in the final image. */
const char *blame_nth_line(struct blame_scoreboard *sb, long lno)
{
	return sb->final_buf + sb->lineno[lno];
}
1489 
1490 /*
1491  * It is known that lines between tlno to same came from parent, and e
1492  * has an overlap with that range.  it also is known that parent's
1493  * line plno corresponds to e's line tlno.
1494  *
1495  *                <---- e ----->
1496  *                   <------>
1497  *                   <------------>
1498  *             <------------>
1499  *             <------------------>
1500  *
1501  * Split e into potentially three parts; before this chunk, the chunk
1502  * to be blamed for the parent, and after that portion.
1503  */
static void split_overlap(struct blame_entry *split,
			  struct blame_entry *e,
			  int tlno, int plno, int same,
			  struct blame_origin *parent)
{
	int chunk_end_lno;
	int i;
	memset(split, 0, sizeof(struct blame_entry [3]));

	/* All three pieces inherit e's ignored/unblamable flags. */
	for (i = 0; i < 3; i++) {
		split[i].ignored = e->ignored;
		split[i].unblamable = e->unblamable;
	}

	if (e->s_lno < tlno) {
		/* there is a pre-chunk part not blamed on parent */
		split[0].suspect = blame_origin_incref(e->suspect);
		split[0].lno = e->lno;
		split[0].s_lno = e->s_lno;
		split[0].num_lines = tlno - e->s_lno;
		/* middle piece begins where the pre-chunk ends */
		split[1].lno = e->lno + tlno - e->s_lno;
		split[1].s_lno = plno;
	}
	else {
		/* e starts inside the chunk; offset plno accordingly */
		split[1].lno = e->lno;
		split[1].s_lno = plno + (e->s_lno - tlno);
	}

	if (same < e->s_lno + e->num_lines) {
		/* there is a post-chunk part not blamed on parent */
		split[2].suspect = blame_origin_incref(e->suspect);
		split[2].lno = e->lno + (same - e->s_lno);
		split[2].s_lno = e->s_lno + (same - e->s_lno);
		split[2].num_lines = e->s_lno + e->num_lines - same;
		chunk_end_lno = split[2].lno;
	}
	else
		chunk_end_lno = e->lno + e->num_lines;
	split[1].num_lines = chunk_end_lno - split[1].lno;

	/*
	 * if it turns out there is nothing to blame the parent for,
	 * forget about the splitting.  !split[1].suspect signals this.
	 */
	if (split[1].num_lines < 1)
		return;
	split[1].suspect = blame_origin_incref(parent);
}
1552 
1553 /*
1554  * split_overlap() divided an existing blame e into up to three parts
1555  * in split.  Any assigned blame is moved to queue to
1556  * reflect the split.
1557  */
split_blame(struct blame_entry *** blamed,struct blame_entry *** unblamed,struct blame_entry * split,struct blame_entry * e)1558 static void split_blame(struct blame_entry ***blamed,
1559 			struct blame_entry ***unblamed,
1560 			struct blame_entry *split,
1561 			struct blame_entry *e)
1562 {
1563 	if (split[0].suspect && split[2].suspect) {
1564 		/* The first part (reuse storage for the existing entry e) */
1565 		dup_entry(unblamed, e, &split[0]);
1566 
1567 		/* The last part -- me */
1568 		add_blame_entry(unblamed, &split[2]);
1569 
1570 		/* ... and the middle part -- parent */
1571 		add_blame_entry(blamed, &split[1]);
1572 	}
1573 	else if (!split[0].suspect && !split[2].suspect)
1574 		/*
1575 		 * The parent covers the entire area; reuse storage for
1576 		 * e and replace it with the parent.
1577 		 */
1578 		dup_entry(blamed, e, &split[1]);
1579 	else if (split[0].suspect) {
1580 		/* me and then parent */
1581 		dup_entry(unblamed, e, &split[0]);
1582 		add_blame_entry(blamed, &split[1]);
1583 	}
1584 	else {
1585 		/* parent and then me */
1586 		dup_entry(blamed, e, &split[1]);
1587 		add_blame_entry(unblamed, &split[2]);
1588 	}
1589 }
1590 
1591 /*
1592  * After splitting the blame, the origins used by the
1593  * on-stack blame_entry should lose one refcnt each.
1594  */
decref_split(struct blame_entry * split)1595 static void decref_split(struct blame_entry *split)
1596 {
1597 	int i;
1598 
1599 	for (i = 0; i < 3; i++)
1600 		blame_origin_decref(split[i].suspect);
1601 }
1602 
1603 /*
1604  * reverse_blame reverses the list given in head, appending tail.
1605  * That allows us to build lists in reverse order, then reverse them
1606  * afterwards.  This can be faster than building the list in proper
1607  * order right away.  The reason is that building in proper order
1608  * requires writing a link in the _previous_ element, while building
1609  * in reverse order just requires placing the list head into the
1610  * _current_ element.
1611  */
1612 
reverse_blame(struct blame_entry * head,struct blame_entry * tail)1613 static struct blame_entry *reverse_blame(struct blame_entry *head,
1614 					 struct blame_entry *tail)
1615 {
1616 	while (head) {
1617 		struct blame_entry *next = head->next;
1618 		head->next = tail;
1619 		tail = head;
1620 		head = next;
1621 	}
1622 	return tail;
1623 }
1624 
1625 /*
1626  * Splits a blame entry into two entries at 'len' lines.  The original 'e'
1627  * consists of len lines, i.e. [e->lno, e->lno + len), and the second part,
1628  * which is returned, consists of the remainder: [e->lno + len, e->lno +
1629  * e->num_lines).  The caller needs to sort out the reference counting for the
1630  * new entry's suspect.
1631  */
split_blame_at(struct blame_entry * e,int len,struct blame_origin * new_suspect)1632 static struct blame_entry *split_blame_at(struct blame_entry *e, int len,
1633 					  struct blame_origin *new_suspect)
1634 {
1635 	struct blame_entry *n = xcalloc(1, sizeof(struct blame_entry));
1636 
1637 	n->suspect = new_suspect;
1638 	n->ignored = e->ignored;
1639 	n->unblamable = e->unblamable;
1640 	n->lno = e->lno + len;
1641 	n->s_lno = e->s_lno + len;
1642 	n->num_lines = e->num_lines - len;
1643 	e->num_lines = len;
1644 	e->score = 0;
1645 	return n;
1646 }
1647 
/*
 * Per-line verdict from guess_line_blames(): whether the line was
 * traced to the parent, and its line number in whichever file's
 * address space that implies.
 */
struct blame_line_tracker {
	int is_parent;
	int s_lno;
};
1652 
are_lines_adjacent(struct blame_line_tracker * first,struct blame_line_tracker * second)1653 static int are_lines_adjacent(struct blame_line_tracker *first,
1654 			      struct blame_line_tracker *second)
1655 {
1656 	return first->is_parent == second->is_parent &&
1657 	       first->s_lno + 1 == second->s_lno;
1658 }
1659 
/*
 * Scan [from, from + nr_lines) of the parent's fingerprints for the
 * line most similar to target line t_idx.  Returns the best parent
 * index, or -1 when nothing reaches the similarity threshold.
 */
static int scan_parent_range(struct fingerprint *p_fps,
			     struct fingerprint *t_fps, int t_idx,
			     int from, int nr_lines)
{
	int sim, p_idx;
	#define FINGERPRINT_FILE_THRESHOLD	10
	/* Seed "best" with the threshold so weaker matches are rejected. */
	int best_sim_val = FINGERPRINT_FILE_THRESHOLD;
	int best_sim_idx = -1;

	for (p_idx = from; p_idx < from + nr_lines; p_idx++) {
		sim = fingerprint_similarity(&t_fps[t_idx], &p_fps[p_idx]);
		if (sim < best_sim_val)
			continue;
		/* Break ties with the closest-to-target line number */
		if (sim == best_sim_val && best_sim_idx != -1 &&
		    abs(best_sim_idx - t_idx) < abs(p_idx - t_idx))
			continue;
		best_sim_val = sim;
		best_sim_idx = p_idx;
	}
	return best_sim_idx;
}
1682 
1683 /*
1684  * The first pass checks the blame entry (from the target) against the parent's
1685  * diff chunk.  If that fails for a line, the second pass tries to match that
1686  * line to any part of parent file.  That catches cases where a change was
1687  * broken into two chunks by 'context.'
1688  */
guess_line_blames(struct blame_origin * parent,struct blame_origin * target,int tlno,int offset,int same,int parent_len,struct blame_line_tracker * line_blames)1689 static void guess_line_blames(struct blame_origin *parent,
1690 			      struct blame_origin *target,
1691 			      int tlno, int offset, int same, int parent_len,
1692 			      struct blame_line_tracker *line_blames)
1693 {
1694 	int i, best_idx, target_idx;
1695 	int parent_slno = tlno + offset;
1696 	int *fuzzy_matches;
1697 
1698 	fuzzy_matches = fuzzy_find_matching_lines(parent, target,
1699 						  tlno, parent_slno, same,
1700 						  parent_len);
1701 	for (i = 0; i < same - tlno; i++) {
1702 		target_idx = tlno + i;
1703 		if (fuzzy_matches && fuzzy_matches[i] >= 0) {
1704 			best_idx = fuzzy_matches[i];
1705 		} else {
1706 			best_idx = scan_parent_range(parent->fingerprints,
1707 						     target->fingerprints,
1708 						     target_idx, 0,
1709 						     parent->num_lines);
1710 		}
1711 		if (best_idx >= 0) {
1712 			line_blames[i].is_parent = 1;
1713 			line_blames[i].s_lno = best_idx;
1714 		} else {
1715 			line_blames[i].is_parent = 0;
1716 			line_blames[i].s_lno = target_idx;
1717 		}
1718 	}
1719 	free(fuzzy_matches);
1720 }
1721 
1722 /*
1723  * This decides which parts of a blame entry go to the parent (added to the
1724  * ignoredp list) and which stay with the target (added to the diffp list).  The
1725  * actual decision was made in a separate heuristic function, and those answers
1726  * for the lines in 'e' are in line_blames.  This consumes e, essentially
1727  * putting it on a list.
1728  *
1729  * Note that the blame entries on the ignoredp list are not necessarily sorted
1730  * with respect to the parent's line numbers yet.
1731  */
ignore_blame_entry(struct blame_entry * e,struct blame_origin * parent,struct blame_entry ** diffp,struct blame_entry ** ignoredp,struct blame_line_tracker * line_blames)1732 static void ignore_blame_entry(struct blame_entry *e,
1733 			       struct blame_origin *parent,
1734 			       struct blame_entry **diffp,
1735 			       struct blame_entry **ignoredp,
1736 			       struct blame_line_tracker *line_blames)
1737 {
1738 	int entry_len, nr_lines, i;
1739 
1740 	/*
1741 	 * We carve new entries off the front of e.  Each entry comes from a
1742 	 * contiguous chunk of lines: adjacent lines from the same origin
1743 	 * (either the parent or the target).
1744 	 */
1745 	entry_len = 1;
1746 	nr_lines = e->num_lines;	/* e changes in the loop */
1747 	for (i = 0; i < nr_lines; i++) {
1748 		struct blame_entry *next = NULL;
1749 
1750 		/*
1751 		 * We are often adjacent to the next line - only split the blame
1752 		 * entry when we have to.
1753 		 */
1754 		if (i + 1 < nr_lines) {
1755 			if (are_lines_adjacent(&line_blames[i],
1756 					       &line_blames[i + 1])) {
1757 				entry_len++;
1758 				continue;
1759 			}
1760 			next = split_blame_at(e, entry_len,
1761 					      blame_origin_incref(e->suspect));
1762 		}
1763 		if (line_blames[i].is_parent) {
1764 			e->ignored = 1;
1765 			blame_origin_decref(e->suspect);
1766 			e->suspect = blame_origin_incref(parent);
1767 			e->s_lno = line_blames[i - entry_len + 1].s_lno;
1768 			e->next = *ignoredp;
1769 			*ignoredp = e;
1770 		} else {
1771 			e->unblamable = 1;
1772 			/* e->s_lno is already in the target's address space. */
1773 			e->next = *diffp;
1774 			*diffp = e;
1775 		}
1776 		assert(e->num_lines == entry_len);
1777 		e = next;
1778 		entry_len = 1;
1779 	}
1780 	assert(!e);
1781 }
1782 
1783 /*
1784  * Process one hunk from the patch between the current suspect for
1785  * blame_entry e and its parent.  This first blames any unfinished
1786  * entries before the chunk (which is where target and parent start
1787  * differing) on the parent, and then splits blame entries at the
1788  * start and at the end of the difference region.  Since use of -M and
1789  * -C options may lead to overlapping/duplicate source line number
1790  * ranges, all we can rely on from sorting/merging is the order of the
1791  * first suspect line number.
1792  *
1793  * tlno: line number in the target where this chunk begins
1794  * same: line number in the target where this chunk ends
1795  * offset: add to tlno to get the chunk starting point in the parent
1796  * parent_len: number of lines in the parent chunk
1797  */
blame_chunk(struct blame_entry *** dstq,struct blame_entry *** srcq,int tlno,int offset,int same,int parent_len,struct blame_origin * parent,struct blame_origin * target,int ignore_diffs)1798 static void blame_chunk(struct blame_entry ***dstq, struct blame_entry ***srcq,
1799 			int tlno, int offset, int same, int parent_len,
1800 			struct blame_origin *parent,
1801 			struct blame_origin *target, int ignore_diffs)
1802 {
1803 	struct blame_entry *e = **srcq;
1804 	struct blame_entry *samep = NULL, *diffp = NULL, *ignoredp = NULL;
1805 	struct blame_line_tracker *line_blames = NULL;
1806 
1807 	while (e && e->s_lno < tlno) {
1808 		struct blame_entry *next = e->next;
1809 		/*
1810 		 * current record starts before differing portion.  If
1811 		 * it reaches into it, we need to split it up and
1812 		 * examine the second part separately.
1813 		 */
1814 		if (e->s_lno + e->num_lines > tlno) {
1815 			/* Move second half to a new record */
1816 			struct blame_entry *n;
1817 
1818 			n = split_blame_at(e, tlno - e->s_lno, e->suspect);
1819 			/* Push new record to diffp */
1820 			n->next = diffp;
1821 			diffp = n;
1822 		} else
1823 			blame_origin_decref(e->suspect);
1824 		/* Pass blame for everything before the differing
1825 		 * chunk to the parent */
1826 		e->suspect = blame_origin_incref(parent);
1827 		e->s_lno += offset;
1828 		e->next = samep;
1829 		samep = e;
1830 		e = next;
1831 	}
1832 	/*
1833 	 * As we don't know how much of a common stretch after this
1834 	 * diff will occur, the currently blamed parts are all that we
1835 	 * can assign to the parent for now.
1836 	 */
1837 
1838 	if (samep) {
1839 		**dstq = reverse_blame(samep, **dstq);
1840 		*dstq = &samep->next;
1841 	}
1842 	/*
1843 	 * Prepend the split off portions: everything after e starts
1844 	 * after the blameable portion.
1845 	 */
1846 	e = reverse_blame(diffp, e);
1847 
1848 	/*
1849 	 * Now retain records on the target while parts are different
1850 	 * from the parent.
1851 	 */
1852 	samep = NULL;
1853 	diffp = NULL;
1854 
1855 	if (ignore_diffs && same - tlno > 0) {
1856 		CALLOC_ARRAY(line_blames, same - tlno);
1857 		guess_line_blames(parent, target, tlno, offset, same,
1858 				  parent_len, line_blames);
1859 	}
1860 
1861 	while (e && e->s_lno < same) {
1862 		struct blame_entry *next = e->next;
1863 
1864 		/*
1865 		 * If current record extends into sameness, need to split.
1866 		 */
1867 		if (e->s_lno + e->num_lines > same) {
1868 			/*
1869 			 * Move second half to a new record to be
1870 			 * processed by later chunks
1871 			 */
1872 			struct blame_entry *n;
1873 
1874 			n = split_blame_at(e, same - e->s_lno,
1875 					   blame_origin_incref(e->suspect));
1876 			/* Push new record to samep */
1877 			n->next = samep;
1878 			samep = n;
1879 		}
1880 		if (ignore_diffs) {
1881 			ignore_blame_entry(e, parent, &diffp, &ignoredp,
1882 					   line_blames + e->s_lno - tlno);
1883 		} else {
1884 			e->next = diffp;
1885 			diffp = e;
1886 		}
1887 		e = next;
1888 	}
1889 	free(line_blames);
1890 	if (ignoredp) {
1891 		/*
1892 		 * Note ignoredp is not sorted yet, and thus neither is dstq.
1893 		 * That list must be sorted before we queue_blames().  We defer
1894 		 * sorting until after all diff hunks are processed, so that
1895 		 * guess_line_blames() can pick *any* line in the parent.  The
1896 		 * slight drawback is that we end up sorting all blame entries
1897 		 * passed to the parent, including those that are unrelated to
1898 		 * changes made by the ignored commit.
1899 		 */
1900 		**dstq = reverse_blame(ignoredp, **dstq);
1901 		*dstq = &ignoredp->next;
1902 	}
1903 	**srcq = reverse_blame(diffp, reverse_blame(samep, e));
1904 	/* Move across elements that are in the unblamable portion */
1905 	if (diffp)
1906 		*srcq = &diffp->next;
1907 }
1908 
1909 struct blame_chunk_cb_data {
1910 	struct blame_origin *parent;
1911 	struct blame_origin *target;
1912 	long offset;
1913 	int ignore_diffs;
1914 	struct blame_entry **dstq;
1915 	struct blame_entry **srcq;
1916 };
1917 
1918 /* diff chunks are from parent to target */
blame_chunk_cb(long start_a,long count_a,long start_b,long count_b,void * data)1919 static int blame_chunk_cb(long start_a, long count_a,
1920 			  long start_b, long count_b, void *data)
1921 {
1922 	struct blame_chunk_cb_data *d = data;
1923 	if (start_a - start_b != d->offset)
1924 		die("internal error in blame::blame_chunk_cb");
1925 	blame_chunk(&d->dstq, &d->srcq, start_b, start_a - start_b,
1926 		    start_b + count_b, count_a, d->parent, d->target,
1927 		    d->ignore_diffs);
1928 	d->offset = start_a + count_a - (start_b + count_b);
1929 	return 0;
1930 }
1931 
1932 /*
1933  * We are looking at the origin 'target' and aiming to pass blame
1934  * for the lines it is suspected to its parent.  Run diff to find
1935  * which lines came from parent and pass blame for them.
1936  */
pass_blame_to_parent(struct blame_scoreboard * sb,struct blame_origin * target,struct blame_origin * parent,int ignore_diffs)1937 static void pass_blame_to_parent(struct blame_scoreboard *sb,
1938 				 struct blame_origin *target,
1939 				 struct blame_origin *parent, int ignore_diffs)
1940 {
1941 	mmfile_t file_p, file_o;
1942 	struct blame_chunk_cb_data d;
1943 	struct blame_entry *newdest = NULL;
1944 
1945 	if (!target->suspects)
1946 		return; /* nothing remains for this target */
1947 
1948 	d.parent = parent;
1949 	d.target = target;
1950 	d.offset = 0;
1951 	d.ignore_diffs = ignore_diffs;
1952 	d.dstq = &newdest; d.srcq = &target->suspects;
1953 
1954 	fill_origin_blob(&sb->revs->diffopt, parent, &file_p,
1955 			 &sb->num_read_blob, ignore_diffs);
1956 	fill_origin_blob(&sb->revs->diffopt, target, &file_o,
1957 			 &sb->num_read_blob, ignore_diffs);
1958 	sb->num_get_patch++;
1959 
1960 	if (diff_hunks(&file_p, &file_o, blame_chunk_cb, &d, sb->xdl_opts))
1961 		die("unable to generate diff (%s -> %s)",
1962 		    oid_to_hex(&parent->commit->object.oid),
1963 		    oid_to_hex(&target->commit->object.oid));
1964 	/* The rest are the same as the parent */
1965 	blame_chunk(&d.dstq, &d.srcq, INT_MAX, d.offset, INT_MAX, 0,
1966 		    parent, target, 0);
1967 	*d.dstq = NULL;
1968 	if (ignore_diffs)
1969 		newdest = llist_mergesort(newdest, get_next_blame,
1970 					  set_next_blame,
1971 					  compare_blame_suspect);
1972 	queue_blames(sb, parent, newdest);
1973 
1974 	return;
1975 }
1976 
1977 /*
1978  * The lines in blame_entry after splitting blames many times can become
1979  * very small and trivial, and at some point it becomes pointless to
1980  * blame the parents.  E.g. "\t\t}\n\t}\n\n" appears everywhere in any
1981  * ordinary C program, and it is not worth to say it was copied from
1982  * totally unrelated file in the parent.
1983  *
1984  * Compute how trivial the lines in the blame_entry are.
1985  */
blame_entry_score(struct blame_scoreboard * sb,struct blame_entry * e)1986 unsigned blame_entry_score(struct blame_scoreboard *sb, struct blame_entry *e)
1987 {
1988 	unsigned score;
1989 	const char *cp, *ep;
1990 
1991 	if (e->score)
1992 		return e->score;
1993 
1994 	score = 1;
1995 	cp = blame_nth_line(sb, e->lno);
1996 	ep = blame_nth_line(sb, e->lno + e->num_lines);
1997 	while (cp < ep) {
1998 		unsigned ch = *((unsigned char *)cp);
1999 		if (isalnum(ch))
2000 			score++;
2001 		cp++;
2002 	}
2003 	e->score = score;
2004 	return score;
2005 }
2006 
2007 /*
2008  * best_so_far[] and potential[] are both a split of an existing blame_entry
2009  * that passes blame to the parent.  Maintain best_so_far the best split so
2010  * far, by comparing potential and best_so_far and copying potential into
2011  * bst_so_far as needed.
2012  */
copy_split_if_better(struct blame_scoreboard * sb,struct blame_entry * best_so_far,struct blame_entry * potential)2013 static void copy_split_if_better(struct blame_scoreboard *sb,
2014 				 struct blame_entry *best_so_far,
2015 				 struct blame_entry *potential)
2016 {
2017 	int i;
2018 
2019 	if (!potential[1].suspect)
2020 		return;
2021 	if (best_so_far[1].suspect) {
2022 		if (blame_entry_score(sb, &potential[1]) <
2023 		    blame_entry_score(sb, &best_so_far[1]))
2024 			return;
2025 	}
2026 
2027 	for (i = 0; i < 3; i++)
2028 		blame_origin_incref(potential[i].suspect);
2029 	decref_split(best_so_far);
2030 	memcpy(best_so_far, potential, sizeof(struct blame_entry[3]));
2031 }
2032 
2033 /*
2034  * We are looking at a part of the final image represented by
2035  * ent (tlno and same are offset by ent->s_lno).
2036  * tlno is where we are looking at in the final image.
2037  * up to (but not including) same match preimage.
2038  * plno is where we are looking at in the preimage.
2039  *
2040  * <-------------- final image ---------------------->
2041  *       <------ent------>
2042  *         ^tlno ^same
2043  *    <---------preimage----->
2044  *         ^plno
2045  *
2046  * All line numbers are 0-based.
2047  */
handle_split(struct blame_scoreboard * sb,struct blame_entry * ent,int tlno,int plno,int same,struct blame_origin * parent,struct blame_entry * split)2048 static void handle_split(struct blame_scoreboard *sb,
2049 			 struct blame_entry *ent,
2050 			 int tlno, int plno, int same,
2051 			 struct blame_origin *parent,
2052 			 struct blame_entry *split)
2053 {
2054 	if (ent->num_lines <= tlno)
2055 		return;
2056 	if (tlno < same) {
2057 		struct blame_entry potential[3];
2058 		tlno += ent->s_lno;
2059 		same += ent->s_lno;
2060 		split_overlap(potential, ent, tlno, plno, same, parent);
2061 		copy_split_if_better(sb, split, potential);
2062 		decref_split(potential);
2063 	}
2064 }
2065 
2066 struct handle_split_cb_data {
2067 	struct blame_scoreboard *sb;
2068 	struct blame_entry *ent;
2069 	struct blame_origin *parent;
2070 	struct blame_entry *split;
2071 	long plno;
2072 	long tlno;
2073 };
2074 
handle_split_cb(long start_a,long count_a,long start_b,long count_b,void * data)2075 static int handle_split_cb(long start_a, long count_a,
2076 			   long start_b, long count_b, void *data)
2077 {
2078 	struct handle_split_cb_data *d = data;
2079 	handle_split(d->sb, d->ent, d->tlno, d->plno, start_b, d->parent,
2080 		     d->split);
2081 	d->plno = start_a + count_a;
2082 	d->tlno = start_b + count_b;
2083 	return 0;
2084 }
2085 
2086 /*
2087  * Find the lines from parent that are the same as ent so that
2088  * we can pass blames to it.  file_p has the blob contents for
2089  * the parent.
2090  */
find_copy_in_blob(struct blame_scoreboard * sb,struct blame_entry * ent,struct blame_origin * parent,struct blame_entry * split,mmfile_t * file_p)2091 static void find_copy_in_blob(struct blame_scoreboard *sb,
2092 			      struct blame_entry *ent,
2093 			      struct blame_origin *parent,
2094 			      struct blame_entry *split,
2095 			      mmfile_t *file_p)
2096 {
2097 	const char *cp;
2098 	mmfile_t file_o;
2099 	struct handle_split_cb_data d;
2100 
2101 	memset(&d, 0, sizeof(d));
2102 	d.sb = sb; d.ent = ent; d.parent = parent; d.split = split;
2103 	/*
2104 	 * Prepare mmfile that contains only the lines in ent.
2105 	 */
2106 	cp = blame_nth_line(sb, ent->lno);
2107 	file_o.ptr = (char *) cp;
2108 	file_o.size = blame_nth_line(sb, ent->lno + ent->num_lines) - cp;
2109 
2110 	/*
2111 	 * file_o is a part of final image we are annotating.
2112 	 * file_p partially may match that image.
2113 	 */
2114 	memset(split, 0, sizeof(struct blame_entry [3]));
2115 	if (diff_hunks(file_p, &file_o, handle_split_cb, &d, sb->xdl_opts))
2116 		die("unable to generate diff (%s)",
2117 		    oid_to_hex(&parent->commit->object.oid));
2118 	/* remainder, if any, all match the preimage */
2119 	handle_split(sb, ent, d.tlno, d.plno, ent->num_lines, parent, split);
2120 }
2121 
2122 /* Move all blame entries from list *source that have a score smaller
2123  * than score_min to the front of list *small.
2124  * Returns a pointer to the link pointing to the old head of the small list.
2125  */
2126 
filter_small(struct blame_scoreboard * sb,struct blame_entry ** small,struct blame_entry ** source,unsigned score_min)2127 static struct blame_entry **filter_small(struct blame_scoreboard *sb,
2128 					 struct blame_entry **small,
2129 					 struct blame_entry **source,
2130 					 unsigned score_min)
2131 {
2132 	struct blame_entry *p = *source;
2133 	struct blame_entry *oldsmall = *small;
2134 	while (p) {
2135 		if (blame_entry_score(sb, p) <= score_min) {
2136 			*small = p;
2137 			small = &p->next;
2138 			p = *small;
2139 		} else {
2140 			*source = p;
2141 			source = &p->next;
2142 			p = *source;
2143 		}
2144 	}
2145 	*small = oldsmall;
2146 	*source = NULL;
2147 	return small;
2148 }
2149 
2150 /*
2151  * See if lines currently target is suspected for can be attributed to
2152  * parent.
2153  */
find_move_in_parent(struct blame_scoreboard * sb,struct blame_entry *** blamed,struct blame_entry ** toosmall,struct blame_origin * target,struct blame_origin * parent)2154 static void find_move_in_parent(struct blame_scoreboard *sb,
2155 				struct blame_entry ***blamed,
2156 				struct blame_entry **toosmall,
2157 				struct blame_origin *target,
2158 				struct blame_origin *parent)
2159 {
2160 	struct blame_entry *e, split[3];
2161 	struct blame_entry *unblamed = target->suspects;
2162 	struct blame_entry *leftover = NULL;
2163 	mmfile_t file_p;
2164 
2165 	if (!unblamed)
2166 		return; /* nothing remains for this target */
2167 
2168 	fill_origin_blob(&sb->revs->diffopt, parent, &file_p,
2169 			 &sb->num_read_blob, 0);
2170 	if (!file_p.ptr)
2171 		return;
2172 
2173 	/* At each iteration, unblamed has a NULL-terminated list of
2174 	 * entries that have not yet been tested for blame.  leftover
2175 	 * contains the reversed list of entries that have been tested
2176 	 * without being assignable to the parent.
2177 	 */
2178 	do {
2179 		struct blame_entry **unblamedtail = &unblamed;
2180 		struct blame_entry *next;
2181 		for (e = unblamed; e; e = next) {
2182 			next = e->next;
2183 			find_copy_in_blob(sb, e, parent, split, &file_p);
2184 			if (split[1].suspect &&
2185 			    sb->move_score < blame_entry_score(sb, &split[1])) {
2186 				split_blame(blamed, &unblamedtail, split, e);
2187 			} else {
2188 				e->next = leftover;
2189 				leftover = e;
2190 			}
2191 			decref_split(split);
2192 		}
2193 		*unblamedtail = NULL;
2194 		toosmall = filter_small(sb, toosmall, &unblamed, sb->move_score);
2195 	} while (unblamed);
2196 	target->suspects = reverse_blame(leftover, NULL);
2197 }
2198 
2199 struct blame_list {
2200 	struct blame_entry *ent;
2201 	struct blame_entry split[3];
2202 };
2203 
2204 /*
2205  * Count the number of entries the target is suspected for,
2206  * and prepare a list of entry and the best split.
2207  */
setup_blame_list(struct blame_entry * unblamed,int * num_ents_p)2208 static struct blame_list *setup_blame_list(struct blame_entry *unblamed,
2209 					   int *num_ents_p)
2210 {
2211 	struct blame_entry *e;
2212 	int num_ents, i;
2213 	struct blame_list *blame_list = NULL;
2214 
2215 	for (e = unblamed, num_ents = 0; e; e = e->next)
2216 		num_ents++;
2217 	if (num_ents) {
2218 		CALLOC_ARRAY(blame_list, num_ents);
2219 		for (e = unblamed, i = 0; e; e = e->next)
2220 			blame_list[i++].ent = e;
2221 	}
2222 	*num_ents_p = num_ents;
2223 	return blame_list;
2224 }
2225 
2226 /*
2227  * For lines target is suspected for, see if we can find code movement
2228  * across file boundary from the parent commit.  porigin is the path
2229  * in the parent we already tried.
2230  */
find_copy_in_parent(struct blame_scoreboard * sb,struct blame_entry *** blamed,struct blame_entry ** toosmall,struct blame_origin * target,struct commit * parent,struct blame_origin * porigin,int opt)2231 static void find_copy_in_parent(struct blame_scoreboard *sb,
2232 				struct blame_entry ***blamed,
2233 				struct blame_entry **toosmall,
2234 				struct blame_origin *target,
2235 				struct commit *parent,
2236 				struct blame_origin *porigin,
2237 				int opt)
2238 {
2239 	struct diff_options diff_opts;
2240 	int i, j;
2241 	struct blame_list *blame_list;
2242 	int num_ents;
2243 	struct blame_entry *unblamed = target->suspects;
2244 	struct blame_entry *leftover = NULL;
2245 
2246 	if (!unblamed)
2247 		return; /* nothing remains for this target */
2248 
2249 	repo_diff_setup(sb->repo, &diff_opts);
2250 	diff_opts.flags.recursive = 1;
2251 	diff_opts.output_format = DIFF_FORMAT_NO_OUTPUT;
2252 
2253 	diff_setup_done(&diff_opts);
2254 
2255 	/* Try "find copies harder" on new path if requested;
2256 	 * we do not want to use diffcore_rename() actually to
2257 	 * match things up; find_copies_harder is set only to
2258 	 * force diff_tree_oid() to feed all filepairs to diff_queue,
2259 	 * and this code needs to be after diff_setup_done(), which
2260 	 * usually makes find-copies-harder imply copy detection.
2261 	 */
2262 	if ((opt & PICKAXE_BLAME_COPY_HARDEST)
2263 	    || ((opt & PICKAXE_BLAME_COPY_HARDER)
2264 		&& (!porigin || strcmp(target->path, porigin->path))))
2265 		diff_opts.flags.find_copies_harder = 1;
2266 
2267 	if (is_null_oid(&target->commit->object.oid))
2268 		do_diff_cache(get_commit_tree_oid(parent), &diff_opts);
2269 	else
2270 		diff_tree_oid(get_commit_tree_oid(parent),
2271 			      get_commit_tree_oid(target->commit),
2272 			      "", &diff_opts);
2273 
2274 	if (!diff_opts.flags.find_copies_harder)
2275 		diffcore_std(&diff_opts);
2276 
2277 	do {
2278 		struct blame_entry **unblamedtail = &unblamed;
2279 		blame_list = setup_blame_list(unblamed, &num_ents);
2280 
2281 		for (i = 0; i < diff_queued_diff.nr; i++) {
2282 			struct diff_filepair *p = diff_queued_diff.queue[i];
2283 			struct blame_origin *norigin;
2284 			mmfile_t file_p;
2285 			struct blame_entry potential[3];
2286 
2287 			if (!DIFF_FILE_VALID(p->one))
2288 				continue; /* does not exist in parent */
2289 			if (S_ISGITLINK(p->one->mode))
2290 				continue; /* ignore git links */
2291 			if (porigin && !strcmp(p->one->path, porigin->path))
2292 				/* find_move already dealt with this path */
2293 				continue;
2294 
2295 			norigin = get_origin(parent, p->one->path);
2296 			oidcpy(&norigin->blob_oid, &p->one->oid);
2297 			norigin->mode = p->one->mode;
2298 			fill_origin_blob(&sb->revs->diffopt, norigin, &file_p,
2299 					 &sb->num_read_blob, 0);
2300 			if (!file_p.ptr)
2301 				continue;
2302 
2303 			for (j = 0; j < num_ents; j++) {
2304 				find_copy_in_blob(sb, blame_list[j].ent,
2305 						  norigin, potential, &file_p);
2306 				copy_split_if_better(sb, blame_list[j].split,
2307 						     potential);
2308 				decref_split(potential);
2309 			}
2310 			blame_origin_decref(norigin);
2311 		}
2312 
2313 		for (j = 0; j < num_ents; j++) {
2314 			struct blame_entry *split = blame_list[j].split;
2315 			if (split[1].suspect &&
2316 			    sb->copy_score < blame_entry_score(sb, &split[1])) {
2317 				split_blame(blamed, &unblamedtail, split,
2318 					    blame_list[j].ent);
2319 			} else {
2320 				blame_list[j].ent->next = leftover;
2321 				leftover = blame_list[j].ent;
2322 			}
2323 			decref_split(split);
2324 		}
2325 		free(blame_list);
2326 		*unblamedtail = NULL;
2327 		toosmall = filter_small(sb, toosmall, &unblamed, sb->copy_score);
2328 	} while (unblamed);
2329 	target->suspects = reverse_blame(leftover, NULL);
2330 	diff_flush(&diff_opts);
2331 	clear_pathspec(&diff_opts.pathspec);
2332 }
2333 
2334 /*
2335  * The blobs of origin and porigin exactly match, so everything
2336  * origin is suspected for can be blamed on the parent.
2337  */
pass_whole_blame(struct blame_scoreboard * sb,struct blame_origin * origin,struct blame_origin * porigin)2338 static void pass_whole_blame(struct blame_scoreboard *sb,
2339 			     struct blame_origin *origin, struct blame_origin *porigin)
2340 {
2341 	struct blame_entry *e, *suspects;
2342 
2343 	if (!porigin->file.ptr && origin->file.ptr) {
2344 		/* Steal its file */
2345 		porigin->file = origin->file;
2346 		origin->file.ptr = NULL;
2347 	}
2348 	suspects = origin->suspects;
2349 	origin->suspects = NULL;
2350 	for (e = suspects; e; e = e->next) {
2351 		blame_origin_incref(porigin);
2352 		blame_origin_decref(e->suspect);
2353 		e->suspect = porigin;
2354 	}
2355 	queue_blames(sb, porigin, suspects);
2356 }
2357 
2358 /*
2359  * We pass blame from the current commit to its parents.  We keep saying
2360  * "parent" (and "porigin"), but what we mean is to find scapegoat to
2361  * exonerate ourselves.
2362  */
first_scapegoat(struct rev_info * revs,struct commit * commit,int reverse)2363 static struct commit_list *first_scapegoat(struct rev_info *revs, struct commit *commit,
2364 					int reverse)
2365 {
2366 	if (!reverse) {
2367 		if (revs->first_parent_only &&
2368 		    commit->parents &&
2369 		    commit->parents->next) {
2370 			free_commit_list(commit->parents->next);
2371 			commit->parents->next = NULL;
2372 		}
2373 		return commit->parents;
2374 	}
2375 	return lookup_decoration(&revs->children, &commit->object);
2376 }
2377 
num_scapegoats(struct rev_info * revs,struct commit * commit,int reverse)2378 static int num_scapegoats(struct rev_info *revs, struct commit *commit, int reverse)
2379 {
2380 	struct commit_list *l = first_scapegoat(revs, commit, reverse);
2381 	return commit_list_count(l);
2382 }
2383 
2384 /* Distribute collected unsorted blames to the respected sorted lists
2385  * in the various origins.
2386  */
distribute_blame(struct blame_scoreboard * sb,struct blame_entry * blamed)2387 static void distribute_blame(struct blame_scoreboard *sb, struct blame_entry *blamed)
2388 {
2389 	blamed = llist_mergesort(blamed, get_next_blame, set_next_blame,
2390 				 compare_blame_suspect);
2391 	while (blamed)
2392 	{
2393 		struct blame_origin *porigin = blamed->suspect;
2394 		struct blame_entry *suspects = NULL;
2395 		do {
2396 			struct blame_entry *next = blamed->next;
2397 			blamed->next = suspects;
2398 			suspects = blamed;
2399 			blamed = next;
2400 		} while (blamed && blamed->suspect == porigin);
2401 		suspects = reverse_blame(suspects, NULL);
2402 		queue_blames(sb, porigin, suspects);
2403 	}
2404 }
2405 
2406 #define MAXSG 16
2407 
2408 typedef struct blame_origin *(*blame_find_alg)(struct repository *,
2409 					       struct commit *,
2410 					       struct blame_origin *,
2411 					       struct blame_bloom_data *);
2412 
2413 static void pass_blame(struct blame_scoreboard *sb, struct blame_origin *origin, int opt)
2414 {
2415 	struct rev_info *revs = sb->revs;
2416 	int i, pass, num_sg;
2417 	struct commit *commit = origin->commit;
2418 	struct commit_list *sg;
2419 	struct blame_origin *sg_buf[MAXSG];
2420 	struct blame_origin *porigin, **sg_origin = sg_buf;
2421 	struct blame_entry *toosmall = NULL;
2422 	struct blame_entry *blames, **blametail = &blames;
2423 
2424 	num_sg = num_scapegoats(revs, commit, sb->reverse);
2425 	if (!num_sg)
2426 		goto finish;
2427 	else if (num_sg < ARRAY_SIZE(sg_buf))
2428 		memset(sg_buf, 0, sizeof(sg_buf));
2429 	else
2430 		CALLOC_ARRAY(sg_origin, num_sg);
2431 
2432 	/*
2433 	 * The first pass looks for unrenamed path to optimize for
2434 	 * common cases, then we look for renames in the second pass.
2435 	 */
2436 	for (pass = 0; pass < 2 - sb->no_whole_file_rename; pass++) {
2437 		blame_find_alg find = pass ? find_rename : find_origin;
2438 
2439 		for (i = 0, sg = first_scapegoat(revs, commit, sb->reverse);
2440 		     i < num_sg && sg;
2441 		     sg = sg->next, i++) {
2442 			struct commit *p = sg->item;
2443 			int j, same;
2444 
2445 			if (sg_origin[i])
2446 				continue;
2447 			if (parse_commit(p))
2448 				continue;
2449 			porigin = find(sb->repo, p, origin, sb->bloom_data);
2450 			if (!porigin)
2451 				continue;
2452 			if (oideq(&porigin->blob_oid, &origin->blob_oid)) {
2453 				pass_whole_blame(sb, origin, porigin);
2454 				blame_origin_decref(porigin);
2455 				goto finish;
2456 			}
2457 			for (j = same = 0; j < i; j++)
2458 				if (sg_origin[j] &&
2459 				    oideq(&sg_origin[j]->blob_oid, &porigin->blob_oid)) {
2460 					same = 1;
2461 					break;
2462 				}
2463 			if (!same)
2464 				sg_origin[i] = porigin;
2465 			else
2466 				blame_origin_decref(porigin);
2467 		}
2468 	}
2469 
2470 	sb->num_commits++;
2471 	for (i = 0, sg = first_scapegoat(revs, commit, sb->reverse);
2472 	     i < num_sg && sg;
2473 	     sg = sg->next, i++) {
2474 		struct blame_origin *porigin = sg_origin[i];
2475 		if (!porigin)
2476 			continue;
2477 		if (!origin->previous) {
2478 			blame_origin_incref(porigin);
2479 			origin->previous = porigin;
2480 		}
2481 		pass_blame_to_parent(sb, origin, porigin, 0);
2482 		if (!origin->suspects)
2483 			goto finish;
2484 	}
2485 
2486 	/*
2487 	 * Pass remaining suspects for ignored commits to their parents.
2488 	 */
2489 	if (oidset_contains(&sb->ignore_list, &commit->object.oid)) {
2490 		for (i = 0, sg = first_scapegoat(revs, commit, sb->reverse);
2491 		     i < num_sg && sg;
2492 		     sg = sg->next, i++) {
2493 			struct blame_origin *porigin = sg_origin[i];
2494 
2495 			if (!porigin)
2496 				continue;
2497 			pass_blame_to_parent(sb, origin, porigin, 1);
2498 			/*
2499 			 * Preemptively drop porigin so we can refresh the
2500 			 * fingerprints if we use the parent again, which can
2501 			 * occur if you ignore back-to-back commits.
2502 			 */
2503 			drop_origin_blob(porigin);
2504 			if (!origin->suspects)
2505 				goto finish;
2506 		}
2507 	}
2508 
2509 	/*
2510 	 * Optionally find moves in parents' files.
2511 	 */
2512 	if (opt & PICKAXE_BLAME_MOVE) {
2513 		filter_small(sb, &toosmall, &origin->suspects, sb->move_score);
2514 		if (origin->suspects) {
2515 			for (i = 0, sg = first_scapegoat(revs, commit, sb->reverse);
2516 			     i < num_sg && sg;
2517 			     sg = sg->next, i++) {
2518 				struct blame_origin *porigin = sg_origin[i];
2519 				if (!porigin)
2520 					continue;
2521 				find_move_in_parent(sb, &blametail, &toosmall, origin, porigin);
2522 				if (!origin->suspects)
2523 					break;
2524 			}
2525 		}
2526 	}
2527 
2528 	/*
2529 	 * Optionally find copies from parents' files.
2530 	 */
2531 	if (opt & PICKAXE_BLAME_COPY) {
2532 		if (sb->copy_score > sb->move_score)
2533 			filter_small(sb, &toosmall, &origin->suspects, sb->copy_score);
2534 		else if (sb->copy_score < sb->move_score) {
2535 			origin->suspects = blame_merge(origin->suspects, toosmall);
2536 			toosmall = NULL;
2537 			filter_small(sb, &toosmall, &origin->suspects, sb->copy_score);
2538 		}
2539 		if (!origin->suspects)
2540 			goto finish;
2541 
2542 		for (i = 0, sg = first_scapegoat(revs, commit, sb->reverse);
2543 		     i < num_sg && sg;
2544 		     sg = sg->next, i++) {
2545 			struct blame_origin *porigin = sg_origin[i];
2546 			find_copy_in_parent(sb, &blametail, &toosmall,
2547 					    origin, sg->item, porigin, opt);
2548 			if (!origin->suspects)
2549 				goto finish;
2550 		}
2551 	}
2552 
2553 finish:
2554 	*blametail = NULL;
2555 	distribute_blame(sb, blames);
2556 	/*
2557 	 * prepend toosmall to origin->suspects
2558 	 *
2559 	 * There is no point in sorting: this ends up on a big
2560 	 * unsorted list in the caller anyway.
2561 	 */
2562 	if (toosmall) {
2563 		struct blame_entry **tail = &toosmall;
2564 		while (*tail)
2565 			tail = &(*tail)->next;
2566 		*tail = origin->suspects;
2567 		origin->suspects = toosmall;
2568 	}
2569 	for (i = 0; i < num_sg; i++) {
2570 		if (sg_origin[i]) {
2571 			if (!sg_origin[i]->suspects)
2572 				drop_origin_blob(sg_origin[i]);
2573 			blame_origin_decref(sg_origin[i]);
2574 		}
2575 	}
2576 	drop_origin_blob(origin);
2577 	if (sg_buf != sg_origin)
2578 		free(sg_origin);
2579 }
2580 
2581 /*
2582  * The main loop -- while we have blobs with lines whose true origin
2583  * is still unknown, pick one blob, and allow its lines to pass blames
2584  * to its parents. */
void assign_blame(struct blame_scoreboard *sb, int opt)
{
	struct rev_info *revs = sb->revs;
	struct commit *commit = prio_queue_get(&sb->commits);

	while (commit) {
		struct blame_entry *ent;
		struct blame_origin *suspect = get_blame_suspects(commit);

		/* find one suspect to break down */
		while (suspect && !suspect->suspects)
			suspect = suspect->next;

		/*
		 * No origin of this commit has pending blame entries;
		 * move on to the next commit in the priority queue.
		 */
		if (!suspect) {
			commit = prio_queue_get(&sb->commits);
			continue;
		}

		assert(commit == suspect->commit);

		/*
		 * We will use this suspect later in the loop,
		 * so hold onto it in the meantime.
		 */
		blame_origin_incref(suspect);
		parse_commit(commit);
		/*
		 * Pass blame to the parents unless the commit is outside
		 * the requested range (UNINTERESTING or older than
		 * --since); "opt" is the PICKAXE_BLAME_* option bits
		 * forwarded to pass_blame().
		 */
		if (sb->reverse ||
		    (!(commit->object.flags & UNINTERESTING) &&
		     !(revs->max_age != -1 && commit->date < revs->max_age)))
			pass_blame(sb, suspect, opt);
		else {
			commit->object.flags |= UNINTERESTING;
			if (commit->object.parsed)
				mark_parents_uninteresting(commit);
		}
		/* treat root commit as boundary */
		if (!commit->parents && !sb->show_root)
			commit->object.flags |= UNINTERESTING;

		/* Take responsibility for the remaining entries */
		ent = suspect->suspects;
		if (ent) {
			suspect->guilty = 1;
			/*
			 * Walk to the tail of the suspect's entry list,
			 * notifying the callback for each entry, then
			 * splice the whole list onto the front of sb->ent.
			 */
			for (;;) {
				struct blame_entry *next = ent->next;
				if (sb->found_guilty_entry)
					sb->found_guilty_entry(ent, sb->found_guilty_entry_data);
				if (next) {
					ent = next;
					continue;
				}
				ent->next = sb->ent;
				sb->ent = suspect->suspects;
				suspect->suspects = NULL;
				break;
			}
		}
		blame_origin_decref(suspect);

		if (sb->debug) /* sanity */
			sanity_check_refcnt(sb);
	}
}
2648 
2649 /*
2650  * To allow quick access to the contents of nth line in the
2651  * final image, prepare an index in the scoreboard.
2652  */
prepare_lines(struct blame_scoreboard * sb)2653 static int prepare_lines(struct blame_scoreboard *sb)
2654 {
2655 	sb->num_lines = find_line_starts(&sb->lineno, sb->final_buf,
2656 					 sb->final_buf_size);
2657 	return sb->num_lines;
2658 }
2659 
find_single_final(struct rev_info * revs,const char ** name_p)2660 static struct commit *find_single_final(struct rev_info *revs,
2661 					const char **name_p)
2662 {
2663 	int i;
2664 	struct commit *found = NULL;
2665 	const char *name = NULL;
2666 
2667 	for (i = 0; i < revs->pending.nr; i++) {
2668 		struct object *obj = revs->pending.objects[i].item;
2669 		if (obj->flags & UNINTERESTING)
2670 			continue;
2671 		obj = deref_tag(revs->repo, obj, NULL, 0);
2672 		if (!obj || obj->type != OBJ_COMMIT)
2673 			die("Non commit %s?", revs->pending.objects[i].name);
2674 		if (found)
2675 			die("More than one commit to dig from %s and %s?",
2676 			    revs->pending.objects[i].name, name);
2677 		found = (struct commit *)obj;
2678 		name = revs->pending.objects[i].name;
2679 	}
2680 	if (name_p)
2681 		*name_p = xstrdup_or_null(name);
2682 	return found;
2683 }
2684 
dwim_reverse_initial(struct rev_info * revs,const char ** name_p)2685 static struct commit *dwim_reverse_initial(struct rev_info *revs,
2686 					   const char **name_p)
2687 {
2688 	/*
2689 	 * DWIM "git blame --reverse ONE -- PATH" as
2690 	 * "git blame --reverse ONE..HEAD -- PATH" but only do so
2691 	 * when it makes sense.
2692 	 */
2693 	struct object *obj;
2694 	struct commit *head_commit;
2695 	struct object_id head_oid;
2696 
2697 	if (revs->pending.nr != 1)
2698 		return NULL;
2699 
2700 	/* Is that sole rev a committish? */
2701 	obj = revs->pending.objects[0].item;
2702 	obj = deref_tag(revs->repo, obj, NULL, 0);
2703 	if (!obj || obj->type != OBJ_COMMIT)
2704 		return NULL;
2705 
2706 	/* Do we have HEAD? */
2707 	if (!resolve_ref_unsafe("HEAD", RESOLVE_REF_READING, &head_oid, NULL))
2708 		return NULL;
2709 	head_commit = lookup_commit_reference_gently(revs->repo,
2710 						     &head_oid, 1);
2711 	if (!head_commit)
2712 		return NULL;
2713 
2714 	/* Turn "ONE" into "ONE..HEAD" then */
2715 	obj->flags |= UNINTERESTING;
2716 	add_pending_object(revs, &head_commit->object, "HEAD");
2717 
2718 	if (name_p)
2719 		*name_p = revs->pending.objects[0].name;
2720 	return (struct commit *)obj;
2721 }
2722 
find_single_initial(struct rev_info * revs,const char ** name_p)2723 static struct commit *find_single_initial(struct rev_info *revs,
2724 					  const char **name_p)
2725 {
2726 	int i;
2727 	struct commit *found = NULL;
2728 	const char *name = NULL;
2729 
2730 	/*
2731 	 * There must be one and only one negative commit, and it must be
2732 	 * the boundary.
2733 	 */
2734 	for (i = 0; i < revs->pending.nr; i++) {
2735 		struct object *obj = revs->pending.objects[i].item;
2736 		if (!(obj->flags & UNINTERESTING))
2737 			continue;
2738 		obj = deref_tag(revs->repo, obj, NULL, 0);
2739 		if (!obj || obj->type != OBJ_COMMIT)
2740 			die("Non commit %s?", revs->pending.objects[i].name);
2741 		if (found)
2742 			die("More than one commit to dig up from, %s and %s?",
2743 			    revs->pending.objects[i].name, name);
2744 		found = (struct commit *) obj;
2745 		name = revs->pending.objects[i].name;
2746 	}
2747 
2748 	if (!name)
2749 		found = dwim_reverse_initial(revs, &name);
2750 	if (!name)
2751 		die("No commit to dig up from?");
2752 
2753 	if (name_p)
2754 		*name_p = xstrdup(name);
2755 	return found;
2756 }
2757 
init_scoreboard(struct blame_scoreboard * sb)2758 void init_scoreboard(struct blame_scoreboard *sb)
2759 {
2760 	memset(sb, 0, sizeof(struct blame_scoreboard));
2761 	sb->move_score = BLAME_DEFAULT_MOVE_SCORE;
2762 	sb->copy_score = BLAME_DEFAULT_COPY_SCORE;
2763 }
2764 
setup_scoreboard(struct blame_scoreboard * sb,struct blame_origin ** orig)2765 void setup_scoreboard(struct blame_scoreboard *sb,
2766 		      struct blame_origin **orig)
2767 {
2768 	const char *final_commit_name = NULL;
2769 	struct blame_origin *o;
2770 	struct commit *final_commit = NULL;
2771 	enum object_type type;
2772 
2773 	init_blame_suspects(&blame_suspects);
2774 
2775 	if (sb->reverse && sb->contents_from)
2776 		die(_("--contents and --reverse do not blend well."));
2777 
2778 	if (!sb->repo)
2779 		BUG("repo is NULL");
2780 
2781 	if (!sb->reverse) {
2782 		sb->final = find_single_final(sb->revs, &final_commit_name);
2783 		sb->commits.compare = compare_commits_by_commit_date;
2784 	} else {
2785 		sb->final = find_single_initial(sb->revs, &final_commit_name);
2786 		sb->commits.compare = compare_commits_by_reverse_commit_date;
2787 	}
2788 
2789 	if (sb->final && sb->contents_from)
2790 		die(_("cannot use --contents with final commit object name"));
2791 
2792 	if (sb->reverse && sb->revs->first_parent_only)
2793 		sb->revs->children.name = NULL;
2794 
2795 	if (!sb->final) {
2796 		/*
2797 		 * "--not A B -- path" without anything positive;
2798 		 * do not default to HEAD, but use the working tree
2799 		 * or "--contents".
2800 		 */
2801 		setup_work_tree();
2802 		sb->final = fake_working_tree_commit(sb->repo,
2803 						     &sb->revs->diffopt,
2804 						     sb->path, sb->contents_from);
2805 		add_pending_object(sb->revs, &(sb->final->object), ":");
2806 	}
2807 
2808 	if (sb->reverse && sb->revs->first_parent_only) {
2809 		final_commit = find_single_final(sb->revs, NULL);
2810 		if (!final_commit)
2811 			die(_("--reverse and --first-parent together require specified latest commit"));
2812 	}
2813 
2814 	/*
2815 	 * If we have bottom, this will mark the ancestors of the
2816 	 * bottom commits we would reach while traversing as
2817 	 * uninteresting.
2818 	 */
2819 	if (prepare_revision_walk(sb->revs))
2820 		die(_("revision walk setup failed"));
2821 
2822 	if (sb->reverse && sb->revs->first_parent_only) {
2823 		struct commit *c = final_commit;
2824 
2825 		sb->revs->children.name = "children";
2826 		while (c->parents &&
2827 		       !oideq(&c->object.oid, &sb->final->object.oid)) {
2828 			struct commit_list *l = xcalloc(1, sizeof(*l));
2829 
2830 			l->item = c;
2831 			if (add_decoration(&sb->revs->children,
2832 					   &c->parents->item->object, l))
2833 				BUG("not unique item in first-parent chain");
2834 			c = c->parents->item;
2835 		}
2836 
2837 		if (!oideq(&c->object.oid, &sb->final->object.oid))
2838 			die(_("--reverse --first-parent together require range along first-parent chain"));
2839 	}
2840 
2841 	if (is_null_oid(&sb->final->object.oid)) {
2842 		o = get_blame_suspects(sb->final);
2843 		sb->final_buf = xmemdupz(o->file.ptr, o->file.size);
2844 		sb->final_buf_size = o->file.size;
2845 	}
2846 	else {
2847 		o = get_origin(sb->final, sb->path);
2848 		if (fill_blob_sha1_and_mode(sb->repo, o))
2849 			die(_("no such path %s in %s"), sb->path, final_commit_name);
2850 
2851 		if (sb->revs->diffopt.flags.allow_textconv &&
2852 		    textconv_object(sb->repo, sb->path, o->mode, &o->blob_oid, 1, (char **) &sb->final_buf,
2853 				    &sb->final_buf_size))
2854 			;
2855 		else
2856 			sb->final_buf = read_object_file(&o->blob_oid, &type,
2857 							 &sb->final_buf_size);
2858 
2859 		if (!sb->final_buf)
2860 			die(_("cannot read blob %s for path %s"),
2861 			    oid_to_hex(&o->blob_oid),
2862 			    sb->path);
2863 	}
2864 	sb->num_read_blob++;
2865 	prepare_lines(sb);
2866 
2867 	if (orig)
2868 		*orig = o;
2869 
2870 	free((char *)final_commit_name);
2871 }
2872 
2873 
2874 
blame_entry_prepend(struct blame_entry * head,long start,long end,struct blame_origin * o)2875 struct blame_entry *blame_entry_prepend(struct blame_entry *head,
2876 					long start, long end,
2877 					struct blame_origin *o)
2878 {
2879 	struct blame_entry *new_head = xcalloc(1, sizeof(struct blame_entry));
2880 	new_head->lno = start;
2881 	new_head->num_lines = end - start;
2882 	new_head->suspect = o;
2883 	new_head->s_lno = start;
2884 	new_head->next = head;
2885 	blame_origin_incref(o);
2886 	return new_head;
2887 }
2888 
setup_blame_bloom_data(struct blame_scoreboard * sb)2889 void setup_blame_bloom_data(struct blame_scoreboard *sb)
2890 {
2891 	struct blame_bloom_data *bd;
2892 	struct bloom_filter_settings *bs;
2893 
2894 	if (!sb->repo->objects->commit_graph)
2895 		return;
2896 
2897 	bs = get_bloom_filter_settings(sb->repo);
2898 	if (!bs)
2899 		return;
2900 
2901 	bd = xmalloc(sizeof(struct blame_bloom_data));
2902 
2903 	bd->settings = bs;
2904 
2905 	bd->alloc = 4;
2906 	bd->nr = 0;
2907 	ALLOC_ARRAY(bd->keys, bd->alloc);
2908 
2909 	add_bloom_key(bd, sb->path);
2910 
2911 	sb->bloom_data = bd;
2912 }
2913 
cleanup_scoreboard(struct blame_scoreboard * sb)2914 void cleanup_scoreboard(struct blame_scoreboard *sb)
2915 {
2916 	if (sb->bloom_data) {
2917 		int i;
2918 		for (i = 0; i < sb->bloom_data->nr; i++) {
2919 			free(sb->bloom_data->keys[i]->hashes);
2920 			free(sb->bloom_data->keys[i]);
2921 		}
2922 		free(sb->bloom_data->keys);
2923 		FREE_AND_NULL(sb->bloom_data);
2924 
2925 		trace2_data_intmax("blame", sb->repo,
2926 				   "bloom/queries", bloom_count_queries);
2927 		trace2_data_intmax("blame", sb->repo,
2928 				   "bloom/response-no", bloom_count_no);
2929 	}
2930 }
2931