1 /*
2 * compose_delta.c: Delta window composition.
3 *
4 * ====================================================================
5 * Licensed to the Apache Software Foundation (ASF) under one
6 * or more contributor license agreements. See the NOTICE file
7 * distributed with this work for additional information
8 * regarding copyright ownership. The ASF licenses this file
9 * to you under the Apache License, Version 2.0 (the
10 * "License"); you may not use this file except in compliance
11 * with the License. You may obtain a copy of the License at
12 *
13 * http://www.apache.org/licenses/LICENSE-2.0
14 *
15 * Unless required by applicable law or agreed to in writing,
16 * software distributed under the License is distributed on an
17 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
18 * KIND, either express or implied. See the License for the
19 * specific language governing permissions and limitations
20 * under the License.
21 * ====================================================================
22 */
23
24
25 #include <assert.h>
26
27 #include <apr_general.h> /* For APR_INLINE */
28
29 #include "svn_delta.h"
30 #include "svn_pools.h"
31 #include "delta.h"
32
/* Define a MIN macro if this platform doesn't already have one.
   NOTE: evaluates each argument twice; only use it with
   side-effect-free expressions. */
#ifndef MIN
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#endif
37
38
39 /* ==================================================================== */
40 /* Support for efficient small-block allocation from pools. */
41
42 /* The following structs will be allocated and freed often: */
43
/* A node in the range index tree.  Each node lives in two structures
   at once: a splay tree keyed on 'offset', and an ordered
   doubly-linked list that lets neighbours be found without walking
   the tree. */
typedef struct range_index_node_t range_index_node_t;
struct range_index_node_t
{
  /* 'offset' and 'limit' define the range in the source window. */
  apr_size_t offset;
  apr_size_t limit;

  /* 'target_offset' is where that range is represented in the target. */
  apr_size_t target_offset;

  /* 'left' and 'right' link the node into a splay tree. */
  range_index_node_t *left, *right;

  /* 'prev' and 'next' link it into an ordered, doubly-linked list. */
  range_index_node_t *prev, *next;
};
61
/* Distinguishes whether a range in a range list must be materialised
   from the source stream or from already-produced target data. */
enum range_kind
{
  range_from_source,
  range_from_target
};
68
/* A node in a list of ranges for source and target op copies. */
typedef struct range_list_node_t range_list_node_t;
struct range_list_node_t
{
  /* Where does the range come from?
     'offset' and 'limit' always refer to the "virtual" source data
     for the second delta window.  For a target range, the actual
     offset to use for generating the target op is 'target_offset';
     that field isn't used by source ranges. */
  enum range_kind kind;

  /* 'offset' and 'limit' define the half-open range [offset, limit). */
  apr_size_t offset;
  apr_size_t limit;

  /* 'target_offset' is the start of the range in the target. */
  apr_size_t target_offset;

  /* 'prev' and 'next' link the node into an ordered, doubly-linked list. */
  range_list_node_t *prev, *next;
};
89
90
/* This is what will be allocated: a union large enough for either
   node type, so that a single freelist can recycle both kinds of
   node. */
typedef union alloc_block_t alloc_block_t;
union alloc_block_t
{
  range_index_node_t index_node;
  range_list_node_t list_node;

  /* Links free blocks into a freelist. */
  alloc_block_t *next_free;
};
101
102
103 /* Allocate a block. */
104 static APR_INLINE void *
alloc_block(apr_pool_t * pool,alloc_block_t ** free_list)105 alloc_block(apr_pool_t *pool, alloc_block_t **free_list)
106 {
107 alloc_block_t *block;
108 if (*free_list == NULL)
109 block = apr_palloc(pool, sizeof(*block));
110 else
111 {
112 block = *free_list;
113 *free_list = block->next_free;
114 }
115 return block;
116 }
117
118 /* Return the block back to the free list. */
119 static APR_INLINE void
free_block(void * ptr,alloc_block_t ** free_list)120 free_block(void *ptr, alloc_block_t **free_list)
121 {
122 /* Wrapper functions take care of type safety. */
123 alloc_block_t *const block = ptr;
124 block->next_free = *free_list;
125 *free_list = block;
126 }
127
128
129
130 /* ==================================================================== */
/* Mapping offsets in the target stream to txdelta ops. */
132
/* Index from target-stream offsets to delta ops.  'offs' holds
   'length' + 1 entries: offs[i] is the target offset where op i
   begins, and offs[length] is the total target length, so each op i
   covers the half-open range [offs[i], offs[i+1]). */
typedef struct offset_index_t
{
  int length;          /* Number of ops indexed. */
  apr_size_t *offs;    /* Array of length+1 cumulative offsets. */
} offset_index_t;
138
139 /* Create an index mapping target stream offsets to delta ops in
140 WINDOW. Allocate from POOL. */
141
142 static offset_index_t *
create_offset_index(const svn_txdelta_window_t * window,apr_pool_t * pool)143 create_offset_index(const svn_txdelta_window_t *window, apr_pool_t *pool)
144 {
145 offset_index_t *ndx = apr_palloc(pool, sizeof(*ndx));
146 apr_size_t offset = 0;
147 int i;
148
149 ndx->length = window->num_ops;
150 ndx->offs = apr_palloc(pool, (ndx->length + 1) * sizeof(*ndx->offs));
151
152 for (i = 0; i < ndx->length; ++i)
153 {
154 ndx->offs[i] = offset;
155 offset += window->ops[i].length;
156 }
157 ndx->offs[ndx->length] = offset;
158
159 return ndx;
160 }
161
/* Find the index of the delta op that defines the data at OFFSET in
   NDX.  HINT is an arbitrary position within NDX and doesn't even need
   to be valid.  To effectively speed up the search, use the last result
   as hint because most lookups come as a sequence of decreasing values
   for OFFSET and they concentrate on the lower end of the array. */

static apr_size_t
search_offset_index(const offset_index_t *ndx,
                    apr_size_t offset,
                    apr_size_t hint)
{
  apr_size_t lo, hi, op;

  /* OFFSET must fall inside the indexed target stream. */
  assert(offset < ndx->offs[ndx->length]);

  lo = 0;
  hi = ndx->length;

  /* If we got a valid hint, use it to reduce the range to cover.
     Note that this will only be useful if either the hint is a
     hit (i.e. equals the desired result) or narrows the range
     length by a factor larger than 2. */

  if (hint < hi)
    {
      if (offset < ndx->offs[hint])
        hi = hint;
      else if (offset < ndx->offs[hint+1])
        return hint;            /* Hint was an exact hit. */
      else
        lo = hint+1;
    }

  /* Ordinary binary search.  When the loop terminates, lo is one
     past the op that covers OFFSET (the assertion below checks the
     final half-open containment). */

  for (op = (lo + hi)/2; lo != hi; op = (lo + hi)/2)
    {
      if (offset < ndx->offs[op])
        hi = op;
      else
        lo = ++op;
    }

  --lo;
  assert(ndx->offs[lo] <= offset && offset < ndx->offs[lo + 1]);
  return lo;
}
209
210
211
212 /* ==================================================================== */
213 /* Mapping ranges in the source stream to ranges in the composed delta. */
214
/* The range index tree: a splay tree of range_index_node_t rooted at
   'tree', with a freelist for recycled nodes and the pool that backs
   all allocations. */
typedef struct range_index_t
{
  range_index_node_t *tree;    /* Root of the splay tree; NULL if empty. */
  alloc_block_t *free_list;    /* Recycled nodes for alloc_block(). */
  apr_pool_t *pool;            /* Source of fresh allocations. */
} range_index_t;
222
223 /* Create a range index tree. Allocate from POOL. */
224 static range_index_t *
create_range_index(apr_pool_t * pool)225 create_range_index(apr_pool_t *pool)
226 {
227 range_index_t *ndx = apr_palloc(pool, sizeof(*ndx));
228 ndx->tree = NULL;
229 ndx->pool = pool;
230 ndx->free_list = NULL;
231 return ndx;
232 }
233
234 /* Allocate a node for the range index tree. */
235 static range_index_node_t *
alloc_range_index_node(range_index_t * ndx,apr_size_t offset,apr_size_t limit,apr_size_t target_offset)236 alloc_range_index_node(range_index_t *ndx,
237 apr_size_t offset,
238 apr_size_t limit,
239 apr_size_t target_offset)
240 {
241 range_index_node_t *const node = alloc_block(ndx->pool, &ndx->free_list);
242 node->offset = offset;
243 node->limit = limit;
244 node->target_offset = target_offset;
245 node->left = node->right = NULL;
246 node->prev = node->next = NULL;
247 return node;
248 }
249
250 /* Free a node from the range index tree. */
251 static void
free_range_index_node(range_index_t * ndx,range_index_node_t * node)252 free_range_index_node(range_index_t *ndx, range_index_node_t *node)
253 {
254 if (node->next)
255 node->next->prev = node->prev;
256 if (node->prev)
257 node->prev->next = node->next;
258 free_block(node, &ndx->free_list);
259 }
260
261
/* Splay the index tree in NDX, using OFFSET as the key: a standard
   top-down splay, followed by a fix-up pass that guarantees the new
   root is the node with the largest 'offset' that is <= OFFSET
   (when any such node exists).  No-op on an empty tree. */

static void
splay_range_index(apr_size_t offset, range_index_t *ndx)
{
  range_index_node_t *tree = ndx->tree;
  range_index_node_t scratch_node;
  range_index_node_t *left, *right;

  if (tree == NULL)
    return;

  /* scratch_node collects the detached left and right subtrees
     during the top-down pass. */
  scratch_node.left = scratch_node.right = NULL;
  left = right = &scratch_node;

  for (;;)
    {
      if (offset < tree->offset)
        {
          if (tree->left != NULL
              && offset < tree->left->offset)
            {
              /* Right rotation */
              range_index_node_t *const node = tree->left;
              tree->left = node->right;
              node->right = tree;
              tree = node;
            }
          if (tree->left == NULL)
            break;

          /* Remember the right subtree */
          right->left = tree;
          right = tree;
          tree = tree->left;
        }
      else if (offset > tree->offset)
        {
          if (tree->right != NULL
              && offset > tree->right->offset)
            {
              /* Left rotation */
              range_index_node_t *const node = tree->right;
              tree->right = node->left;
              node->left = tree;
              tree = node;
            }
          if (tree->right == NULL)
            break;

          /* Remember the left subtree */
          left->right = tree;
          left = tree;
          tree = tree->right;
        }
      else
        break;
    }

  /* Link in the left and right subtrees */
  left->right = tree->left;
  right->left = tree->right;
  tree->left = scratch_node.right;
  tree->right = scratch_node.left;

  /* The basic top-down splay is finished, but we may still need to
     turn the tree around.  What we want is to put the node with the
     largest offset where node->offset <= offset at the top of the
     tree, so that we can insert the new data (or search for existing
     ranges) to the right of the root.  This makes cleaning up the
     tree after an insert much simpler, and -- incidentally -- makes
     the whole range index magic work. */
  if (offset < tree->offset && tree->left != NULL)
    {
      if (tree->left->right == NULL)
        {
          /* A single right rotation is enough. */
          range_index_node_t *const node = tree->left;
          tree->left = node->right; /* Which is always NULL. */
          node->right = tree;
          tree = node;
        }
      else
        {
          /* Slide down to the rightmost node in the left subtree. */
          range_index_node_t **nodep = &tree->left;
          while ((*nodep)->right != NULL)
            nodep = &(*nodep)->right;

          /* Now move this node to root in one giant promotion. */
          right = tree;
          left = tree->left;
          tree = *nodep;
          *nodep = tree->left;
          right->left = tree->right; /* Which is always NULL, too. */
          tree->left = left;
          tree->right = right;
        }
    }

  /* Sanity check: either the root starts at or before OFFSET, or
     no node in the tree does (the root is leftmost overall). */
  assert((offset >= tree->offset)
         || ((tree->left == NULL)
             && (tree->prev == NULL)));
  ndx->tree = tree;
}
368
369
370 /* Remove all ranges from NDX that fall into the root's range. To
371 keep the range index as small as possible, we must also remove
372 nodes that don't fall into the new range, but have become redundant
373 because the new range overlaps the beginning of the next range.
374 Like this:
375
376 new-range: |-----------------|
377 range-1: |-----------------|
378 range-2: |--------------------|
379
380 Before new-range was inserted, range-1 and range-2 were both
381 necessary. Now the union of new-range and range-2 completely covers
382 range-1, which has become redundant now.
383
384 FIXME: But, of course, there's a catch. range-1 must still remain
385 in the tree if we want to optimize the number of target copy ops in
386 the case were a copy falls within range-1, but starts before
387 range-2 and ends after new-range. */
388
389 static void
delete_subtree(range_index_t * ndx,range_index_node_t * node)390 delete_subtree(range_index_t *ndx, range_index_node_t *node)
391 {
392 if (node != NULL)
393 {
394 delete_subtree(ndx, node->left);
395 delete_subtree(ndx, node->right);
396 free_range_index_node(ndx, node);
397 }
398 }
399
/* Walk the root's right spine and delete every subtree whose ranges
   are superseded now that the root covers up to LIMIT.  See the big
   comment above delete_subtree() for the rationale. */
static void
clean_tree(range_index_t *ndx, apr_size_t limit)
{
  apr_size_t top_offset = limit + 1;
  range_index_node_t **nodep = &ndx->tree->right;
  while (*nodep != NULL)
    {
      range_index_node_t *const node = *nodep;
      /* 'offset' is the start of the nearest range known to lie to
         the right of this node (or top_offset when there is none
         closer). */
      apr_size_t const offset =
        (node->right != NULL && node->right->offset < top_offset
         ? node->right->offset
         : top_offset);

      if (node->limit <= limit
          || (node->offset < limit && offset < limit))
        {
          /* Node (and its whole right-hand subtree) is covered by
             the union of the root's new range and its neighbours:
             detach and free it. */
          *nodep = node->right;
          node->right = NULL;
          delete_subtree(ndx, node);
        }
      else
        {
          /* Node survives; continue cleaning below its left child. */
          top_offset = node->offset;
          nodep = &node->left;
        }
    }
}
427
428
/* Add a range [OFFSET, LIMIT) into NDX, recording TARGET_OFFSET as
   the position where the range's data lives in the composed target.
   If NDX already contains a range that encloses [OFFSET, LIMIT), do
   nothing.  Otherwise, remove all ranges from NDX that are superseded
   by the new range.
   NOTE: The range index must be splayed to OFFSET! */

static void
insert_range(apr_size_t offset, apr_size_t limit, apr_size_t target_offset,
             range_index_t *ndx)
{
  range_index_node_t *node = NULL;

  if (ndx->tree == NULL)
    {
      /* Empty tree: the new range becomes the root. */
      node = alloc_range_index_node(ndx, offset, limit, target_offset);
      ndx->tree = node;
    }
  else
    {
      if (offset == ndx->tree->offset
          && limit > ndx->tree->limit)
        {
          /* Same start as the root but reaching further: extend the
             root in place, then drop ranges it now supersedes. */
          ndx->tree->limit = limit;
          ndx->tree->target_offset = target_offset;
          clean_tree(ndx, limit);
        }
      else if (offset > ndx->tree->offset
               && limit > ndx->tree->limit)
        {
          /* We have to make the same sort of checks as clean_tree()
             does for superseded ranges.  Have to merge these someday. */

          const svn_boolean_t insert_range_p =
            (!ndx->tree->next
             || ndx->tree->limit < ndx->tree->next->offset
             || limit > ndx->tree->next->limit);

          if (insert_range_p)
            {
              /* Again, we have to check if the new node and the one
                 to the left of the root override root's range. */
              if (ndx->tree->prev && ndx->tree->prev->limit > offset)
                {
                  /* Replace the data in the splayed node. */
                  ndx->tree->offset = offset;
                  ndx->tree->limit = limit;
                  ndx->tree->target_offset = target_offset;
                }
              else
                {
                  /* Insert the range to the right of the splayed node,
                     linking it into both the tree and the neighbour
                     list, and make it the new root. */
                  node = alloc_range_index_node(ndx, offset, limit,
                                                target_offset);
                  if ((node->next = ndx->tree->next) != NULL)
                    node->next->prev = node;
                  ndx->tree->next = node;
                  node->prev = ndx->tree;

                  node->right = ndx->tree->right;
                  ndx->tree->right = NULL;
                  node->left = ndx->tree;
                  ndx->tree = node;
                }
              clean_tree(ndx, limit);
            }
          else
            /* Ignore the range */;
        }
      else if (offset < ndx->tree->offset)
        {
          /* The splay guarantees there is nothing left of the root
             when OFFSET precedes it. */
          assert(ndx->tree->left == NULL);

          /* Insert the range left of the splayed node */
          node = alloc_range_index_node(ndx, offset, limit, target_offset);
          node->left = node->prev = NULL;
          node->right = node->next = ndx->tree;
          ndx->tree = node->next->prev = node;
          clean_tree(ndx, limit);
        }
      else
        /* Ignore the range */;
    }
}
511
512
513
514 /* ==================================================================== */
515 /* Juggling with lists of ranges. */
516
517 /* Allocate a node and add it to the range list. LIST is the head of
518 the range list, TAIL is the last node in the list. NDX holds the
519 freelist; OFFSET, LIMIT and KIND are node data. */
520 static range_list_node_t *
alloc_range_list(range_list_node_t ** list,range_list_node_t ** tail,range_index_t * ndx,enum range_kind kind,apr_size_t offset,apr_size_t limit,apr_size_t target_offset)521 alloc_range_list(range_list_node_t **list,
522 range_list_node_t **tail,
523 range_index_t *ndx,
524 enum range_kind kind,
525 apr_size_t offset,
526 apr_size_t limit,
527 apr_size_t target_offset)
528 {
529 range_list_node_t *const node = alloc_block(ndx->pool, &ndx->free_list);
530 node->kind = kind;
531 node->offset = offset;
532 node->limit = limit;
533 node->target_offset = target_offset;
534 if (*list == NULL)
535 {
536 node->prev = node->next = NULL;
537 *list = *tail = node;
538 }
539 else
540 {
541 node->prev = *tail;
542 node->next = NULL;
543 (*tail)->next = node;
544 *tail = node;
545 }
546 return *list;
547 }
548
549 /* Free a range list. LIST is the head of the list, NDX holds the freelist. */
550 static void
free_range_list(range_list_node_t * list,range_index_t * ndx)551 free_range_list(range_list_node_t *list, range_index_t *ndx)
552 {
553 while (list)
554 {
555 range_list_node_t *const node = list;
556 list = node->next;
557 free_block(node, &ndx->free_list);
558 }
559 }
560
561
/* Based on the data in NDX, build a list of ranges that exactly cover
   [OFFSET, LIMIT) in the "virtual" source data: gaps between indexed
   ranges become range_from_source nodes, indexed stretches become
   range_from_target nodes.  Always returns a non-empty list.
   NOTE: The range index must be splayed to OFFSET! */

static range_list_node_t *
build_range_list(apr_size_t offset, apr_size_t limit, range_index_t *ndx)
{
  range_list_node_t *range_list = NULL;
  range_list_node_t *last_range = NULL;
  range_index_node_t *node = ndx->tree;

  while (offset < limit)
    {
      /* No (more) indexed ranges: the rest comes from the source. */
      if (node == NULL)
        return alloc_range_list(&range_list, &last_range, ndx,
                                range_from_source,
                                offset, limit, 0);

      if (offset < node->offset)
        {
          /* There is a gap before the current indexed range. */
          if (limit <= node->offset)
            return alloc_range_list(&range_list, &last_range, ndx,
                                    range_from_source,
                                    offset, limit, 0);
          else
            {
              /* Emit the gap, then continue at the range's start. */
              alloc_range_list(&range_list, &last_range, ndx,
                               range_from_source,
                               offset, node->offset, 0);
              offset = node->offset;
            }
        }
      else
        {
          /* TODO: (Potential optimization) Investigate if it would
             make sense to forbid short range_from_target lengths
             (this comment originally said "shorter than, say,
             VD_KEY_SIZE (see vdelta.c)", but Subversion no longer
             uses vdelta). */

          if (offset >= node->limit)
            node = node->next;        /* Range is behind us; advance. */
          else
            {
              /* OFFSET falls inside this indexed range; translate it
                 into the corresponding target position. */
              const apr_size_t target_offset =
                offset - node->offset + node->target_offset;

              if (limit <= node->limit)
                return alloc_range_list(&range_list, &last_range, ndx,
                                        range_from_target,
                                        offset, limit, target_offset);
              else
                {
                  /* Consume the covered part and move past it. */
                  alloc_range_list(&range_list, &last_range, ndx,
                                   range_from_target,
                                   offset, node->limit, target_offset);
                  offset = node->limit;
                  node = node->next;
                }
            }
        }
    }

  /* A range's offset isn't smaller than its limit?  Impossible! */
  SVN_ERR_MALFUNCTION_NO_RETURN();
}
628
629
/* Copy the instructions from WINDOW that define the range [OFFSET,
   LIMIT) in WINDOW's target stream to TARGET_OFFSET in the window
   represented by BUILD_BATON.  HINT is a position in the instructions
   array that helps finding the position for OFFSET.  A safe default
   is 0.  Use NDX to find the instructions in WINDOW.  Allocate space
   in BUILD_BATON from POOL.

   Target-copy ops are resolved recursively; overlapping target
   copies (self-referential "pattern fill" copies) are transposed and
   re-emitted, see the comments in the else-branch below. */

static void
copy_source_ops(apr_size_t offset, apr_size_t limit,
                apr_size_t target_offset,
                apr_size_t hint,
                svn_txdelta__ops_baton_t *build_baton,
                const svn_txdelta_window_t *window,
                const offset_index_t *ndx,
                apr_pool_t *pool)
{
  apr_size_t op_ndx = search_offset_index(ndx, offset, hint);
  for (;; ++op_ndx)
    {
      const svn_txdelta_op_t *const op = &window->ops[op_ndx];
      const apr_size_t *const off = &ndx->offs[op_ndx];
      /* fix_offset/fix_limit clip the op to [OFFSET, LIMIT): the
         number of leading resp. trailing bytes of the op that fall
         outside the requested range. */
      const apr_size_t fix_offset = (offset > off[0] ? offset - off[0] : 0);
      const apr_size_t fix_limit = (off[0] >= limit ? 0
                                    : (off[1] > limit ? off[1] - limit : 0));

      /* Ideally, we'd do this check before assigning fix_offset and
         fix_limit; but then we couldn't make them const whilst still
         adhering to C90 rules.  Instead, we're going to assume that a
         smart optimizing compiler will reorder this check before the
         local variable initialization. */
      if (off[0] >= limit)
        break;

      /* It would be extremely weird if the fixed-up op had zero length. */
      assert(fix_offset + fix_limit < op->length);

      if (op->action_code != svn_txdelta_target)
        {
          /* Delta ops that don't depend on the virtual target can be
             copied to the composite unchanged. */
          const char *const new_data = (op->action_code == svn_txdelta_new
                                        ? (window->new_data->data
                                           + op->offset + fix_offset)
                                        : NULL);

          svn_txdelta__insert_op(build_baton, op->action_code,
                                 op->offset + fix_offset,
                                 op->length - fix_offset - fix_limit,
                                 new_data, pool);
        }
      else
        {
          /* The source of a target copy must start before the current
             offset in the (virtual) target stream. */
          assert(op->offset < off[0]);

          if (op->offset + op->length - fix_limit <= off[0])
            {
              /* Non-overlapping target copy: resolve it by copying
                 the ops that produced the referenced bytes.  The
                 recursion _must_ end, otherwise the delta has
                 circular references, and that is not possible. */
              copy_source_ops(op->offset + fix_offset,
                              op->offset + op->length - fix_limit,
                              target_offset,
                              op_ndx,
                              build_baton, window, ndx, pool);
            }
          else
            {
              /* This is an overlapping target copy.
                 The idea here is to transpose the pattern, then generate
                 another overlapping copy. */
              const apr_size_t ptn_length = off[0] - op->offset;
              const apr_size_t ptn_overlap = fix_offset % ptn_length;
              apr_size_t fix_off = fix_offset;
              apr_size_t tgt_off = target_offset;
              assert(ptn_length > ptn_overlap);

              /* Unconditionally issue the second subrange of the
                 pattern.  This is always correct, since the outer
                 condition already verifies that there is an overlap
                 in the target copy. */
              {
                const apr_size_t length =
                  MIN(op->length - fix_off - fix_limit,
                      ptn_length - ptn_overlap);
                copy_source_ops(op->offset + ptn_overlap,
                                op->offset + ptn_overlap + length,
                                tgt_off,
                                op_ndx,
                                build_baton, window, ndx, pool);
                fix_off += length;
                tgt_off += length;
              }

              assert(fix_off + fix_limit <= op->length);
              if (ptn_overlap > 0
                  && fix_off + fix_limit < op->length)
                {
                  /* Issue the first subrange in the pattern. */
                  const apr_size_t length =
                    MIN(op->length - fix_off - fix_limit, ptn_overlap);
                  copy_source_ops(op->offset,
                                  op->offset + length,
                                  tgt_off,
                                  op_ndx,
                                  build_baton, window, ndx, pool);
                  fix_off += length;
                  tgt_off += length;
                }

              assert(fix_off + fix_limit <= op->length);
              if (fix_off + fix_limit < op->length)
                {
                  /* Now multiply the pattern: one overlapping target
                     copy in the composite reproduces the remainder. */
                  svn_txdelta__insert_op(build_baton, svn_txdelta_target,
                                         tgt_off - ptn_length,
                                         op->length - fix_off - fix_limit,
                                         NULL, pool);
                }
            }
        }

      /* Adjust the target offset for the next op in the list. */
      target_offset += op->length - fix_offset - fix_limit;
    }
}
756
757
758
759 /* ==================================================================== */
760 /* Bringing it all together. */
761
762
/* Compose WINDOW_A and WINDOW_B into a single delta window that maps
   WINDOW_A's source view directly to WINDOW_B's target view.  The
   composite (and its new-data buffer) is allocated in POOL; all
   intermediate indexes live in a subpool destroyed before return. */
svn_txdelta_window_t *
svn_txdelta_compose_windows(const svn_txdelta_window_t *window_A,
                            const svn_txdelta_window_t *window_B,
                            apr_pool_t *pool)
{
  svn_txdelta__ops_baton_t build_baton = { 0 };
  svn_txdelta_window_t *composite;
  apr_pool_t *subpool = svn_pool_create(pool);
  offset_index_t *offset_index = create_offset_index(window_A, subpool);
  range_index_t *range_index = create_range_index(subpool);
  apr_size_t target_offset = 0;
  int i;

  /* Read the description of the delta composition algorithm in
     notes/fs-improvements.txt before going any further.
     You have been warned. */
  build_baton.new_data = svn_stringbuf_create_empty(pool);
  for (i = 0; i < window_B->num_ops; ++i)
    {
      const svn_txdelta_op_t *const op = &window_B->ops[i];
      if (op->action_code != svn_txdelta_source)
        {
          /* Delta ops that don't depend on the source can be copied
             to the composite unchanged. */
          const char *const new_data =
            (op->action_code == svn_txdelta_new
             ? window_B->new_data->data + op->offset
             : NULL);
          svn_txdelta__insert_op(&build_baton, op->action_code,
                                 op->offset, op->length,
                                 new_data, pool);
        }
      else
        {
          /* NOTE: Remember that `offset' and `limit' refer to
             positions in window_B's _source_ stream, which is the
             same as window_A's _target_ stream! */
          const apr_size_t offset = op->offset;
          const apr_size_t limit = op->offset + op->length;
          range_list_node_t *range_list, *range;
          apr_size_t tgt_off = target_offset;

          /* Decompose [offset, limit) into stretches that come from
             window_A's source vs. from already-composed target data. */
          splay_range_index(offset, range_index);
          range_list = build_range_list(offset, limit, range_index);

          for (range = range_list; range; range = range->next)
            {
              if (range->kind == range_from_target)
                svn_txdelta__insert_op(&build_baton, svn_txdelta_target,
                                       range->target_offset,
                                       range->limit - range->offset,
                                       NULL, pool);
              else
                copy_source_ops(range->offset, range->limit, tgt_off, 0,
                                &build_baton, window_A, offset_index,
                                pool);

              tgt_off += range->limit - range->offset;
            }
          /* The emitted ranges must cover the op exactly. */
          assert(tgt_off == target_offset + op->length);

          free_range_list(range_list, range_index);
          /* Record that [offset, limit) is now materialised at
             target_offset in the composite's target stream. */
          insert_range(offset, limit, target_offset, range_index);
        }

      /* Remember the new offset in the would-be target stream. */
      target_offset += op->length;
    }

  svn_pool_destroy(subpool);

  /* The composite maps window_A's source view onto window_B's
     target view. */
  composite = svn_txdelta__make_window(&build_baton, pool);
  composite->sview_offset = window_A->sview_offset;
  composite->sview_len = window_A->sview_len;
  composite->tview_len = window_B->tview_len;
  return composite;
}
840