/*
 *  LibXDiff by Davide Libenzi ( File Differential Library )
 *  Copyright (C) 2003  Davide Libenzi
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2.1 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see
 *  <http://www.gnu.org/licenses/>.
 *
 *  Davide Libenzi <davidel@xmailserver.org>
 *
 */
22
23#include "xinclude.h"
24
Linus Torvalds34435462006-03-24 20:13:22 -080025#define XDL_MAX_COST_MIN 256
26#define XDL_HEUR_MIN_COST 256
Pierre Habouzitf630cfd2009-07-22 23:34:34 +020027#define XDL_LINE_MAX (long)((1UL << (CHAR_BIT * sizeof(long) - 1)) - 1)
Linus Torvalds34435462006-03-24 20:13:22 -080028#define XDL_SNAKE_CNT 20
29#define XDL_K_HEUR 4
30
Linus Torvalds34435462006-03-24 20:13:22 -080031typedef struct s_xdpsplit {
32 long i1, i2;
33 int min_lo, min_hi;
34} xdpsplit_t;
35
Linus Torvalds34435462006-03-24 20:13:22 -080036/*
37 * See "An O(ND) Difference Algorithm and its Variations", by Eugene Myers.
38 * Basically considers a "box" (off1, off2, lim1, lim2) and scan from both
39 * the forward diagonal starting from (off1, off2) and the backward diagonal
40 * starting from (lim1, lim2). If the K values on the same diagonal crosses
Johannes Schindelin03d3b122019-10-08 11:27:52 -070041 * returns the furthest point of reach. We might encounter expensive edge cases
42 * using this algorithm, so a little bit of heuristic is needed to cut the
43 * search and to return a suboptimal point.
Linus Torvalds34435462006-03-24 20:13:22 -080044 */
45static long xdl_split(unsigned long const *ha1, long off1, long lim1,
46 unsigned long const *ha2, long off2, long lim2,
47 long *kvdf, long *kvdb, int need_min, xdpsplit_t *spl,
48 xdalgoenv_t *xenv) {
49 long dmin = off1 - lim2, dmax = lim1 - off2;
50 long fmid = off1 - off2, bmid = lim1 - lim2;
51 long odd = (fmid - bmid) & 1;
52 long fmin = fmid, fmax = fmid;
53 long bmin = bmid, bmax = bmid;
54 long ec, d, i1, i2, prev1, best, dd, v, k;
55
56 /*
57 * Set initial diagonal values for both forward and backward path.
58 */
59 kvdf[fmid] = off1;
60 kvdb[bmid] = lim1;
61
62 for (ec = 1;; ec++) {
63 int got_snake = 0;
64
65 /*
Johannes Schindelin03d3b122019-10-08 11:27:52 -070066 * We need to extend the diagonal "domain" by one. If the next
Linus Torvalds34435462006-03-24 20:13:22 -080067 * values exits the box boundaries we need to change it in the
Johannes Schindelin03d3b122019-10-08 11:27:52 -070068 * opposite direction because (max - min) must be a power of
69 * two.
70 *
Pavel Roskin82e5a822006-07-10 01:50:18 -040071 * Also we initialize the external K value to -1 so that we can
Johannes Schindelin03d3b122019-10-08 11:27:52 -070072 * avoid extra conditions in the check inside the core loop.
Linus Torvalds34435462006-03-24 20:13:22 -080073 */
74 if (fmin > dmin)
75 kvdf[--fmin - 1] = -1;
76 else
77 ++fmin;
78 if (fmax < dmax)
79 kvdf[++fmax + 1] = -1;
80 else
81 --fmax;
82
83 for (d = fmax; d >= fmin; d -= 2) {
84 if (kvdf[d - 1] >= kvdf[d + 1])
85 i1 = kvdf[d - 1] + 1;
86 else
87 i1 = kvdf[d + 1];
88 prev1 = i1;
89 i2 = i1 - d;
90 for (; i1 < lim1 && i2 < lim2 && ha1[i1] == ha2[i2]; i1++, i2++);
91 if (i1 - prev1 > xenv->snake_cnt)
92 got_snake = 1;
93 kvdf[d] = i1;
94 if (odd && bmin <= d && d <= bmax && kvdb[d] <= i1) {
95 spl->i1 = i1;
96 spl->i2 = i2;
97 spl->min_lo = spl->min_hi = 1;
98 return ec;
99 }
100 }
101
102 /*
Johannes Schindelin03d3b122019-10-08 11:27:52 -0700103 * We need to extend the diagonal "domain" by one. If the next
Linus Torvalds34435462006-03-24 20:13:22 -0800104 * values exits the box boundaries we need to change it in the
Johannes Schindelin03d3b122019-10-08 11:27:52 -0700105 * opposite direction because (max - min) must be a power of
106 * two.
107 *
Pavel Roskin82e5a822006-07-10 01:50:18 -0400108 * Also we initialize the external K value to -1 so that we can
Johannes Schindelin03d3b122019-10-08 11:27:52 -0700109 * avoid extra conditions in the check inside the core loop.
Linus Torvalds34435462006-03-24 20:13:22 -0800110 */
111 if (bmin > dmin)
112 kvdb[--bmin - 1] = XDL_LINE_MAX;
113 else
114 ++bmin;
115 if (bmax < dmax)
116 kvdb[++bmax + 1] = XDL_LINE_MAX;
117 else
118 --bmax;
119
120 for (d = bmax; d >= bmin; d -= 2) {
121 if (kvdb[d - 1] < kvdb[d + 1])
122 i1 = kvdb[d - 1];
123 else
124 i1 = kvdb[d + 1] - 1;
125 prev1 = i1;
126 i2 = i1 - d;
127 for (; i1 > off1 && i2 > off2 && ha1[i1 - 1] == ha2[i2 - 1]; i1--, i2--);
128 if (prev1 - i1 > xenv->snake_cnt)
129 got_snake = 1;
130 kvdb[d] = i1;
131 if (!odd && fmin <= d && d <= fmax && i1 <= kvdf[d]) {
132 spl->i1 = i1;
133 spl->i2 = i2;
134 spl->min_lo = spl->min_hi = 1;
135 return ec;
136 }
137 }
138
139 if (need_min)
140 continue;
141
142 /*
143 * If the edit cost is above the heuristic trigger and if
144 * we got a good snake, we sample current diagonals to see
Johannes Schindelin03d3b122019-10-08 11:27:52 -0700145 * if some of them have reached an "interesting" path. Our
Linus Torvalds34435462006-03-24 20:13:22 -0800146 * measure is a function of the distance from the diagonal
147 * corner (i1 + i2) penalized with the distance from the
148 * mid diagonal itself. If this value is above the current
149 * edit cost times a magic factor (XDL_K_HEUR) we consider
150 * it interesting.
151 */
152 if (got_snake && ec > xenv->heur_min) {
153 for (best = 0, d = fmax; d >= fmin; d -= 2) {
154 dd = d > fmid ? d - fmid: fmid - d;
155 i1 = kvdf[d];
156 i2 = i1 - d;
157 v = (i1 - off1) + (i2 - off2) - dd;
158
159 if (v > XDL_K_HEUR * ec && v > best &&
160 off1 + xenv->snake_cnt <= i1 && i1 < lim1 &&
161 off2 + xenv->snake_cnt <= i2 && i2 < lim2) {
162 for (k = 1; ha1[i1 - k] == ha2[i2 - k]; k++)
163 if (k == xenv->snake_cnt) {
164 best = v;
165 spl->i1 = i1;
166 spl->i2 = i2;
167 break;
168 }
169 }
170 }
171 if (best > 0) {
172 spl->min_lo = 1;
173 spl->min_hi = 0;
174 return ec;
175 }
176
177 for (best = 0, d = bmax; d >= bmin; d -= 2) {
178 dd = d > bmid ? d - bmid: bmid - d;
179 i1 = kvdb[d];
180 i2 = i1 - d;
181 v = (lim1 - i1) + (lim2 - i2) - dd;
182
183 if (v > XDL_K_HEUR * ec && v > best &&
184 off1 < i1 && i1 <= lim1 - xenv->snake_cnt &&
185 off2 < i2 && i2 <= lim2 - xenv->snake_cnt) {
186 for (k = 0; ha1[i1 + k] == ha2[i2 + k]; k++)
187 if (k == xenv->snake_cnt - 1) {
188 best = v;
189 spl->i1 = i1;
190 spl->i2 = i2;
191 break;
192 }
193 }
194 }
195 if (best > 0) {
196 spl->min_lo = 0;
197 spl->min_hi = 1;
198 return ec;
199 }
200 }
201
202 /*
Johannes Schindelin03d3b122019-10-08 11:27:52 -0700203 * Enough is enough. We spent too much time here and now we
204 * collect the furthest reaching path using the (i1 + i2)
205 * measure.
Linus Torvalds34435462006-03-24 20:13:22 -0800206 */
207 if (ec >= xenv->mxcost) {
208 long fbest, fbest1, bbest, bbest1;
209
Marco Roeland0ed49a32006-04-08 17:27:20 +0200210 fbest = fbest1 = -1;
Linus Torvalds34435462006-03-24 20:13:22 -0800211 for (d = fmax; d >= fmin; d -= 2) {
212 i1 = XDL_MIN(kvdf[d], lim1);
213 i2 = i1 - d;
214 if (lim2 < i2)
215 i1 = lim2 + d, i2 = lim2;
216 if (fbest < i1 + i2) {
217 fbest = i1 + i2;
218 fbest1 = i1;
219 }
220 }
221
Marco Roeland0ed49a32006-04-08 17:27:20 +0200222 bbest = bbest1 = XDL_LINE_MAX;
Linus Torvalds34435462006-03-24 20:13:22 -0800223 for (d = bmax; d >= bmin; d -= 2) {
224 i1 = XDL_MAX(off1, kvdb[d]);
225 i2 = i1 - d;
226 if (i2 < off2)
227 i1 = off2 + d, i2 = off2;
228 if (i1 + i2 < bbest) {
229 bbest = i1 + i2;
230 bbest1 = i1;
231 }
232 }
233
234 if ((lim1 + lim2) - bbest < fbest - (off1 + off2)) {
235 spl->i1 = fbest1;
236 spl->i2 = fbest - fbest1;
237 spl->min_lo = 1;
238 spl->min_hi = 0;
239 } else {
240 spl->i1 = bbest1;
241 spl->i2 = bbest - bbest1;
242 spl->min_lo = 0;
243 spl->min_hi = 1;
244 }
245 return ec;
246 }
247 }
Linus Torvalds34435462006-03-24 20:13:22 -0800248}
249
250
251/*
Johannes Schindelin03d3b122019-10-08 11:27:52 -0700252 * Rule: "Divide et Impera" (divide & conquer). Recursively split the box in
253 * sub-boxes by calling the box splitting function. Note that the real job
254 * (marking changed lines) is done in the two boundary reaching checks.
Linus Torvalds34435462006-03-24 20:13:22 -0800255 */
256int xdl_recs_cmp(diffdata_t *dd1, long off1, long lim1,
257 diffdata_t *dd2, long off2, long lim2,
258 long *kvdf, long *kvdb, int need_min, xdalgoenv_t *xenv) {
259 unsigned long const *ha1 = dd1->ha, *ha2 = dd2->ha;
260
261 /*
262 * Shrink the box by walking through each diagonal snake (SW and NE).
263 */
264 for (; off1 < lim1 && off2 < lim2 && ha1[off1] == ha2[off2]; off1++, off2++);
265 for (; off1 < lim1 && off2 < lim2 && ha1[lim1 - 1] == ha2[lim2 - 1]; lim1--, lim2--);
266
267 /*
268 * If one dimension is empty, then all records on the other one must
269 * be obviously changed.
270 */
271 if (off1 == lim1) {
272 char *rchg2 = dd2->rchg;
273 long *rindex2 = dd2->rindex;
274
275 for (; off2 < lim2; off2++)
276 rchg2[rindex2[off2]] = 1;
277 } else if (off2 == lim2) {
278 char *rchg1 = dd1->rchg;
279 long *rindex1 = dd1->rindex;
280
281 for (; off1 < lim1; off1++)
282 rchg1[rindex1[off1]] = 1;
283 } else {
Linus Torvalds34435462006-03-24 20:13:22 -0800284 xdpsplit_t spl;
Marco Roeland0ed49a32006-04-08 17:27:20 +0200285 spl.i1 = spl.i2 = 0;
Linus Torvalds34435462006-03-24 20:13:22 -0800286
287 /*
288 * Divide ...
289 */
Benjamin Kramer8e24cba2009-03-15 22:01:20 +0100290 if (xdl_split(ha1, off1, lim1, ha2, off2, lim2, kvdf, kvdb,
291 need_min, &spl, xenv) < 0) {
Linus Torvalds34435462006-03-24 20:13:22 -0800292
293 return -1;
294 }
295
296 /*
297 * ... et Impera.
298 */
299 if (xdl_recs_cmp(dd1, off1, spl.i1, dd2, off2, spl.i2,
300 kvdf, kvdb, spl.min_lo, xenv) < 0 ||
301 xdl_recs_cmp(dd1, spl.i1, lim1, dd2, spl.i2, lim2,
302 kvdf, kvdb, spl.min_hi, xenv) < 0) {
303
304 return -1;
305 }
306 }
307
308 return 0;
309}
310
311
312int xdl_do_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp,
313 xdfenv_t *xe) {
314 long ndiags;
315 long *kvd, *kvdf, *kvdb;
316 xdalgoenv_t xenv;
317 diffdata_t dd1, dd2;
Phillip Wood9df0fc32022-02-16 10:15:06 +0000318 int res;
Linus Torvalds34435462006-03-24 20:13:22 -0800319
Phillip Wood9df0fc32022-02-16 10:15:06 +0000320 if (xdl_prepare_env(mf1, mf2, xpp, xe) < 0)
Linus Torvalds34435462006-03-24 20:13:22 -0800321 return -1;
Phillip Wood9df0fc32022-02-16 10:15:06 +0000322
323 if (XDF_DIFF_ALG(xpp->flags) == XDF_PATIENCE_DIFF) {
Jeff King12a58f92022-08-20 03:36:25 -0400324 res = xdl_do_patience_diff(xpp, xe);
Phillip Wood9df0fc32022-02-16 10:15:06 +0000325 goto out;
326 }
327
328 if (XDF_DIFF_ALG(xpp->flags) == XDF_HISTOGRAM_DIFF) {
Jeff Kingf1d01902022-08-19 04:49:41 -0400329 res = xdl_do_histogram_diff(xpp, xe);
Phillip Wood9df0fc32022-02-16 10:15:06 +0000330 goto out;
Linus Torvalds34435462006-03-24 20:13:22 -0800331 }
332
333 /*
Johannes Schindelin03d3b122019-10-08 11:27:52 -0700334 * Allocate and setup K vectors to be used by the differential
335 * algorithm.
336 *
Linus Torvalds34435462006-03-24 20:13:22 -0800337 * One is to store the forward path and one to store the backward path.
338 */
339 ndiags = xe->xdf1.nreff + xe->xdf2.nreff + 3;
Phillip Woodabf04bd2022-07-08 16:25:16 +0000340 if (!XDL_ALLOC_ARRAY(kvd, 2 * ndiags + 2)) {
Linus Torvalds34435462006-03-24 20:13:22 -0800341
342 xdl_free_env(xe);
343 return -1;
344 }
345 kvdf = kvd;
346 kvdb = kvdf + ndiags;
347 kvdf += xe->xdf2.nreff + 1;
348 kvdb += xe->xdf2.nreff + 1;
349
Davide Libenzica557af2006-04-03 18:47:55 -0700350 xenv.mxcost = xdl_bogosqrt(ndiags);
Linus Torvalds34435462006-03-24 20:13:22 -0800351 if (xenv.mxcost < XDL_MAX_COST_MIN)
352 xenv.mxcost = XDL_MAX_COST_MIN;
353 xenv.snake_cnt = XDL_SNAKE_CNT;
354 xenv.heur_min = XDL_HEUR_MIN_COST;
355
356 dd1.nrec = xe->xdf1.nreff;
357 dd1.ha = xe->xdf1.ha;
358 dd1.rchg = xe->xdf1.rchg;
359 dd1.rindex = xe->xdf1.rindex;
360 dd2.nrec = xe->xdf2.nreff;
361 dd2.ha = xe->xdf2.ha;
362 dd2.rchg = xe->xdf2.rchg;
363 dd2.rindex = xe->xdf2.rindex;
364
Phillip Wood9df0fc32022-02-16 10:15:06 +0000365 res = xdl_recs_cmp(&dd1, 0, dd1.nrec, &dd2, 0, dd2.nrec,
366 kvdf, kvdb, (xpp->flags & XDF_NEED_MINIMAL) != 0,
367 &xenv);
Linus Torvalds34435462006-03-24 20:13:22 -0800368 xdl_free(kvd);
Phillip Wood9df0fc32022-02-16 10:15:06 +0000369 out:
370 if (res < 0)
371 xdl_free_env(xe);
Linus Torvalds34435462006-03-24 20:13:22 -0800372
Phillip Wood9df0fc32022-02-16 10:15:06 +0000373 return res;
Linus Torvalds34435462006-03-24 20:13:22 -0800374}
375
376
377static xdchange_t *xdl_add_change(xdchange_t *xscr, long i1, long i2, long chg1, long chg2) {
378 xdchange_t *xch;
379
380 if (!(xch = (xdchange_t *) xdl_malloc(sizeof(xdchange_t))))
381 return NULL;
382
383 xch->next = xscr;
384 xch->i1 = i1;
385 xch->i2 = i2;
386 xch->chg1 = chg1;
387 xch->chg2 = chg2;
Antoine Pelisse36617af2013-06-19 20:46:07 +0200388 xch->ignore = 0;
Linus Torvalds34435462006-03-24 20:13:22 -0800389
390 return xch;
391}
392
393
Jeff King1e45db12021-12-03 00:12:01 -0500394static int recs_match(xrecord_t *rec1, xrecord_t *rec2)
Jacob Keller92e5b622016-04-15 16:01:45 -0700395{
Phillip Wood6b13bc32021-11-17 11:20:25 +0000396 return (rec1->ha == rec2->ha);
Jacob Keller92e5b622016-04-15 16:01:45 -0700397}
398
Michael Haggertye8adf232016-08-22 13:22:44 +0200399/*
Michael Haggerty433860f2016-09-05 11:44:51 +0200400 * If a line is indented more than this, get_indent() just returns this value.
401 * This avoids having to do absurd amounts of work for data that are not
Johannes Schindelin03d3b122019-10-08 11:27:52 -0700402 * human-readable text, and also ensures that the output of get_indent fits
403 * within an int.
Michael Haggerty433860f2016-09-05 11:44:51 +0200404 */
405#define MAX_INDENT 200
406
407/*
408 * Return the amount of indentation of the specified line, treating TAB as 8
409 * columns. Return -1 if line is empty or contains only whitespace. Clamp the
410 * output value at MAX_INDENT.
411 */
412static int get_indent(xrecord_t *rec)
413{
414 long i;
415 int ret = 0;
416
417 for (i = 0; i < rec->size; i++) {
418 char c = rec->ptr[i];
419
420 if (!XDL_ISSPACE(c))
421 return ret;
422 else if (c == ' ')
423 ret += 1;
424 else if (c == '\t')
425 ret += 8 - ret % 8;
426 /* ignore other whitespace characters */
427
428 if (ret >= MAX_INDENT)
429 return MAX_INDENT;
430 }
431
432 /* The line contains only whitespace. */
433 return -1;
434}
435
436/*
Johannes Schindelin03d3b122019-10-08 11:27:52 -0700437 * If more than this number of consecutive blank rows are found, just return
438 * this value. This avoids requiring O(N^2) work for pathological cases, and
439 * also ensures that the output of score_split fits in an int.
Michael Haggerty433860f2016-09-05 11:44:51 +0200440 */
441#define MAX_BLANKS 20
442
443/* Characteristics measured about a hypothetical split position. */
444struct split_measurement {
445 /*
446 * Is the split at the end of the file (aside from any blank lines)?
447 */
448 int end_of_file;
449
450 /*
Johannes Schindelin03d3b122019-10-08 11:27:52 -0700451 * How much is the line immediately following the split indented (or -1
452 * if the line is blank):
Michael Haggerty433860f2016-09-05 11:44:51 +0200453 */
454 int indent;
455
456 /*
457 * How many consecutive lines above the split are blank?
458 */
459 int pre_blank;
460
461 /*
Johannes Schindelin03d3b122019-10-08 11:27:52 -0700462 * How much is the nearest non-blank line above the split indented (or
463 * -1 if there is no such line)?
Michael Haggerty433860f2016-09-05 11:44:51 +0200464 */
465 int pre_indent;
466
467 /*
468 * How many lines after the line following the split are blank?
469 */
470 int post_blank;
471
472 /*
473 * How much is the nearest non-blank line after the line following the
474 * split indented (or -1 if there is no such line)?
475 */
476 int post_indent;
477};
478
479struct split_score {
480 /* The effective indent of this split (smaller is preferred). */
481 int effective_indent;
482
483 /* Penalty for this split (smaller is preferred). */
484 int penalty;
485};
486
487/*
488 * Fill m with information about a hypothetical split of xdf above line split.
489 */
490static void measure_split(const xdfile_t *xdf, long split,
491 struct split_measurement *m)
492{
493 long i;
494
495 if (split >= xdf->nrec) {
496 m->end_of_file = 1;
497 m->indent = -1;
498 } else {
499 m->end_of_file = 0;
500 m->indent = get_indent(xdf->recs[split]);
501 }
502
503 m->pre_blank = 0;
504 m->pre_indent = -1;
505 for (i = split - 1; i >= 0; i--) {
506 m->pre_indent = get_indent(xdf->recs[i]);
507 if (m->pre_indent != -1)
508 break;
509 m->pre_blank += 1;
510 if (m->pre_blank == MAX_BLANKS) {
511 m->pre_indent = 0;
512 break;
513 }
514 }
515
516 m->post_blank = 0;
517 m->post_indent = -1;
518 for (i = split + 1; i < xdf->nrec; i++) {
519 m->post_indent = get_indent(xdf->recs[i]);
520 if (m->post_indent != -1)
521 break;
522 m->post_blank += 1;
523 if (m->post_blank == MAX_BLANKS) {
524 m->post_indent = 0;
525 break;
526 }
527 }
528}
529
530/*
531 * The empirically-determined weight factors used by score_split() below.
532 * Larger values means that the position is a less favorable place to split.
533 *
534 * Note that scores are only ever compared against each other, so multiplying
535 * all of these weight/penalty values by the same factor wouldn't change the
536 * heuristic's behavior. Still, we need to set that arbitrary scale *somehow*.
537 * In practice, these numbers are chosen to be large enough that they can be
538 * adjusted relative to each other with sufficient precision despite using
539 * integer math.
540 */
541
542/* Penalty if there are no non-blank lines before the split */
543#define START_OF_FILE_PENALTY 1
544
545/* Penalty if there are no non-blank lines after the split */
546#define END_OF_FILE_PENALTY 21
547
548/* Multiplier for the number of blank lines around the split */
549#define TOTAL_BLANK_WEIGHT (-30)
550
551/* Multiplier for the number of blank lines after the split */
552#define POST_BLANK_WEIGHT 6
553
554/*
555 * Penalties applied if the line is indented more than its predecessor
556 */
557#define RELATIVE_INDENT_PENALTY (-4)
558#define RELATIVE_INDENT_WITH_BLANK_PENALTY 10
559
560/*
561 * Penalties applied if the line is indented less than both its predecessor and
562 * its successor
563 */
564#define RELATIVE_OUTDENT_PENALTY 24
565#define RELATIVE_OUTDENT_WITH_BLANK_PENALTY 17
566
567/*
568 * Penalties applied if the line is indented less than its predecessor but not
569 * less than its successor
570 */
571#define RELATIVE_DEDENT_PENALTY 23
572#define RELATIVE_DEDENT_WITH_BLANK_PENALTY 17
573
574/*
575 * We only consider whether the sum of the effective indents for splits are
576 * less than (-1), equal to (0), or greater than (+1) each other. The resulting
577 * value is multiplied by the following weight and combined with the penalty to
578 * determine the better of two scores.
579 */
580#define INDENT_WEIGHT 60
581
582/*
Stefan Beller301ef852018-07-27 15:23:56 -0700583 * How far do we slide a hunk at most?
584 */
585#define INDENT_HEURISTIC_MAX_SLIDING 100
586
587/*
Michael Haggerty433860f2016-09-05 11:44:51 +0200588 * Compute a badness score for the hypothetical split whose measurements are
Johannes Schindelin03d3b122019-10-08 11:27:52 -0700589 * stored in m. The weight factors were determined empirically using the tools
590 * and corpus described in
Michael Haggerty433860f2016-09-05 11:44:51 +0200591 *
592 * https://github.com/mhagger/diff-slider-tools
593 *
Johannes Schindelin03d3b122019-10-08 11:27:52 -0700594 * Also see that project if you want to improve the weights based on, for
595 * example, a larger or more diverse corpus.
Michael Haggerty433860f2016-09-05 11:44:51 +0200596 */
597static void score_add_split(const struct split_measurement *m, struct split_score *s)
598{
599 /*
600 * A place to accumulate penalty factors (positive makes this index more
601 * favored):
602 */
603 int post_blank, total_blank, indent, any_blanks;
604
605 if (m->pre_indent == -1 && m->pre_blank == 0)
606 s->penalty += START_OF_FILE_PENALTY;
607
608 if (m->end_of_file)
609 s->penalty += END_OF_FILE_PENALTY;
610
611 /*
612 * Set post_blank to the number of blank lines following the split,
613 * including the line immediately after the split:
614 */
615 post_blank = (m->indent == -1) ? 1 + m->post_blank : 0;
616 total_blank = m->pre_blank + post_blank;
617
618 /* Penalties based on nearby blank lines: */
619 s->penalty += TOTAL_BLANK_WEIGHT * total_blank;
620 s->penalty += POST_BLANK_WEIGHT * post_blank;
621
622 if (m->indent != -1)
623 indent = m->indent;
624 else
625 indent = m->post_indent;
626
627 any_blanks = (total_blank != 0);
628
629 /* Note that the effective indent is -1 at the end of the file: */
630 s->effective_indent += indent;
631
632 if (indent == -1) {
633 /* No additional adjustments needed. */
634 } else if (m->pre_indent == -1) {
635 /* No additional adjustments needed. */
636 } else if (indent > m->pre_indent) {
637 /*
638 * The line is indented more than its predecessor.
639 */
640 s->penalty += any_blanks ?
641 RELATIVE_INDENT_WITH_BLANK_PENALTY :
642 RELATIVE_INDENT_PENALTY;
643 } else if (indent == m->pre_indent) {
644 /*
645 * The line has the same indentation level as its predecessor.
646 * No additional adjustments needed.
647 */
648 } else {
649 /*
650 * The line is indented less than its predecessor. It could be
651 * the block terminator of the previous block, but it could
652 * also be the start of a new block (e.g., an "else" block, or
653 * maybe the previous block didn't have a block terminator).
654 * Try to distinguish those cases based on what comes next:
655 */
656 if (m->post_indent != -1 && m->post_indent > indent) {
657 /*
658 * The following line is indented more. So it is likely
659 * that this line is the start of a block.
660 */
661 s->penalty += any_blanks ?
662 RELATIVE_OUTDENT_WITH_BLANK_PENALTY :
663 RELATIVE_OUTDENT_PENALTY;
664 } else {
665 /*
666 * That was probably the end of a block.
667 */
668 s->penalty += any_blanks ?
669 RELATIVE_DEDENT_WITH_BLANK_PENALTY :
670 RELATIVE_DEDENT_PENALTY;
671 }
672 }
673}
674
675static int score_cmp(struct split_score *s1, struct split_score *s2)
676{
677 /* -1 if s1.effective_indent < s2->effective_indent, etc. */
678 int cmp_indents = ((s1->effective_indent > s2->effective_indent) -
679 (s1->effective_indent < s2->effective_indent));
680
681 return INDENT_WEIGHT * cmp_indents + (s1->penalty - s2->penalty);
682}
683
684/*
Michael Haggertye8adf232016-08-22 13:22:44 +0200685 * Represent a group of changed lines in an xdfile_t (i.e., a contiguous group
686 * of lines that was inserted or deleted from the corresponding version of the
687 * file). We consider there to be such a group at the beginning of the file, at
688 * the end of the file, and between any two unchanged lines, though most such
689 * groups will usually be empty.
690 *
691 * If the first line in a group is equal to the line following the group, then
692 * the group can be slid down. Similarly, if the last line in a group is equal
693 * to the line preceding the group, then the group can be slid up. See
694 * group_slide_down() and group_slide_up().
695 *
696 * Note that loops that are testing for changed lines in xdf->rchg do not need
697 * index bounding since the array is prepared with a zero at position -1 and N.
698 */
Jeff King134e40d2016-09-27 00:37:33 -0400699struct xdlgroup {
Michael Haggertye8adf232016-08-22 13:22:44 +0200700 /*
701 * The index of the first changed line in the group, or the index of
702 * the unchanged line above which the (empty) group is located.
703 */
704 long start;
Davide Libenzi295ba2f2006-04-13 16:45:13 -0700705
706 /*
Michael Haggertye8adf232016-08-22 13:22:44 +0200707 * The index of the first unchanged line after the group. For an empty
708 * group, end is equal to start.
Davide Libenzi295ba2f2006-04-13 16:45:13 -0700709 */
Michael Haggertye8adf232016-08-22 13:22:44 +0200710 long end;
711};
712
713/*
714 * Initialize g to point at the first group in xdf.
715 */
Jeff King134e40d2016-09-27 00:37:33 -0400716static void group_init(xdfile_t *xdf, struct xdlgroup *g)
Michael Haggertye8adf232016-08-22 13:22:44 +0200717{
718 g->start = g->end = 0;
719 while (xdf->rchg[g->end])
720 g->end++;
721}
722
723/*
724 * Move g to describe the next (possibly empty) group in xdf and return 0. If g
725 * is already at the end of the file, do nothing and return -1.
726 */
Jeff King134e40d2016-09-27 00:37:33 -0400727static inline int group_next(xdfile_t *xdf, struct xdlgroup *g)
Michael Haggertye8adf232016-08-22 13:22:44 +0200728{
729 if (g->end == xdf->nrec)
730 return -1;
731
732 g->start = g->end + 1;
733 for (g->end = g->start; xdf->rchg[g->end]; g->end++)
734 ;
735
736 return 0;
737}
738
739/*
740 * Move g to describe the previous (possibly empty) group in xdf and return 0.
741 * If g is already at the beginning of the file, do nothing and return -1.
742 */
Jeff King134e40d2016-09-27 00:37:33 -0400743static inline int group_previous(xdfile_t *xdf, struct xdlgroup *g)
Michael Haggertye8adf232016-08-22 13:22:44 +0200744{
745 if (g->start == 0)
746 return -1;
747
748 g->end = g->start - 1;
749 for (g->start = g->end; xdf->rchg[g->start - 1]; g->start--)
750 ;
751
752 return 0;
753}
754
755/*
756 * If g can be slid toward the end of the file, do so, and if it bumps into a
757 * following group, expand this group to include it. Return 0 on success or -1
758 * if g cannot be slid down.
759 */
Jeff King1e45db12021-12-03 00:12:01 -0500760static int group_slide_down(xdfile_t *xdf, struct xdlgroup *g)
Michael Haggertye8adf232016-08-22 13:22:44 +0200761{
762 if (g->end < xdf->nrec &&
Jeff King1e45db12021-12-03 00:12:01 -0500763 recs_match(xdf->recs[g->start], xdf->recs[g->end])) {
Michael Haggertye8adf232016-08-22 13:22:44 +0200764 xdf->rchg[g->start++] = 0;
765 xdf->rchg[g->end++] = 1;
766
767 while (xdf->rchg[g->end])
768 g->end++;
769
770 return 0;
771 } else {
772 return -1;
773 }
774}
775
776/*
777 * If g can be slid toward the beginning of the file, do so, and if it bumps
778 * into a previous group, expand this group to include it. Return 0 on success
779 * or -1 if g cannot be slid up.
780 */
Jeff King1e45db12021-12-03 00:12:01 -0500781static int group_slide_up(xdfile_t *xdf, struct xdlgroup *g)
Michael Haggertye8adf232016-08-22 13:22:44 +0200782{
783 if (g->start > 0 &&
Jeff King1e45db12021-12-03 00:12:01 -0500784 recs_match(xdf->recs[g->start - 1], xdf->recs[g->end - 1])) {
Michael Haggertye8adf232016-08-22 13:22:44 +0200785 xdf->rchg[--g->start] = 1;
786 xdf->rchg[--g->end] = 0;
787
788 while (xdf->rchg[g->start - 1])
789 g->start--;
790
791 return 0;
792 } else {
793 return -1;
794 }
795}
796
Michael Haggertye8adf232016-08-22 13:22:44 +0200797/*
798 * Move back and forward change groups for a consistent and pretty diff output.
799 * This also helps in finding joinable change groups and reducing the diff
800 * size.
801 */
802int xdl_change_compact(xdfile_t *xdf, xdfile_t *xdfo, long flags) {
Jeff King134e40d2016-09-27 00:37:33 -0400803 struct xdlgroup g, go;
Michael Haggertye8adf232016-08-22 13:22:44 +0200804 long earliest_end, end_matching_other;
805 long groupsize;
Michael Haggertye8adf232016-08-22 13:22:44 +0200806
807 group_init(xdf, &g);
808 group_init(xdfo, &go);
809
810 while (1) {
Johannes Schindelin03d3b122019-10-08 11:27:52 -0700811 /*
812 * If the group is empty in the to-be-compacted file, skip it:
813 */
Michael Haggertye8adf232016-08-22 13:22:44 +0200814 if (g.end == g.start)
815 goto next;
Davide Libenzi295ba2f2006-04-13 16:45:13 -0700816
817 /*
Michael Haggertye8adf232016-08-22 13:22:44 +0200818 * Now shift the change up and then down as far as possible in
Johannes Schindelin03d3b122019-10-08 11:27:52 -0700819 * each direction. If it bumps into any other changes, merge
820 * them.
Davide Libenzi295ba2f2006-04-13 16:45:13 -0700821 */
Davide Libenzi295ba2f2006-04-13 16:45:13 -0700822 do {
Michael Haggertye8adf232016-08-22 13:22:44 +0200823 groupsize = g.end - g.start;
824
825 /*
826 * Keep track of the last "end" index that causes this
827 * group to align with a group of changed lines in the
828 * other file. -1 indicates that we haven't found such
829 * a match yet:
830 */
831 end_matching_other = -1;
832
Michael Haggertye8adf232016-08-22 13:22:44 +0200833 /* Shift the group backward as much as possible: */
Jeff King1e45db12021-12-03 00:12:01 -0500834 while (!group_slide_up(xdf, &g))
Michael Haggertye8adf232016-08-22 13:22:44 +0200835 if (group_previous(xdfo, &go))
Ævar Arnfjörð Bjarmason546096a2021-06-07 18:43:49 +0200836 BUG("group sync broken sliding up");
Davide Libenzi295ba2f2006-04-13 16:45:13 -0700837
Michael Haggertye8adf232016-08-22 13:22:44 +0200838 /*
839 * This is this highest that this group can be shifted.
840 * Record its end index:
841 */
842 earliest_end = g.end;
843
844 if (go.end > go.start)
845 end_matching_other = g.end;
846
847 /* Now shift the group forward as far as possible: */
848 while (1) {
Jeff King1e45db12021-12-03 00:12:01 -0500849 if (group_slide_down(xdf, &g))
Michael Haggertye8adf232016-08-22 13:22:44 +0200850 break;
851 if (group_next(xdfo, &go))
Ævar Arnfjörð Bjarmason546096a2021-06-07 18:43:49 +0200852 BUG("group sync broken sliding down");
Michael Haggertye8adf232016-08-22 13:22:44 +0200853
854 if (go.end > go.start)
855 end_matching_other = g.end;
Davide Libenzi295ba2f2006-04-13 16:45:13 -0700856 }
Michael Haggertye8adf232016-08-22 13:22:44 +0200857 } while (groupsize != g.end - g.start);
Davide Libenzi295ba2f2006-04-13 16:45:13 -0700858
Michael Haggerty433860f2016-09-05 11:44:51 +0200859 /*
860 * If the group can be shifted, then we can possibly use this
861 * freedom to produce a more intuitive diff.
862 *
Johannes Schindelin03d3b122019-10-08 11:27:52 -0700863 * The group is currently shifted as far down as possible, so
864 * the heuristics below only have to handle upwards shifts.
Michael Haggerty433860f2016-09-05 11:44:51 +0200865 */
866
Michael Haggertye8adf232016-08-22 13:22:44 +0200867 if (g.end == earliest_end) {
868 /* no shifting was possible */
869 } else if (end_matching_other != -1) {
Davide Libenzi295ba2f2006-04-13 16:45:13 -0700870 /*
Johannes Schindelin03d3b122019-10-08 11:27:52 -0700871 * Move the possibly merged group of changes back to
872 * line up with the last group of changes from the
873 * other file that it can align with.
Davide Libenzi295ba2f2006-04-13 16:45:13 -0700874 */
Michael Haggertye8adf232016-08-22 13:22:44 +0200875 while (go.end == go.start) {
Jeff King1e45db12021-12-03 00:12:01 -0500876 if (group_slide_up(xdf, &g))
Ævar Arnfjörð Bjarmason546096a2021-06-07 18:43:49 +0200877 BUG("match disappeared");
Michael Haggertye8adf232016-08-22 13:22:44 +0200878 if (group_previous(xdfo, &go))
Ævar Arnfjörð Bjarmason546096a2021-06-07 18:43:49 +0200879 BUG("group sync broken sliding to match");
Michael Haggertycb0eded2016-08-22 13:22:41 +0200880 }
Michael Haggerty433860f2016-09-05 11:44:51 +0200881 } else if (flags & XDF_INDENT_HEURISTIC) {
882 /*
883 * Indent heuristic: a group of pure add/delete lines
Johannes Schindelin03d3b122019-10-08 11:27:52 -0700884 * implies two splits, one between the end of the
885 * "before" context and the start of the group, and
886 * another between the end of the group and the
887 * beginning of the "after" context. Some splits are
888 * aesthetically better and some are worse. We compute
889 * a badness "score" for each split, and add the scores
890 * for the two splits to define a "score" for each
891 * position that the group can be shifted to. Then we
892 * pick the shift with the lowest score.
Michael Haggerty433860f2016-09-05 11:44:51 +0200893 */
894 long shift, best_shift = -1;
895 struct split_score best_score;
896
Stefan Beller301ef852018-07-27 15:23:56 -0700897 shift = earliest_end;
898 if (g.end - groupsize - 1 > shift)
899 shift = g.end - groupsize - 1;
900 if (g.end - INDENT_HEURISTIC_MAX_SLIDING > shift)
901 shift = g.end - INDENT_HEURISTIC_MAX_SLIDING;
902 for (; shift <= g.end; shift++) {
Michael Haggerty433860f2016-09-05 11:44:51 +0200903 struct split_measurement m;
904 struct split_score score = {0, 0};
905
906 measure_split(xdf, shift, &m);
907 score_add_split(&m, &score);
908 measure_split(xdf, shift - groupsize, &m);
909 score_add_split(&m, &score);
910 if (best_shift == -1 ||
911 score_cmp(&score, &best_score) <= 0) {
912 best_score.effective_indent = score.effective_indent;
913 best_score.penalty = score.penalty;
914 best_shift = shift;
915 }
916 }
917
918 while (g.end > best_shift) {
Jeff King1e45db12021-12-03 00:12:01 -0500919 if (group_slide_up(xdf, &g))
Ævar Arnfjörð Bjarmason546096a2021-06-07 18:43:49 +0200920 BUG("best shift unreached");
Michael Haggerty433860f2016-09-05 11:44:51 +0200921 if (group_previous(xdfo, &go))
Ævar Arnfjörð Bjarmason546096a2021-06-07 18:43:49 +0200922 BUG("group sync broken sliding to blank line");
Michael Haggerty433860f2016-09-05 11:44:51 +0200923 }
Stefan Bellerd634d612016-04-19 08:21:30 -0700924 }
Michael Haggertye8adf232016-08-22 13:22:44 +0200925
926 next:
927 /* Move past the just-processed group: */
928 if (group_next(xdf, &g))
929 break;
930 if (group_next(xdfo, &go))
Ævar Arnfjörð Bjarmason546096a2021-06-07 18:43:49 +0200931 BUG("group sync broken moving to next group");
Davide Libenzi295ba2f2006-04-13 16:45:13 -0700932 }
933
Michael Haggertye8adf232016-08-22 13:22:44 +0200934 if (!group_next(xdfo, &go))
Ævar Arnfjörð Bjarmason546096a2021-06-07 18:43:49 +0200935 BUG("group sync broken at end of file");
Michael Haggertye8adf232016-08-22 13:22:44 +0200936
Davide Libenzi295ba2f2006-04-13 16:45:13 -0700937 return 0;
938}
939
940
/*
 * Convert the per-record change flags (xe->xdf1.rchg / xe->xdf2.rchg)
 * into an edit script: a linked list of xdchange_t groups returned
 * through *xscr.  Returns 0 on success, -1 on allocation failure (in
 * which case any partially built script is freed and *xscr is left
 * untouched).
 */
int xdl_build_script(xdfenv_t *xe, xdchange_t **xscr) {
	xdchange_t *cscr = NULL, *xch;
	char *rchg1 = xe->xdf1.rchg, *rchg2 = xe->xdf2.rchg;
	long i1, i2, l1, l2;

	/*
	 * Trivial. Collects "groups" of changes and creates an edit script.
	 *
	 * The scan runs backwards from the end of both files so that
	 * groups are discovered last-to-first; xdl_add_change()
	 * presumably prepends, leaving *xscr ordered first-to-last --
	 * confirm against its definition.
	 *
	 * NOTE(review): the rchg1[i1 - 1] / rchg2[i2 - 1] reads reach
	 * index -1 on the final iteration; this assumes the rchg arrays
	 * carry a zeroed sentinel slot below index 0 (allocated in the
	 * prepare phase) -- verify in xdl_prepare_env().  Likewise the
	 * "i1 >= 0 || i2 >= 0" condition assumes both counters reach -1
	 * on the same iteration, i.e. that the two files contain the
	 * same number of unchanged (matched) records.
	 */
	for (i1 = xe->xdf1.nrec, i2 = xe->xdf2.nrec; i1 >= 0 || i2 >= 0; i1--, i2--)
		if (rchg1[i1 - 1] || rchg2[i2 - 1]) {
			/* Swallow the whole contiguous changed run on each side. */
			for (l1 = i1; rchg1[i1 - 1]; i1--);
			for (l2 = i2; rchg2[i2 - 1]; i2--);

			if (!(xch = xdl_add_change(cscr, i1, i2, l1 - i1, l2 - i2))) {
				xdl_free_script(cscr);
				return -1;
			}
			cscr = xch;
		}

	*xscr = cscr;

	return 0;
}
965
966
967void xdl_free_script(xdchange_t *xscr) {
968 xdchange_t *xch;
969
970 while ((xch = xscr) != NULL) {
971 xscr = xscr->next;
972 xdl_free(xch);
973 }
974}
975
Jeff King8157ed42022-12-13 06:13:24 -0500976static int xdl_call_hunk_func(xdfenv_t *xe UNUSED, xdchange_t *xscr, xdemitcb_t *ecb,
René Scharfe467d3482012-05-09 22:20:55 +0200977 xdemitconf_t const *xecfg)
978{
979 xdchange_t *xch, *xche;
980
981 for (xch = xscr; xch; xch = xche->next) {
Antoine Pelisse36617af2013-06-19 20:46:07 +0200982 xche = xdl_get_hunk(&xch, xecfg);
983 if (!xch)
984 break;
René Scharfe467d3482012-05-09 22:20:55 +0200985 if (xecfg->hunk_func(xch->i1, xche->i1 + xche->chg1 - xch->i1,
986 xch->i2, xche->i2 + xche->chg2 - xch->i2,
987 ecb->priv) < 0)
988 return -1;
989 }
990 return 0;
991}
Linus Torvalds34435462006-03-24 20:13:22 -0800992
Michał Kępień296d4a92020-10-20 08:48:09 +0200993static void xdl_mark_ignorable_lines(xdchange_t *xscr, xdfenv_t *xe, long flags)
Antoine Pelisse36617af2013-06-19 20:46:07 +0200994{
995 xdchange_t *xch;
996
997 for (xch = xscr; xch; xch = xch->next) {
998 int ignore = 1;
999 xrecord_t **rec;
1000 long i;
1001
1002 rec = &xe->xdf1.recs[xch->i1];
1003 for (i = 0; i < xch->chg1 && ignore; i++)
1004 ignore = xdl_blankline(rec[i]->ptr, rec[i]->size, flags);
1005
1006 rec = &xe->xdf2.recs[xch->i2];
1007 for (i = 0; i < xch->chg2 && ignore; i++)
1008 ignore = xdl_blankline(rec[i]->ptr, rec[i]->size, flags);
1009
1010 xch->ignore = ignore;
1011 }
1012}
1013
Michał Kępień296d4a92020-10-20 08:48:09 +02001014static int record_matches_regex(xrecord_t *rec, xpparam_t const *xpp) {
1015 regmatch_t regmatch;
1016 int i;
1017
1018 for (i = 0; i < xpp->ignore_regex_nr; i++)
1019 if (!regexec_buf(xpp->ignore_regex[i], rec->ptr, rec->size, 1,
1020 &regmatch, 0))
1021 return 1;
1022
1023 return 0;
1024}
1025
1026static void xdl_mark_ignorable_regex(xdchange_t *xscr, const xdfenv_t *xe,
1027 xpparam_t const *xpp)
1028{
1029 xdchange_t *xch;
1030
1031 for (xch = xscr; xch; xch = xch->next) {
1032 xrecord_t **rec;
1033 int ignore = 1;
1034 long i;
1035
1036 /*
1037 * Do not override --ignore-blank-lines.
1038 */
1039 if (xch->ignore)
1040 continue;
1041
1042 rec = &xe->xdf1.recs[xch->i1];
1043 for (i = 0; i < xch->chg1 && ignore; i++)
1044 ignore = record_matches_regex(rec[i], xpp);
1045
1046 rec = &xe->xdf2.recs[xch->i2];
1047 for (i = 0; i < xch->chg2 && ignore; i++)
1048 ignore = record_matches_regex(rec[i], xpp);
1049
1050 xch->ignore = ignore;
1051 }
1052}
1053
Linus Torvalds34435462006-03-24 20:13:22 -08001054int xdl_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp,
1055 xdemitconf_t const *xecfg, xdemitcb_t *ecb) {
1056 xdchange_t *xscr;
1057 xdfenv_t xe;
René Scharfe3319e602012-05-09 22:24:34 +02001058 emit_func_t ef = xecfg->hunk_func ? xdl_call_hunk_func : xdl_emit_diff;
René Scharfe467d3482012-05-09 22:20:55 +02001059
Linus Torvalds34435462006-03-24 20:13:22 -08001060 if (xdl_do_diff(mf1, mf2, xpp, &xe) < 0) {
1061
1062 return -1;
1063 }
Johannes Schindelin0d21efa2006-06-14 17:40:23 +02001064 if (xdl_change_compact(&xe.xdf1, &xe.xdf2, xpp->flags) < 0 ||
1065 xdl_change_compact(&xe.xdf2, &xe.xdf1, xpp->flags) < 0 ||
Davide Libenzi295ba2f2006-04-13 16:45:13 -07001066 xdl_build_script(&xe, &xscr) < 0) {
Linus Torvalds34435462006-03-24 20:13:22 -08001067
1068 xdl_free_env(&xe);
1069 return -1;
1070 }
Linus Torvalds34435462006-03-24 20:13:22 -08001071 if (xscr) {
Antoine Pelisse36617af2013-06-19 20:46:07 +02001072 if (xpp->flags & XDF_IGNORE_BLANK_LINES)
Michał Kępień296d4a92020-10-20 08:48:09 +02001073 xdl_mark_ignorable_lines(xscr, &xe, xpp->flags);
1074
1075 if (xpp->ignore_regex)
1076 xdl_mark_ignorable_regex(xscr, &xe, xpp);
Antoine Pelisse36617af2013-06-19 20:46:07 +02001077
Brian Downingef2e62f2008-10-25 15:30:54 +02001078 if (ef(&xe, xscr, ecb, xecfg) < 0) {
Linus Torvalds34435462006-03-24 20:13:22 -08001079
1080 xdl_free_script(xscr);
1081 xdl_free_env(&xe);
1082 return -1;
1083 }
Linus Torvalds34435462006-03-24 20:13:22 -08001084 xdl_free_script(xscr);
1085 }
Linus Torvalds34435462006-03-24 20:13:22 -08001086 xdl_free_env(&xe);
1087
1088 return 0;
1089}