blob: a881f851810f60cfd7a1a039adfade8f9cf7a332 [file] [log] [blame]
Derrick Stolee3964fc22021-03-30 13:10:47 +00001#include "cache.h"
2#include "repository.h"
3#include "sparse-index.h"
Derrick Stolee4300f842021-03-30 13:10:48 +00004#include "tree.h"
5#include "pathspec.h"
6#include "trace2.h"
Derrick Stolee6e773522021-03-30 13:10:55 +00007#include "cache-tree.h"
8#include "config.h"
9#include "dir.h"
10#include "fsmonitor.h"
11
/*
 * State threaded through the read_tree_at() callback used when
 * expanding a sparse index (see add_path_to_index()).
 */
struct modify_index_context {
	/* The index being built up by the tree walk. */
	struct index_state *write;
	/*
	 * Cone-mode pattern list bounding the expansion, or NULL to
	 * expand every sparse directory fully.
	 */
	struct pattern_list *pl;
};
16
Derrick Stolee6e773522021-03-30 13:10:55 +000017static struct cache_entry *construct_sparse_dir_entry(
18 struct index_state *istate,
19 const char *sparse_dir,
20 struct cache_tree *tree)
21{
22 struct cache_entry *de;
23
24 de = make_cache_entry(istate, S_IFDIR, &tree->oid, sparse_dir, 0, 0);
25
26 de->ce_flags |= CE_SKIP_WORKTREE;
27 return de;
28}
29
/*
 * Collapse the entries in the range [start, end) of istate->cache,
 * writing the surviving entries starting at position 'num_converted'.
 * 'ct' is the cache-tree node for the directory named by
 * ct_path/ct_pathlen ("" for the root), and every entry in the range
 * lies under that directory.
 *
 * Returns the number of entries "inserted" into the index.
 */
static int convert_to_sparse_rec(struct index_state *istate,
				 int num_converted,
				 int start, int end,
				 const char *ct_path, size_t ct_pathlen,
				 struct cache_tree *ct)
{
	int i, can_convert = 1;
	int start_converted = num_converted;
	struct strbuf child_path = STRBUF_INIT;

	/*
	 * Is the current path outside of the sparse cone?
	 * Then check if the region can be replaced by a sparse
	 * directory entry (everything is sparse and merged).
	 */
	if (path_in_sparse_checkout(ct_path, istate))
		can_convert = 0;

	/*
	 * Unmerged entries, submodules, and non-skip-worktree entries
	 * all block collapsing this whole region into one entry.
	 */
	for (i = start; can_convert && i < end; i++) {
		struct cache_entry *ce = istate->cache[i];

		if (ce_stage(ce) ||
		    S_ISGITLINK(ce->ce_mode) ||
		    !(ce->ce_flags & CE_SKIP_WORKTREE))
			can_convert = 0;
	}

	/* Replace the entire region with a single sparse directory entry. */
	if (can_convert) {
		struct cache_entry *se;
		se = construct_sparse_dir_entry(istate, ct_path, ct);

		istate->cache[num_converted++] = se;
		return 1;
	}

	/*
	 * Otherwise walk the region, recursing into each subtree of 'ct'
	 * and keeping plain entries as-is.
	 */
	for (i = start; i < end; ) {
		int count, span, pos = -1;
		const char *base, *slash;
		struct cache_entry *ce = istate->cache[i];

		/*
		 * Detect if this is a normal entry outside of any subtree
		 * entry.
		 */
		base = ce->name + ct_pathlen;
		slash = strchr(base, '/');

		if (slash)
			pos = cache_tree_subtree_pos(ct, base, slash - base);

		/* No matching subtree: keep the entry and move on. */
		if (pos < 0) {
			istate->cache[num_converted++] = ce;
			i++;
			continue;
		}

		/* Build "{ct_path}{subdir}/" for the recursive call. */
		strbuf_setlen(&child_path, 0);
		strbuf_add(&child_path, ce->name, slash - ce->name + 1);

		/* entry_count tells us how many index entries the subtree spans. */
		span = ct->down[pos]->cache_tree->entry_count;
		count = convert_to_sparse_rec(istate,
					      num_converted, i, i + span,
					      child_path.buf, child_path.len,
					      ct->down[pos]->cache_tree);
		num_converted += count;
		i += span;
	}

	strbuf_release(&child_path);
	return num_converted - start_converted;
}
104
Ævar Arnfjörð Bjarmasonb79f9c02021-05-05 14:11:58 +0200105int set_sparse_index_config(struct repository *repo, int enable)
Derrick Stolee58300f42021-03-30 13:10:59 +0000106{
Derrick Stolee7316dc52022-02-07 21:33:01 +0000107 int res = repo_config_set_worktree_gently(repo,
108 "index.sparse",
109 enable ? "true" : "false");
Derrick Stolee58300f42021-03-30 13:10:59 +0000110 prepare_repo_settings(repo);
Derrick Stolee122ba1f2021-03-30 13:11:00 +0000111 repo->settings.sparse_index = enable;
112 return res;
Derrick Stolee58300f42021-03-30 13:10:59 +0000113}
114
Derrick Stoleefc6609d2021-07-14 13:12:25 +0000115static int index_has_unmerged_entries(struct index_state *istate)
116{
117 int i;
118 for (i = 0; i < istate->cache_nr; i++) {
119 if (ce_stage(istate->cache[i]))
120 return 1;
121 }
122
123 return 0;
124}
125
/*
 * Determine whether 'istate' may use the sparse-index format at all.
 *
 * Requires cone-mode sparse-checkout. Unless SPARSE_INDEX_MEMORY_ONLY
 * is set in 'flags', additionally requires that the split-index
 * extension is not in use and that index.sparse is enabled.
 *
 * Returns 1 if a sparse index is allowed, 0 otherwise. May initialize
 * istate->repo and load istate->sparse_checkout_patterns as a side
 * effect.
 */
static int is_sparse_index_allowed(struct index_state *istate, int flags)
{
	if (!core_apply_sparse_checkout || !core_sparse_checkout_cone)
		return 0;

	/* Fall back to the_repository when the index has none attached. */
	if (!istate->repo)
		istate->repo = the_repository;

	if (!(flags & SPARSE_INDEX_MEMORY_ONLY)) {
		int test_env;

		/*
		 * The sparse index is not (yet) integrated with a split index.
		 */
		if (istate->split_index || git_env_bool("GIT_TEST_SPLIT_INDEX", 0))
			return 0;
		/*
		 * The GIT_TEST_SPARSE_INDEX environment variable triggers the
		 * index.sparse config variable to be on.
		 */
		test_env = git_env_bool("GIT_TEST_SPARSE_INDEX", -1);
		if (test_env >= 0)
			set_sparse_index_config(istate->repo, test_env);

		/*
		 * Only convert to sparse if index.sparse is set.
		 */
		prepare_repo_settings(istate->repo);
		if (!istate->repo->settings.sparse_index)
			return 0;
	}

	/* Failure to load the sparse-checkout patterns means "not allowed". */
	if (init_sparse_checkout_patterns(istate))
		return 0;

	/*
	 * We need cone-mode patterns to use sparse-index. If a user edits
	 * their sparse-checkout file manually, then we can detect during
	 * parsing that they are not actually using cone-mode patterns and
	 * hence we need to abort this conversion _without error_. Warnings
	 * already exist in the pattern parsing to inform the user of their
	 * bad patterns.
	 */
	if (!istate->sparse_checkout_patterns->use_cone_patterns)
		return 0;

	return 1;
}
174
/*
 * Convert 'istate' to the sparse-index format, collapsing regions
 * outside the sparse-checkout cone into sparse directory entries.
 * A valid cache-tree is required (and recomputed if necessary) since
 * the collapse walks cache-tree nodes.
 *
 * Returns 0; a 0 return does NOT guarantee the index was collapsed —
 * the conversion is silently skipped when it is not allowed or not
 * possible.
 */
int convert_to_sparse(struct index_state *istate, int flags)
{
	/*
	 * If the index is already sparse, empty, or otherwise
	 * cannot be converted to sparse, do not convert.
	 */
	if (istate->sparse_index == INDEX_COLLAPSED || !istate->cache_nr ||
	    !is_sparse_index_allowed(istate, flags))
		return 0;

	/*
	 * NEEDSWORK: If we have unmerged entries, then stay full.
	 * Unmerged entries prevent the cache-tree extension from working.
	 */
	if (index_has_unmerged_entries(istate))
		return 0;

	if (!cache_tree_fully_valid(istate->cache_tree)) {
		/* Clear and recompute the cache-tree */
		cache_tree_free(&istate->cache_tree);

		/*
		 * Silently return if there is a problem with the cache tree update,
		 * which might just be due to a conflict state in some entry.
		 *
		 * This might create new tree objects, so be sure to use
		 * WRITE_TREE_MISSING_OK.
		 */
		if (cache_tree_update(istate, WRITE_TREE_MISSING_OK))
			return 0;
	}

	remove_fsmonitor(istate);

	trace2_region_enter("index", "convert_to_sparse", istate->repo);
	/* Collapse in place; the new (smaller) entry count is returned. */
	istate->cache_nr = convert_to_sparse_rec(istate,
						 0, 0, istate->cache_nr,
						 "", 0, istate->cache_tree);

	/* Clear and recompute the cache-tree */
	cache_tree_free(&istate->cache_tree);
	cache_tree_update(istate, 0);

	/* Drop fsmonitor state; entry positions have changed. */
	istate->fsmonitor_has_run_once = 0;
	FREE_AND_NULL(istate->fsmonitor_dirty);
	FREE_AND_NULL(istate->fsmonitor_last_update);

	istate->sparse_index = INDEX_COLLAPSED;
	trace2_region_leave("index", "convert_to_sparse", istate->repo);
	return 0;
}
Derrick Stolee4300f842021-03-30 13:10:48 +0000226
227static void set_index_entry(struct index_state *istate, int nr, struct cache_entry *ce)
228{
229 ALLOC_GROW(istate->cache, nr + 1, istate->cache_alloc);
230
231 istate->cache[nr] = ce;
232 add_name_hash(istate, ce);
233}
234
/*
 * read_tree_at() callback: insert the object at "{base}{path}" into
 * ctx->write, where 'context' is a struct modify_index_context.
 *
 * Directories are recursed into when no pattern list was given or when
 * they match the cone-mode patterns; otherwise they become a single
 * sparse directory entry. Blobs become skip-worktree file entries.
 *
 * Returns READ_TREE_RECURSIVE to descend into a directory, 0 otherwise.
 */
static int add_path_to_index(const struct object_id *oid,
			     struct strbuf *base, const char *path,
			     unsigned int mode, void *context)
{
	struct modify_index_context *ctx = (struct modify_index_context *)context;
	struct cache_entry *ce;
	size_t len = base->len;

	if (S_ISDIR(mode)) {
		int dtype;
		size_t baselen = base->len;
		/* Without a pattern list, expand every directory fully. */
		if (!ctx->pl)
			return READ_TREE_RECURSIVE;

		/*
		 * Have we expanded to a point outside of the sparse-checkout?
		 *
		 * Artificially pad the path name with a slash "/" to
		 * indicate it as a directory, and add an arbitrary file
		 * name ("-") so we can consider base->buf as a file name
		 * to match against the cone-mode patterns.
		 *
		 * If we compared just "path", then we would expand more
		 * than we should. Since every file at root is always
		 * included, we would expand every directory at root at
		 * least one level deep instead of using sparse directory
		 * entries.
		 */
		strbuf_addstr(base, path);
		strbuf_add(base, "/-", 2);

		if (path_matches_pattern_list(base->buf, base->len,
					      NULL, &dtype,
					      ctx->pl, ctx->write)) {
			/* Inside the cone: undo the padding and recurse. */
			strbuf_setlen(base, baselen);
			return READ_TREE_RECURSIVE;
		}

		/*
		 * The path "{base}{path}/" is a sparse directory. Create the correct
		 * name for inserting the entry into the index.
		 */
		strbuf_setlen(base, base->len - 1);
	} else {
		strbuf_addstr(base, path);
	}

	/* All restored entries are marked skip-worktree. */
	ce = make_cache_entry(ctx->write, mode, oid, base->buf, 0, 0);
	ce->ce_flags |= CE_SKIP_WORKTREE | CE_EXTENDED;
	set_index_entry(ctx->write, ctx->write->cache_nr++, ce);

	/* Restore 'base' for the remainder of the tree walk. */
	strbuf_setlen(base, len);
	return 0;
}
Derrick Stolee3964fc22021-03-30 13:10:47 +0000289
/*
 * Expand the sparse directory entries of 'istate' in place.
 *
 * With a NULL (or non-cone-mode) pattern list, expand to a completely
 * full index. With a cone-mode pattern list, expand only the sparse
 * directories that fall inside the patterns; the rest stay (or become)
 * sparse directory entries.
 *
 * No-op when 'istate' is NULL or already fully expanded.
 */
void expand_index(struct index_state *istate, struct pattern_list *pl)
{
	int i;
	struct index_state *full;
	struct strbuf base = STRBUF_INIT;
	const char *tr_region;
	struct modify_index_context ctx;

	/*
	 * If the index is already full, then keep it full. We will convert
	 * it to a sparse index on write, if possible.
	 */
	if (!istate || istate->sparse_index == INDEX_EXPANDED)
		return;

	/*
	 * If our index is sparse, but our new pattern set does not use
	 * cone mode patterns, then we need to expand the index before we
	 * continue. A NULL pattern set indicates a full expansion to a
	 * full index.
	 */
	if (pl && !pl->use_cone_patterns)
		pl = NULL;

	if (!istate->repo)
		istate->repo = the_repository;

	/*
	 * A NULL pattern set indicates we are expanding a full index, so
	 * we use a special region name that indicates the full expansion.
	 * This is used by test cases, but also helps to differentiate the
	 * two cases.
	 */
	tr_region = pl ? "expand_index" : "ensure_full_index";
	trace2_region_enter("index", tr_region, istate->repo);

	/* initialize basics of new index */
	full = xcalloc(1, sizeof(struct index_state));
	memcpy(full, istate, sizeof(struct index_state));

	/* then change the necessary things */
	full->sparse_index = 0;
	full->cache_alloc = (3 * istate->cache_alloc) / 2;
	full->cache_nr = 0;
	ALLOC_ARRAY(full->cache, full->cache_alloc);

	ctx.write = full;
	ctx.pl = pl;

	for (i = 0; i < istate->cache_nr; i++) {
		struct cache_entry *ce = istate->cache[i];
		struct tree *tree;
		struct pathspec ps;

		/* Non-sparse entries carry over to the new index unchanged. */
		if (!S_ISSPARSEDIR(ce->ce_mode)) {
			set_index_entry(full, full->cache_nr++, ce);
			continue;
		}
		if (!(ce->ce_flags & CE_SKIP_WORKTREE))
			warning(_("index entry is a directory, but not sparse (%08x)"),
				ce->ce_flags);

		/* recursively walk into cd->name */
		tree = lookup_tree(istate->repo, &ce->oid);

		memset(&ps, 0, sizeof(ps));
		ps.recursive = 1;
		ps.has_wildcard = 1;
		ps.max_depth = -1;

		strbuf_setlen(&base, 0);
		strbuf_add(&base, ce->name, strlen(ce->name));

		read_tree_at(istate->repo, tree, &base, &ps,
			     add_path_to_index, &ctx);

		/* free directory entries. full entries are re-used */
		discard_cache_entry(ce);
	}

	/* Copy back into original index. */
	memcpy(&istate->name_hash, &full->name_hash, sizeof(full->name_hash));
	memcpy(&istate->dir_hash, &full->dir_hash, sizeof(full->dir_hash));
	istate->sparse_index = 0;
	free(istate->cache);
	istate->cache = full->cache;
	istate->cache_nr = full->cache_nr;
	istate->cache_alloc = full->cache_alloc;
	/* Drop fsmonitor state; entry positions have changed. */
	istate->fsmonitor_has_run_once = 0;
	FREE_AND_NULL(istate->fsmonitor_dirty);
	FREE_AND_NULL(istate->fsmonitor_last_update);

	strbuf_release(&base);
	free(full);

	/* Clear and recompute the cache-tree */
	cache_tree_free(&istate->cache_tree);
	cache_tree_update(istate, 0);

	trace2_region_leave("index", tr_region, istate->repo);
}
391
/*
 * Fully expand a sparse index so it contains no sparse directory
 * entries; a NULL pattern list requests a full expansion.
 */
void ensure_full_index(struct index_state *istate)
{
	expand_index(istate, NULL);
}
Derrick Stolee71f82d02021-04-12 21:08:16 +0000396
/*
 * Bring the index to the sparsity state matching the current
 * configuration: collapse it when a sparse index is allowed,
 * otherwise expand it fully.
 */
void ensure_correct_sparsity(struct index_state *istate)
{
	if (!is_sparse_index_allowed(istate, 0)) {
		ensure_full_index(istate);
		return;
	}

	convert_to_sparse(istate, 0);
}
408
/*
 * Determine whether 'path' exists on disk, caching information about
 * the most recently examined parent directory in the caller-provided
 * dirname/dir_len/dir_found slots so that consecutive queries for
 * entries in the same directory avoid redundant lstat() calls.
 *
 * Returns 1 if 'path' exists, 0 otherwise. The cached *dirname points
 * into a previously passed 'path', so callers must keep those strings
 * alive across calls.
 *
 * Bug fix: the "parent directory already found" check below used
 * memcmp(...) where !memcmp(...) was intended, so it bailed out for
 * paths in a *different* directory (never updating the cache) and
 * re-lstat'ed the parent for paths in the *same*, already-found
 * directory — the opposite of what its comment describes.
 */
static int path_found(const char *path, const char **dirname, size_t *dir_len,
		      int *dir_found)
{
	struct stat st;
	char *newdir;
	char *tmp;

	/*
	 * If dirname corresponds to a directory that doesn't exist, and this
	 * path starts with dirname, then path can't exist.
	 */
	if (!*dir_found && !memcmp(path, *dirname, *dir_len))
		return 0;

	/*
	 * If path itself exists, return 1.
	 */
	if (!lstat(path, &st))
		return 1;

	/*
	 * Otherwise, path does not exist so we'll return 0...but we'll first
	 * determine some info about its parent directory so we can avoid
	 * lstat calls for future cache entries.
	 */
	newdir = strrchr(path, '/');
	if (!newdir)
		return 0; /* Didn't find a parent dir; just return 0 now. */

	/*
	 * If path starts with directory (which we already lstat'ed and found),
	 * then no need to lstat parent directory again.
	 */
	if (*dir_found && *dirname && !memcmp(path, *dirname, *dir_len))
		return 0;

	/* Free previous dirname, and cache path's dirname */
	*dirname = path;
	*dir_len = newdir - path + 1;

	tmp = xstrndup(path, *dir_len);
	*dir_found = !lstat(tmp, &st);
	free(tmp);

	return 0;
}
455
/*
 * Clear the skip-worktree bit from any cache entry whose path is
 * actually present on disk. If a sparse directory itself exists on
 * disk, the index must first be fully expanded so the files inside it
 * can be examined individually; the scan then restarts from the top.
 *
 * Skipped entirely when sparse-checkout is off or when files outside
 * the patterns are expected (sparse_expect_files_outside_of_patterns).
 */
void clear_skip_worktree_from_present_files(struct index_state *istate)
{
	/* lstat() cache shared across path_found() calls (same-dir reuse). */
	const char *last_dirname = NULL;
	size_t dir_len = 0;
	int dir_found = 1;

	int i;

	if (!core_apply_sparse_checkout ||
	    sparse_expect_files_outside_of_patterns)
		return;

restart:
	for (i = 0; i < istate->cache_nr; i++) {
		struct cache_entry *ce = istate->cache[i];

		if (ce_skip_worktree(ce) &&
		    path_found(ce->name, &last_dirname, &dir_len, &dir_found)) {
			if (S_ISSPARSEDIR(ce->ce_mode)) {
				/* A sparse directory exists on disk: expand
				 * the index and rescan from the beginning. */
				ensure_full_index(istate);
				goto restart;
			}
			ce->ce_flags &= ~CE_SKIP_WORKTREE;
		}
	}
}
482
/*
 * This static global helps avoid infinite recursion between
 * expand_to_path() and index_file_exists().
 */
static int in_expand_to_path = 0;

/*
 * If 'path' lies inside a sparse directory entry of a sparse index,
 * expand the index so the entry for 'path' (if any) becomes visible.
 * 'icase' requests case-insensitive name-hash lookups.
 *
 * No-op when the index is not sparse, or when re-entered via
 * index_file_exists() (guarded by in_expand_to_path).
 */
void expand_to_path(struct index_state *istate,
		    const char *path, size_t pathlen, int icase)
{
	struct strbuf path_mutable = STRBUF_INIT;
	size_t substr_len;

	/* prevent extra recursion */
	if (in_expand_to_path)
		return;

	if (!istate || !istate->sparse_index)
		return;

	if (!istate->repo)
		istate->repo = the_repository;

	in_expand_to_path = 1;

	/*
	 * We only need to actually expand a region if the
	 * following are both true:
	 *
	 * 1. 'path' is not already in the index.
	 * 2. Some parent directory of 'path' is a sparse directory.
	 */

	if (index_file_exists(istate, path, pathlen, icase))
		goto cleanup;

	strbuf_add(&path_mutable, path, pathlen);
	strbuf_addch(&path_mutable, '/');

	/* Check the name hash for all parent directories */
	substr_len = 0;
	while (substr_len < pathlen) {
		char temp;
		char *replace = strchr(path_mutable.buf + substr_len, '/');

		if (!replace)
			break;

		/* replace the character _after_ the slash */
		replace++;
		temp = *replace;
		*replace = '\0';
		if (index_file_exists(istate, path_mutable.buf,
				      path_mutable.len, icase)) {
			/*
			 * We found a parent directory in the name-hash
			 * hashtable, because only sparse directory entries
			 * have a trailing '/' character. Since "path" wasn't
			 * in the index, perhaps it exists within this
			 * sparse-directory. Expand accordingly.
			 */
			ensure_full_index(istate);
			break;
		}

		/* Restore the byte and advance past this path component. */
		*replace = temp;
		substr_len = replace - path_mutable.buf;
	}

cleanup:
	strbuf_release(&path_mutable);
	in_expand_to_path = 0;
}