#include "cache.h"
#include "alloc.h"
#include "environment.h"
#include "gettext.h"
#include "repository.h"
#include "sparse-index.h"
#include "tree.h"
#include "pathspec.h"
#include "trace2.h"
#include "cache-tree.h"
#include "config.h"
#include "dir.h"
#include "fsmonitor.h"

struct modify_index_context {
	struct index_state *write;
	struct pattern_list *pl;
};

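/*
 * Create an index entry for the given sparse directory, pointing at the
 * tree object that stands in for its contents. The entry is marked with
 * CE_SKIP_WORKTREE, since a sparse directory always lies outside the
 * sparse-checkout cone.
 */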
static struct cache_entry *construct_sparse_dir_entry(
				struct index_state *istate,
				const char *sparse_dir,
				struct cache_tree *tree)
{
	struct cache_entry *de;

	de = make_cache_entry(istate, S_IFDIR, &tree->oid, sparse_dir, 0, 0);

	de->ce_flags |= CE_SKIP_WORKTREE;
	return de;
}

/*
 * Returns the number of entries "inserted" into the index.
 */
static int convert_to_sparse_rec(struct index_state *istate,
				 int num_converted,
				 int start, int end,
				 const char *ct_path, size_t ct_pathlen,
				 struct cache_tree *ct)
{
	int i, can_convert = 1;
	int start_converted = num_converted;
	struct strbuf child_path = STRBUF_INIT;

	/*
	 * Is the current path outside of the sparse cone?
	 * Then check if the region can be replaced by a sparse
	 * directory entry (everything is sparse and merged).
	 */
	if (path_in_sparse_checkout(ct_path, istate))
		can_convert = 0;

	for (i = start; can_convert && i < end; i++) {
		struct cache_entry *ce = istate->cache[i];

		if (ce_stage(ce) ||
		    S_ISGITLINK(ce->ce_mode) ||
		    !(ce->ce_flags & CE_SKIP_WORKTREE))
			can_convert = 0;
	}

	if (can_convert) {
		struct cache_entry *se;
		se = construct_sparse_dir_entry(istate, ct_path, ct);

		istate->cache[num_converted++] = se;
		return 1;
	}

	for (i = start; i < end; ) {
		int count, span, pos = -1;
		const char *base, *slash;
		struct cache_entry *ce = istate->cache[i];

		/*
		 * Detect if this is a normal entry outside of any subtree
		 * entry.
		 */
		base = ce->name + ct_pathlen;
		slash = strchr(base, '/');

		if (slash)
			pos = cache_tree_subtree_pos(ct, base, slash - base);

		if (pos < 0) {
			istate->cache[num_converted++] = ce;
			i++;
			continue;
		}

		strbuf_setlen(&child_path, 0);
		strbuf_add(&child_path, ce->name, slash - ce->name + 1);

		span = ct->down[pos]->cache_tree->entry_count;
		count = convert_to_sparse_rec(istate,
					      num_converted, i, i + span,
					      child_path.buf, child_path.len,
					      ct->down[pos]->cache_tree);
		num_converted += count;
		i += span;
	}

	strbuf_release(&child_path);
	return num_converted - start_converted;
}

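/*
 * Set the index.sparse config variable in the worktree-specific config
 * (failing gently rather than dying on error) and update the in-memory
 * repo settings to match, so the new value takes effect immediately.
 */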
int set_sparse_index_config(struct repository *repo, int enable)
{
	int res = repo_config_set_worktree_gently(repo,
						  "index.sparse",
						  enable ? "true" : "false");
	prepare_repo_settings(repo);
	repo->settings.sparse_index = enable;
	return res;
}

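/* Return 1 if any index entry is unmerged (has a nonzero stage). */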
static int index_has_unmerged_entries(struct index_state *istate)
{
	int i;
	for (i = 0; i < istate->cache_nr; i++) {
		if (ce_stage(istate->cache[i]))
			return 1;
	}

	return 0;
}

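/*
 * Check whether this index is permitted to become sparse: cone-mode
 * sparse-checkout must be enabled, index.sparse must be set (unless the
 * conversion is only in memory), and the parsed sparse-checkout patterns
 * must actually be cone-mode patterns. Returns 1 if conversion is allowed.
 */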
int is_sparse_index_allowed(struct index_state *istate, int flags)
{
	if (!core_apply_sparse_checkout || !core_sparse_checkout_cone)
		return 0;

	if (!(flags & SPARSE_INDEX_MEMORY_ONLY)) {
		int test_env;

		/*
		 * The sparse index is not (yet) integrated with a split index.
		 */
		if (istate->split_index || git_env_bool("GIT_TEST_SPLIT_INDEX", 0))
			return 0;
		/*
		 * The GIT_TEST_SPARSE_INDEX environment variable, when set,
		 * overrides the index.sparse config setting.
		 */
		test_env = git_env_bool("GIT_TEST_SPARSE_INDEX", -1);
		if (test_env >= 0)
			set_sparse_index_config(istate->repo, test_env);

		/*
		 * Only convert to sparse if index.sparse is set.
		 */
		prepare_repo_settings(istate->repo);
		if (!istate->repo->settings.sparse_index)
			return 0;
	}

	if (init_sparse_checkout_patterns(istate))
		return 0;

	/*
	 * We need cone-mode patterns to use the sparse index. If a user edits
	 * their sparse-checkout file manually, then we can detect during
	 * parsing that they are not actually using cone-mode patterns and
	 * hence we need to abort this conversion _without error_. Warnings
	 * already exist in the pattern parsing to inform the user of their
	 * bad patterns.
	 */
	if (!istate->sparse_checkout_patterns->use_cone_patterns)
		return 0;

	return 1;
}

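/*
 * Convert a full index into a sparse one by collapsing every region that
 * lies entirely outside the sparse-checkout cone into a single sparse
 * directory entry. Returns 0 in all cases; when conversion is not allowed,
 * or unmerged entries or a failed cache-tree update get in the way, the
 * index is simply left full.
 */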
int convert_to_sparse(struct index_state *istate, int flags)
{
	/*
	 * If the index is already sparse, empty, or otherwise
	 * cannot be converted to sparse, do not convert.
	 */
	if (istate->sparse_index == INDEX_COLLAPSED || !istate->cache_nr ||
	    !is_sparse_index_allowed(istate, flags))
		return 0;

	/*
	 * NEEDSWORK: If we have unmerged entries, then stay full.
	 * Unmerged entries prevent the cache-tree extension from working.
	 */
	if (index_has_unmerged_entries(istate))
		return 0;

	if (!cache_tree_fully_valid(istate->cache_tree)) {
		/* Clear and recompute the cache-tree */
		cache_tree_free(&istate->cache_tree);

		/*
		 * Silently return if there is a problem with the cache tree update,
		 * which might just be due to a conflict state in some entry.
		 *
		 * This might create new tree objects, so be sure to use
		 * WRITE_TREE_MISSING_OK.
		 */
		if (cache_tree_update(istate, WRITE_TREE_MISSING_OK))
			return 0;
	}

	remove_fsmonitor(istate);

	trace2_region_enter("index", "convert_to_sparse", istate->repo);
	istate->cache_nr = convert_to_sparse_rec(istate,
						 0, 0, istate->cache_nr,
						 "", 0, istate->cache_tree);

	/* Clear and recompute the cache-tree */
	cache_tree_free(&istate->cache_tree);
	cache_tree_update(istate, 0);

	istate->fsmonitor_has_run_once = 0;
	FREE_AND_NULL(istate->fsmonitor_dirty);
	FREE_AND_NULL(istate->fsmonitor_last_update);

	istate->sparse_index = INDEX_COLLAPSED;
	trace2_region_leave("index", "convert_to_sparse", istate->repo);
	return 0;
}

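/*
 * Place 'ce' at position 'nr' in the index array, growing the array if
 * necessary, and register the entry in the name hash.
 */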
static void set_index_entry(struct index_state *istate, int nr, struct cache_entry *ce)
{
	ALLOC_GROW(istate->cache, nr + 1, istate->cache_alloc);

	istate->cache[nr] = ce;
	add_name_hash(istate, ce);
}

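/*
 * Callback for read_tree_at() used when expanding sparse directories:
 * file entries are added to the index being built (ctx->write); a
 * directory is recursed into if there is no pattern list or if it falls
 * inside the cone described by ctx->pl, and otherwise is added as a
 * single sparse directory entry.
 */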
static int add_path_to_index(const struct object_id *oid,
			     struct strbuf *base, const char *path,
			     unsigned int mode, void *context)
{
	struct modify_index_context *ctx = (struct modify_index_context *)context;
	struct cache_entry *ce;
	size_t len = base->len;

	if (S_ISDIR(mode)) {
		int dtype;
		size_t baselen = base->len;
		if (!ctx->pl)
			return READ_TREE_RECURSIVE;

		/*
		 * Have we expanded to a point outside of the sparse-checkout?
		 *
		 * Artificially pad the path name with a slash "/" to
		 * indicate it as a directory, and add an arbitrary file
		 * name ("-") so we can consider base->buf as a file name
		 * to match against the cone-mode patterns.
		 *
		 * If we compared just "path", then we would expand more
		 * than we should. Since every file at root is always
		 * included, we would expand every directory at root at
		 * least one level deep instead of using sparse directory
		 * entries.
		 */
		strbuf_addstr(base, path);
		strbuf_add(base, "/-", 2);

		if (path_matches_pattern_list(base->buf, base->len,
					      NULL, &dtype,
					      ctx->pl, ctx->write)) {
			strbuf_setlen(base, baselen);
			return READ_TREE_RECURSIVE;
		}

		/*
		 * The path "{base}{path}/" is a sparse directory. Create the correct
		 * name for inserting the entry into the index.
		 */
		strbuf_setlen(base, base->len - 1);
	} else {
		strbuf_addstr(base, path);
	}

	ce = make_cache_entry(ctx->write, mode, oid, base->buf, 0, 0);
	ce->ce_flags |= CE_SKIP_WORKTREE | CE_EXTENDED;
	set_index_entry(ctx->write, ctx->write->cache_nr++, ce);

	strbuf_setlen(base, len);
	return 0;
}

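/*
 * Expand the sparse directory entries of 'istate' by walking their trees.
 * With a cone-mode pattern list, only the sparse directories that match
 * the new patterns (i.e. fall inside the new cone) are expanded, and the
 * rest stay collapsed. A NULL (or non-cone-mode) pattern list requests a
 * full expansion of every sparse directory entry.
 */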
void expand_index(struct index_state *istate, struct pattern_list *pl)
{
	int i;
	struct index_state *full;
	struct strbuf base = STRBUF_INIT;
	const char *tr_region;
	struct modify_index_context ctx;

	/*
	 * If the index is already full, then keep it full. We will convert
	 * it to a sparse index on write, if possible.
	 */
	if (istate->sparse_index == INDEX_EXPANDED)
		return;

	/*
	 * If our index is sparse, but our new pattern set does not use
	 * cone mode patterns, then we need to expand the index before we
	 * continue. A NULL pattern set indicates a full expansion to a
	 * full index.
	 */
	if (pl && !pl->use_cone_patterns) {
		pl = NULL;
	} else {
		/*
		 * We might contract file entries into sparse-directory
		 * entries, and for that we will need the cache tree to
		 * be recomputed.
		 */
		cache_tree_free(&istate->cache_tree);

		/*
		 * If there is a problem creating the cache tree, then we
		 * need to expand to a full index since we cannot satisfy
		 * the current request as a sparse index.
		 */
		if (cache_tree_update(istate, 0))
			pl = NULL;
	}

	/*
	 * A NULL pattern set indicates we are expanding a full index, so
	 * we use a special region name that indicates the full expansion.
	 * This is used by test cases, but also helps to differentiate the
	 * two cases.
	 */
	tr_region = pl ? "expand_index" : "ensure_full_index";
	trace2_region_enter("index", tr_region, istate->repo);

	/* initialize basics of new index */
	full = xcalloc(1, sizeof(struct index_state));
	memcpy(full, istate, sizeof(struct index_state));

	/*
	 * This slightly-misnamed 'full' index might still be sparse if we
	 * are only modifying the list of sparse directories. This hinges
	 * on whether we have a non-NULL pattern list.
	 */
	full->sparse_index = pl ? INDEX_PARTIALLY_SPARSE : INDEX_EXPANDED;

	/* then change the necessary things */
	full->cache_alloc = (3 * istate->cache_alloc) / 2;
	full->cache_nr = 0;
	ALLOC_ARRAY(full->cache, full->cache_alloc);

	ctx.write = full;
	ctx.pl = pl;

	for (i = 0; i < istate->cache_nr; i++) {
		struct cache_entry *ce = istate->cache[i];
		struct tree *tree;
		struct pathspec ps;
		int dtype;

		if (!S_ISSPARSEDIR(ce->ce_mode)) {
			set_index_entry(full, full->cache_nr++, ce);
			continue;
		}

		/* We now have a sparse directory entry. Should we expand? */
		if (pl &&
		    path_matches_pattern_list(ce->name, ce->ce_namelen,
					      NULL, &dtype,
					      pl, istate) == NOT_MATCHED) {
			set_index_entry(full, full->cache_nr++, ce);
			continue;
		}

		if (!(ce->ce_flags & CE_SKIP_WORKTREE))
			warning(_("index entry is a directory, but not sparse (%08x)"),
				ce->ce_flags);

		/* recursively walk into ce->name */
		tree = lookup_tree(istate->repo, &ce->oid);

		memset(&ps, 0, sizeof(ps));
		ps.recursive = 1;
		ps.has_wildcard = 1;
		ps.max_depth = -1;

		strbuf_setlen(&base, 0);
		strbuf_add(&base, ce->name, strlen(ce->name));

		read_tree_at(istate->repo, tree, &base, &ps,
			     add_path_to_index, &ctx);

		/* free directory entries. full entries are re-used */
		discard_cache_entry(ce);
	}

	/* Copy back into original index. */
	memcpy(&istate->name_hash, &full->name_hash, sizeof(full->name_hash));
	memcpy(&istate->dir_hash, &full->dir_hash, sizeof(full->dir_hash));
	istate->sparse_index = pl ? INDEX_PARTIALLY_SPARSE : INDEX_EXPANDED;
	free(istate->cache);
	istate->cache = full->cache;
	istate->cache_nr = full->cache_nr;
	istate->cache_alloc = full->cache_alloc;
	istate->fsmonitor_has_run_once = 0;
	FREE_AND_NULL(istate->fsmonitor_dirty);
	FREE_AND_NULL(istate->fsmonitor_last_update);

	strbuf_release(&base);
	free(full);

	/* Clear and recompute the cache-tree */
	cache_tree_free(&istate->cache_tree);
	cache_tree_update(istate, 0);

	trace2_region_leave("index", tr_region, istate->repo);
}

void ensure_full_index(struct index_state *istate)
{
	if (!istate)
		BUG("ensure_full_index() must get an index!");
	expand_index(istate, NULL);
}

void ensure_correct_sparsity(struct index_state *istate)
{
	/*
	 * If the index can be sparse, make it sparse. Otherwise,
	 * ensure the index is full.
	 */
	if (is_sparse_index_allowed(istate, 0))
		convert_to_sparse(istate, 0);
	else
		ensure_full_index(istate);
}

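/*
 * Return 1 if 'path' exists on disk, 0 otherwise. The caller passes
 * 'dirname', 'dir_len', and 'dir_found' as a one-element cache of the
 * most recently lstat'ed parent directory; because index entries are
 * sorted, consecutive missing paths usually share a parent, letting us
 * skip most lstat() calls.
 */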
static int path_found(const char *path, const char **dirname, size_t *dir_len,
		      int *dir_found)
{
	struct stat st;
	char *newdir;
	char *tmp;

	/*
	 * If dirname corresponds to a directory that doesn't exist, and this
	 * path starts with dirname, then path can't exist.
	 */
	if (!*dir_found && !memcmp(path, *dirname, *dir_len))
		return 0;

	/*
	 * If path itself exists, return 1.
	 */
	if (!lstat(path, &st))
		return 1;

	/*
	 * Otherwise, path does not exist so we'll return 0...but we'll first
	 * determine some info about its parent directory so we can avoid
	 * lstat calls for future cache entries.
	 */
	newdir = strrchr(path, '/');
	if (!newdir)
		return 0; /* Didn't find a parent dir; just return 0 now. */

	/*
	 * If path starts with directory (which we already lstat'ed and found),
	 * then no need to lstat parent directory again.
	 */
	if (*dir_found && *dirname && !memcmp(path, *dirname, *dir_len))
		return 0;

	/* Free previous dirname, and cache path's dirname */
	*dirname = path;
	*dir_len = newdir - path + 1;

	tmp = xstrndup(path, *dir_len);
	*dir_found = !lstat(tmp, &st);
	free(tmp);

	return 0;
}

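/*
 * Drop the CE_SKIP_WORKTREE flag from any entry whose file is actually
 * present on disk. If a sparse directory entry turns out to exist, the
 * index must be expanded first so its individual files can be examined;
 * in that case the scan restarts from the beginning.
 */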
void clear_skip_worktree_from_present_files(struct index_state *istate)
{
	const char *last_dirname = NULL;
	size_t dir_len = 0;
	int dir_found = 1;

	int i;
	int path_count[2] = {0, 0};
	int restarted = 0;

	if (!core_apply_sparse_checkout ||
	    sparse_expect_files_outside_of_patterns)
		return;

	trace2_region_enter("index", "clear_skip_worktree_from_present_files",
			    istate->repo);
restart:
	for (i = 0; i < istate->cache_nr; i++) {
		struct cache_entry *ce = istate->cache[i];

		if (ce_skip_worktree(ce)) {
			path_count[restarted]++;
			if (path_found(ce->name, &last_dirname, &dir_len, &dir_found)) {
				if (S_ISSPARSEDIR(ce->ce_mode)) {
					if (restarted)
						BUG("ensure-full-index did not fully flatten?");
					ensure_full_index(istate);
					restarted = 1;
					goto restart;
				}
				ce->ce_flags &= ~CE_SKIP_WORKTREE;
			}
		}
	}

	if (path_count[0])
		trace2_data_intmax("index", istate->repo,
				   "sparse_path_count", path_count[0]);
	if (restarted)
		trace2_data_intmax("index", istate->repo,
				   "sparse_path_count_full", path_count[1]);
	trace2_region_leave("index", "clear_skip_worktree_from_present_files",
			    istate->repo);
}

/*
 * This static global helps avoid infinite recursion between
 * expand_to_path() and index_file_exists().
 */
static int in_expand_to_path = 0;

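/*
 * If 'path' is not in the index but one of its parent directories is a
 * sparse directory entry, expand the index so callers can look the path
 * up. Parent directories are probed via the name hash, where only sparse
 * directory entries carry a trailing '/'.
 */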
void expand_to_path(struct index_state *istate,
		    const char *path, size_t pathlen, int icase)
{
	struct strbuf path_mutable = STRBUF_INIT;
	size_t substr_len;

	/* prevent extra recursion */
	if (in_expand_to_path)
		return;

	if (!istate->sparse_index)
		return;

	in_expand_to_path = 1;

	/*
	 * We only need to actually expand a region if the
	 * following are both true:
	 *
	 * 1. 'path' is not already in the index.
	 * 2. Some parent directory of 'path' is a sparse directory.
	 */

	if (index_file_exists(istate, path, pathlen, icase))
		goto cleanup;

	strbuf_add(&path_mutable, path, pathlen);
	strbuf_addch(&path_mutable, '/');

	/* Check the name hash for all parent directories */
	substr_len = 0;
	while (substr_len < pathlen) {
		char temp;
		char *replace = strchr(path_mutable.buf + substr_len, '/');

		if (!replace)
			break;

		/* replace the character _after_ the slash */
		replace++;
		temp = *replace;
		*replace = '\0';
		if (index_file_exists(istate, path_mutable.buf,
				      path_mutable.len, icase)) {
			/*
			 * We found a parent directory in the name-hash
			 * hashtable, because only sparse directory entries
			 * have a trailing '/' character. Since "path" wasn't
			 * in the index, perhaps it exists within this
			 * sparse-directory. Expand accordingly.
			 */
			ensure_full_index(istate);
			break;
		}

		*replace = temp;
		substr_len = replace - path_mutable.buf;
	}

cleanup:
	strbuf_release(&path_mutable);
	in_expand_to_path = 0;
}