#include "git-compat-util.h"
#include "environment.h"
#include "gettext.h"
#include "name-hash.h"
#include "read-cache-ll.h"
#include "repository.h"
#include "sparse-index.h"
#include "tree.h"
#include "pathspec.h"
#include "trace2.h"
#include "cache-tree.h"
#include "config.h"
#include "dir.h"
#include "fsmonitor-ll.h"

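/*
 * Context for modifying an index during a tree walk: 'write' is the
 * index being rebuilt, and 'pl' is the pattern list deciding which
 * directories stay collapsed as sparse directory entries (a NULL
 * pattern list means "expand everything").
 */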
struct modify_index_context {
	struct index_state *write;
	struct pattern_list *pl;
};

static struct cache_entry *construct_sparse_dir_entry(
	struct index_state *istate,
	const char *sparse_dir,
	struct cache_tree *tree)
{
	struct cache_entry *de;

	de = make_cache_entry(istate, S_IFDIR, &tree->oid, sparse_dir, 0, 0);

	de->ce_flags |= CE_SKIP_WORKTREE;
	return de;
}

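/*
 * A sketch of what the recursion below does, using illustrative paths:
 * with a cone containing only "deep/", the run of skip-worktree entries
 * "folder1/a" and "folder1/b" (covered by a valid cache-tree node for
 * "folder1") collapses into one sparse directory entry "folder1/" that
 * carries the OID of that tree.
 */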
/*
 * Returns the number of entries "inserted" into the index.
 */
static int convert_to_sparse_rec(struct index_state *istate,
				 int num_converted,
				 int start, int end,
				 const char *ct_path, size_t ct_pathlen,
				 struct cache_tree *ct)
{
	int i, can_convert = 1;
	int start_converted = num_converted;
	struct strbuf child_path = STRBUF_INIT;

	/*
	 * Is the current path outside of the sparse cone?
	 * Then check if the region can be replaced by a sparse
	 * directory entry (everything is sparse and merged).
	 */
	if (path_in_sparse_checkout(ct_path, istate))
		can_convert = 0;

	for (i = start; can_convert && i < end; i++) {
		struct cache_entry *ce = istate->cache[i];

		if (ce_stage(ce) ||
		    S_ISGITLINK(ce->ce_mode) ||
		    !(ce->ce_flags & CE_SKIP_WORKTREE))
			can_convert = 0;
	}

	if (can_convert) {
		struct cache_entry *se;
		se = construct_sparse_dir_entry(istate, ct_path, ct);

		istate->cache[num_converted++] = se;
		return 1;
	}

	for (i = start; i < end; ) {
		int count, span, pos = -1;
		const char *base, *slash;
		struct cache_entry *ce = istate->cache[i];

		/*
		 * Detect if this is a normal entry outside of any subtree
		 * entry.
		 */
		base = ce->name + ct_pathlen;
		slash = strchr(base, '/');

		if (slash)
			pos = cache_tree_subtree_pos(ct, base, slash - base);

		if (pos < 0) {
			istate->cache[num_converted++] = ce;
			i++;
			continue;
		}

		strbuf_setlen(&child_path, 0);
		strbuf_add(&child_path, ce->name, slash - ce->name + 1);

		span = ct->down[pos]->cache_tree->entry_count;
		count = convert_to_sparse_rec(istate,
					      num_converted, i, i + span,
					      child_path.buf, child_path.len,
					      ct->down[pos]->cache_tree);
		num_converted += count;
		i += span;
	}

	strbuf_release(&child_path);
	return num_converted - start_converted;
}

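/*
 * Persist the sparse-index setting by writing "index.sparse" to the
 * worktree-level config (roughly what "git config --worktree
 * index.sparse <bool>" would do), then mirror it into the in-memory
 * repo settings so the current process sees it immediately.
 */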
int set_sparse_index_config(struct repository *repo, int enable)
{
	int res = repo_config_set_worktree_gently(repo,
						  "index.sparse",
						  enable ? "true" : "false");
	prepare_repo_settings(repo);
	repo->settings.sparse_index = enable;
	return res;
}

static int index_has_unmerged_entries(struct index_state *istate)
{
	int i;
	for (i = 0; i < istate->cache_nr; i++) {
		if (ce_stage(istate->cache[i]))
			return 1;
	}

	return 0;
}

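/*
 * A sparse index requires a cone-mode sparse-checkout. Unless the
 * SPARSE_INDEX_MEMORY_ONLY flag is given, it additionally requires
 * that the repo is not using a split index and that index.sparse is
 * enabled. Returns 1 only if every precondition holds.
 */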
int is_sparse_index_allowed(struct index_state *istate, int flags)
{
	if (!core_apply_sparse_checkout || !core_sparse_checkout_cone)
		return 0;

	if (!(flags & SPARSE_INDEX_MEMORY_ONLY)) {
		int test_env;

		/*
		 * The sparse index is not (yet) integrated with a split index.
		 */
		if (istate->split_index || git_env_bool("GIT_TEST_SPLIT_INDEX", 0))
			return 0;
		/*
		 * The GIT_TEST_SPARSE_INDEX environment variable triggers the
		 * index.sparse config variable to be on.
		 */
		test_env = git_env_bool("GIT_TEST_SPARSE_INDEX", -1);
		if (test_env >= 0)
			set_sparse_index_config(istate->repo, test_env);

		/*
		 * Only convert to sparse if index.sparse is set.
		 */
		prepare_repo_settings(istate->repo);
		if (!istate->repo->settings.sparse_index)
			return 0;
	}

	if (init_sparse_checkout_patterns(istate))
		return 0;

	/*
	 * We need cone-mode patterns to use sparse-index. If a user edits
	 * their sparse-checkout file manually, then we can detect during
	 * parsing that they are not actually using cone-mode patterns and
	 * hence we need to abort this conversion _without error_. Warnings
	 * already exist in the pattern parsing to inform the user of their
	 * bad patterns.
	 */
	if (!istate->sparse_checkout_patterns->use_cone_patterns)
		return 0;

	return 1;
}

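/*
 * Collapse eligible regions of a full index into sparse directory
 * entries. The conversion leans on a fully-valid cache-tree, so one is
 * recomputed here when needed. Note that every exit path returns 0:
 * declining to convert is not an error.
 */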
int convert_to_sparse(struct index_state *istate, int flags)
{
	/*
	 * If the index is already sparse, empty, or otherwise
	 * cannot be converted to sparse, do not convert.
	 */
	if (istate->sparse_index == INDEX_COLLAPSED || !istate->cache_nr ||
	    !is_sparse_index_allowed(istate, flags))
		return 0;

	/*
	 * NEEDSWORK: If we have unmerged entries, then stay full.
	 * Unmerged entries prevent the cache-tree extension from working.
	 */
	if (index_has_unmerged_entries(istate))
		return 0;

	if (!cache_tree_fully_valid(istate->cache_tree)) {
		/* Clear and recompute the cache-tree */
		cache_tree_free(&istate->cache_tree);

		/*
		 * Silently return if there is a problem with the cache tree update,
		 * which might just be due to a conflict state in some entry.
		 *
		 * This might create new tree objects, so be sure to use
		 * WRITE_TREE_MISSING_OK.
		 */
		if (cache_tree_update(istate, WRITE_TREE_MISSING_OK))
			return 0;
	}

	remove_fsmonitor(istate);

	trace2_region_enter("index", "convert_to_sparse", istate->repo);
	istate->cache_nr = convert_to_sparse_rec(istate,
						 0, 0, istate->cache_nr,
						 "", 0, istate->cache_tree);

	/* Clear and recompute the cache-tree */
	cache_tree_free(&istate->cache_tree);
	cache_tree_update(istate, 0);

	istate->fsmonitor_has_run_once = 0;
	FREE_AND_NULL(istate->fsmonitor_dirty);
	FREE_AND_NULL(istate->fsmonitor_last_update);

	istate->sparse_index = INDEX_COLLAPSED;
	trace2_region_leave("index", "convert_to_sparse", istate->repo);
	return 0;
}

static void set_index_entry(struct index_state *istate, int nr, struct cache_entry *ce)
{
	ALLOC_GROW(istate->cache, nr + 1, istate->cache_alloc);

	istate->cache[nr] = ce;
	add_name_hash(istate, ce);
}

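/*
 * read_tree_at() callback used while expanding a sparse directory:
 * every visited path becomes an index entry flagged CE_SKIP_WORKTREE.
 * Directories that fall outside the pattern list are not recursed
 * into; they are recorded as sparse directory entries instead.
 */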
static int add_path_to_index(const struct object_id *oid,
			     struct strbuf *base, const char *path,
			     unsigned int mode, void *context)
{
	struct modify_index_context *ctx = (struct modify_index_context *)context;
	struct cache_entry *ce;
	size_t len = base->len;

	if (S_ISDIR(mode)) {
		int dtype;
		size_t baselen = base->len;
		if (!ctx->pl)
			return READ_TREE_RECURSIVE;

		/*
		 * Have we expanded to a point outside of the sparse-checkout?
		 *
		 * Artificially pad the path name with a slash "/" to
		 * indicate it as a directory, and add an arbitrary file
		 * name ("-") so we can consider base->buf as a file name
		 * to match against the cone-mode patterns.
		 *
		 * If we compared just "path", then we would expand more
		 * than we should. Since every file at root is always
		 * included, we would expand every directory at root at
		 * least one level deep instead of using sparse directory
		 * entries.
		 */
		strbuf_addstr(base, path);
		strbuf_add(base, "/-", 2);

		if (path_matches_pattern_list(base->buf, base->len,
					      NULL, &dtype,
					      ctx->pl, ctx->write)) {
			strbuf_setlen(base, baselen);
			return READ_TREE_RECURSIVE;
		}

		/*
		 * The path "{base}{path}/" is a sparse directory. Create the correct
		 * name for inserting the entry into the index.
		 */
		strbuf_setlen(base, base->len - 1);
	} else {
		strbuf_addstr(base, path);
	}

	ce = make_cache_entry(ctx->write, mode, oid, base->buf, 0, 0);
	ce->ce_flags |= CE_SKIP_WORKTREE | CE_EXTENDED;
	set_index_entry(ctx->write, ctx->write->cache_nr++, ce);

	strbuf_setlen(base, len);
	return 0;
}

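/*
 * Expand the sparse directory entries of 'istate' against the given
 * pattern list: with a cone-mode 'pl' the result is a partially-sparse
 * index matching those patterns; with pl == NULL (or non-cone
 * patterns) the result is a fully-expanded index.
 */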
void expand_index(struct index_state *istate, struct pattern_list *pl)
{
	int i;
	struct index_state *full;
	struct strbuf base = STRBUF_INIT;
	const char *tr_region;
	struct modify_index_context ctx;

	/*
	 * If the index is already full, then keep it full. We will convert
	 * it to a sparse index on write, if possible.
	 */
	if (istate->sparse_index == INDEX_EXPANDED)
		return;

	/*
	 * If our index is sparse, but our new pattern set does not use
	 * cone mode patterns, then we need to expand the index before we
	 * continue. A NULL pattern set indicates a full expansion to a
	 * full index.
	 */
	if (pl && !pl->use_cone_patterns) {
		pl = NULL;
	} else {
		/*
		 * We might contract file entries into sparse-directory
		 * entries, and for that we will need the cache tree to
		 * be recomputed.
		 */
		cache_tree_free(&istate->cache_tree);

		/*
		 * If there is a problem creating the cache tree, then we
		 * need to expand to a full index since we cannot satisfy
		 * the current request as a sparse index.
		 */
		if (cache_tree_update(istate, 0))
			pl = NULL;
	}

	/*
	 * A NULL pattern set indicates we are expanding a full index, so
	 * we use a special region name that indicates the full expansion.
	 * This is used by test cases, but also helps to differentiate the
	 * two cases.
	 */
	tr_region = pl ? "expand_index" : "ensure_full_index";
	trace2_region_enter("index", tr_region, istate->repo);

	/* initialize basics of new index */
	full = xcalloc(1, sizeof(struct index_state));
	memcpy(full, istate, sizeof(struct index_state));

	/*
	 * This slightly-misnamed 'full' index might still be sparse if we
	 * are only modifying the list of sparse directories. This hinges
	 * on whether we have a non-NULL pattern list.
	 */
	full->sparse_index = pl ? INDEX_PARTIALLY_SPARSE : INDEX_EXPANDED;

	/* then change the necessary things */
	full->cache_alloc = (3 * istate->cache_alloc) / 2;
	full->cache_nr = 0;
	ALLOC_ARRAY(full->cache, full->cache_alloc);

	ctx.write = full;
	ctx.pl = pl;

	for (i = 0; i < istate->cache_nr; i++) {
		struct cache_entry *ce = istate->cache[i];
		struct tree *tree;
		struct pathspec ps;
		int dtype;

		if (!S_ISSPARSEDIR(ce->ce_mode)) {
			set_index_entry(full, full->cache_nr++, ce);
			continue;
		}

		/* We now have a sparse directory entry. Should we expand? */
		if (pl &&
		    path_matches_pattern_list(ce->name, ce->ce_namelen,
					      NULL, &dtype,
					      pl, istate) == NOT_MATCHED) {
			set_index_entry(full, full->cache_nr++, ce);
			continue;
		}

		if (!(ce->ce_flags & CE_SKIP_WORKTREE))
			warning(_("index entry is a directory, but not sparse (%08x)"),
				ce->ce_flags);

		/* recursively walk into ce->name */
384 tree = lookup_tree(istate->repo, &ce->oid);
385
386 memset(&ps, 0, sizeof(ps));
387 ps.recursive = 1;
388 ps.has_wildcard = 1;
389 ps.max_depth = -1;
390
391 strbuf_setlen(&base, 0);
392 strbuf_add(&base, ce->name, strlen(ce->name));
393
Jeff King1ee7a5c2023-08-31 02:21:55 -0400394 read_tree_at(istate->repo, tree, &base, 0, &ps,
Derrick Stolee02439302022-05-23 13:48:43 +0000395 add_path_to_index, &ctx);
Derrick Stolee4300f842021-03-30 13:10:48 +0000396
397 /* free directory entries. full entries are re-used */
398 discard_cache_entry(ce);
399 }
400
401 /* Copy back into original index. */
402 memcpy(&istate->name_hash, &full->name_hash, sizeof(full->name_hash));
Jeff Hostetlerd9e9b442021-08-16 17:48:55 +0000403 memcpy(&istate->dir_hash, &full->dir_hash, sizeof(full->dir_hash));
Derrick Stoleeac8acb42022-05-23 13:48:44 +0000404 istate->sparse_index = pl ? INDEX_PARTIALLY_SPARSE : INDEX_EXPANDED;
Derrick Stolee4300f842021-03-30 13:10:48 +0000405 free(istate->cache);
406 istate->cache = full->cache;
407 istate->cache_nr = full->cache_nr;
408 istate->cache_alloc = full->cache_alloc;
Derrick Stoleef8fe49e2021-07-14 13:12:39 +0000409 istate->fsmonitor_has_run_once = 0;
410 FREE_AND_NULL(istate->fsmonitor_dirty);
411 FREE_AND_NULL(istate->fsmonitor_last_update);
Derrick Stolee4300f842021-03-30 13:10:48 +0000412
413 strbuf_release(&base);
414 free(full);
415
Derrick Stolee2de37c52021-03-30 13:11:02 +0000416 /* Clear and recompute the cache-tree */
417 cache_tree_free(&istate->cache_tree);
418 cache_tree_update(istate, 0);
419
Derrick Stoleedce241b2022-05-23 13:48:39 +0000420 trace2_region_leave("index", tr_region, istate->repo);
421}

void ensure_full_index(struct index_state *istate)
{
	if (!istate)
		BUG("ensure_full_index() must get an index!");
	expand_index(istate, NULL);
}

void ensure_correct_sparsity(struct index_state *istate)
{
	/*
	 * If the index can be sparse, make it sparse. Otherwise,
	 * ensure the index is full.
	 */
	if (is_sparse_index_allowed(istate, 0))
		convert_to_sparse(istate, 0);
	else
		ensure_full_index(istate);
}

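/*
 * Report whether 'path' exists on disk, caching what we learn about
 * its parent directory in 'dirname', 'dir_len' and 'dir_found' across
 * calls. Because the caller scans the index in sorted order, e.g.
 * "dir1/a" then "dir1/b" (illustrative names), discovering once that
 * "dir1/" is missing lets us skip the lstat() for every later path
 * beneath it.
 */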
static int path_found(const char *path, const char **dirname, size_t *dir_len,
		      int *dir_found)
{
	struct stat st;
	char *newdir;
	char *tmp;

	/*
	 * If dirname corresponds to a directory that doesn't exist, and this
	 * path starts with dirname, then path can't exist.
	 */
	if (!*dir_found && !memcmp(path, *dirname, *dir_len))
		return 0;

	/*
	 * If path itself exists, return 1.
	 */
	if (!lstat(path, &st))
		return 1;

	/*
	 * Otherwise, path does not exist so we'll return 0...but we'll first
	 * determine some info about its parent directory so we can avoid
	 * lstat calls for future cache entries.
	 */
	newdir = strrchr(path, '/');
	if (!newdir)
		return 0; /* Didn't find a parent dir; just return 0 now. */

	/*
	 * If path starts with directory (which we already lstat'ed and found),
	 * then no need to lstat parent directory again.
	 */
	if (*dir_found && *dirname && memcmp(path, *dirname, *dir_len))
		return 0;

	/* Cache path's dirname (and whether it exists) for the next call */
	*dirname = path;
	*dir_len = newdir - path + 1;

	tmp = xstrndup(path, *dir_len);
	*dir_found = !lstat(tmp, &st);
	free(tmp);

	return 0;
}

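/*
 * Clear the skip-worktree bit from any cache entry whose file turns
 * out to be present in the working tree. If a sparse directory entry
 * is found on disk, the index must be expanded first, so we restart
 * the scan (at most once) after ensure_full_index().
 */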
void clear_skip_worktree_from_present_files(struct index_state *istate)
{
	const char *last_dirname = NULL;
	size_t dir_len = 0;
	int dir_found = 1;

	int i;
	int path_count[2] = {0, 0};
	int restarted = 0;

	if (!core_apply_sparse_checkout ||
	    sparse_expect_files_outside_of_patterns)
		return;

	trace2_region_enter("index", "clear_skip_worktree_from_present_files",
			    istate->repo);
restart:
	for (i = 0; i < istate->cache_nr; i++) {
		struct cache_entry *ce = istate->cache[i];

		if (ce_skip_worktree(ce)) {
			path_count[restarted]++;
			if (path_found(ce->name, &last_dirname, &dir_len, &dir_found)) {
				if (S_ISSPARSEDIR(ce->ce_mode)) {
					if (restarted)
						BUG("ensure-full-index did not fully flatten?");
					ensure_full_index(istate);
					restarted = 1;
					goto restart;
				}
				ce->ce_flags &= ~CE_SKIP_WORKTREE;
			}
		}
	}

	if (path_count[0])
		trace2_data_intmax("index", istate->repo,
				   "sparse_path_count", path_count[0]);
	if (restarted)
		trace2_data_intmax("index", istate->repo,
				   "sparse_path_count_full", path_count[1]);
	trace2_region_leave("index", "clear_skip_worktree_from_present_files",
			    istate->repo);
}

/*
 * This static global helps avoid infinite recursion between
 * expand_to_path() and index_file_exists().
 */
static int in_expand_to_path = 0;

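/*
 * Prepare the index for a lookup of 'path'. A targeted expansion of
 * just the enclosing sparse directory would suffice in principle, but
 * this implementation keeps it simple: once a parent directory of
 * 'path' is found as a sparse directory entry, the whole index is
 * expanded.
 */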
void expand_to_path(struct index_state *istate,
		    const char *path, size_t pathlen, int icase)
{
	struct strbuf path_mutable = STRBUF_INIT;
	size_t substr_len;

	/* prevent extra recursion */
	if (in_expand_to_path)
		return;

	if (!istate->sparse_index)
		return;

	in_expand_to_path = 1;

	/*
	 * We only need to actually expand a region if the
	 * following are both true:
	 *
	 * 1. 'path' is not already in the index.
	 * 2. Some parent directory of 'path' is a sparse directory.
	 */

	if (index_file_exists(istate, path, pathlen, icase))
		goto cleanup;

	strbuf_add(&path_mutable, path, pathlen);
	strbuf_addch(&path_mutable, '/');

	/* Check the name hash for all parent directories */
	substr_len = 0;
	while (substr_len < pathlen) {
		char temp;
		char *replace = strchr(path_mutable.buf + substr_len, '/');

		if (!replace)
			break;

		/* replace the character _after_ the slash */
		replace++;
		temp = *replace;
		*replace = '\0';
		if (index_file_exists(istate, path_mutable.buf,
				      path_mutable.len, icase)) {
			/*
			 * We found a parent directory in the name-hash
			 * hashtable, because only sparse directory entries
			 * have a trailing '/' character. Since "path" wasn't
			 * in the index, perhaps it exists within this
			 * sparse-directory. Expand accordingly.
			 */
			ensure_full_index(istate);
			break;
		}

		*replace = temp;
		substr_len = replace - path_mutable.buf;
	}

cleanup:
	strbuf_release(&path_mutable);
	in_expand_to_path = 0;
}