blob: 65a08d33c73e84428db386fa92641f59cda0b02f [file] [log] [blame]
Derrick Stolee3964fc22021-03-30 13:10:47 +00001#include "cache.h"
2#include "repository.h"
3#include "sparse-index.h"
Derrick Stolee4300f842021-03-30 13:10:48 +00004#include "tree.h"
5#include "pathspec.h"
6#include "trace2.h"
Derrick Stolee6e773522021-03-30 13:10:55 +00007#include "cache-tree.h"
8#include "config.h"
9#include "dir.h"
10#include "fsmonitor.h"
11
/*
 * Context passed through read_tree_at() callbacks while rewriting an
 * index: "write" is the index being built and "pl" is the pattern list
 * that decides which directories stay collapsed (NULL means expand
 * everything).
 */
struct modify_index_context {
	struct index_state *write;
	struct pattern_list *pl;
};
16
Derrick Stolee6e773522021-03-30 13:10:55 +000017static struct cache_entry *construct_sparse_dir_entry(
18 struct index_state *istate,
19 const char *sparse_dir,
20 struct cache_tree *tree)
21{
22 struct cache_entry *de;
23
24 de = make_cache_entry(istate, S_IFDIR, &tree->oid, sparse_dir, 0, 0);
25
26 de->ce_flags |= CE_SKIP_WORKTREE;
27 return de;
28}
29
/*
 * Collapse the index region [start, end) of "istate", which lives under
 * the cache-tree "ct" (whose path is "ct_path" of length "ct_pathlen",
 * including the trailing '/'), into sparse form. Surviving entries are
 * written back into istate->cache starting at position num_converted.
 *
 * Returns the number of entries "inserted" into the index.
 */
static int convert_to_sparse_rec(struct index_state *istate,
				 int num_converted,
				 int start, int end,
				 const char *ct_path, size_t ct_pathlen,
				 struct cache_tree *ct)
{
	int i, can_convert = 1;
	int start_converted = num_converted;
	struct strbuf child_path = STRBUF_INIT;

	/*
	 * Is the current path outside of the sparse cone?
	 * Then check if the region can be replaced by a sparse
	 * directory entry (everything is sparse and merged).
	 */
	if (path_in_sparse_checkout(ct_path, istate))
		can_convert = 0;

	for (i = start; can_convert && i < end; i++) {
		struct cache_entry *ce = istate->cache[i];

		/*
		 * Unmerged entries, submodules, and entries present in the
		 * worktree all prevent collapsing this region.
		 */
		if (ce_stage(ce) ||
		    S_ISGITLINK(ce->ce_mode) ||
		    !(ce->ce_flags & CE_SKIP_WORKTREE))
			can_convert = 0;
	}

	if (can_convert) {
		/* Replace the whole region with one sparse-directory entry. */
		struct cache_entry *se;
		se = construct_sparse_dir_entry(istate, ct_path, ct);

		istate->cache[num_converted++] = se;
		return 1;
	}

	for (i = start; i < end; ) {
		int count, span, pos = -1;
		const char *base, *slash;
		struct cache_entry *ce = istate->cache[i];

		/*
		 * Detect if this is a normal entry outside of any subtree
		 * entry.
		 */
		base = ce->name + ct_pathlen;
		slash = strchr(base, '/');

		if (slash)
			pos = cache_tree_subtree_pos(ct, base, slash - base);

		if (pos < 0) {
			/* Not under a known subtree: keep the entry as-is. */
			istate->cache[num_converted++] = ce;
			i++;
			continue;
		}

		/* Recurse into the subtree that covers the next "span" entries. */
		strbuf_setlen(&child_path, 0);
		strbuf_add(&child_path, ce->name, slash - ce->name + 1);

		span = ct->down[pos]->cache_tree->entry_count;
		count = convert_to_sparse_rec(istate,
					      num_converted, i, i + span,
					      child_path.buf, child_path.len,
					      ct->down[pos]->cache_tree);
		num_converted += count;
		i += span;
	}

	strbuf_release(&child_path);
	return num_converted - start_converted;
}
104
Ævar Arnfjörð Bjarmasonb79f9c02021-05-05 14:11:58 +0200105int set_sparse_index_config(struct repository *repo, int enable)
Derrick Stolee58300f42021-03-30 13:10:59 +0000106{
Derrick Stolee7316dc52022-02-07 21:33:01 +0000107 int res = repo_config_set_worktree_gently(repo,
108 "index.sparse",
109 enable ? "true" : "false");
Derrick Stolee58300f42021-03-30 13:10:59 +0000110 prepare_repo_settings(repo);
Derrick Stolee122ba1f2021-03-30 13:11:00 +0000111 repo->settings.sparse_index = enable;
112 return res;
Derrick Stolee58300f42021-03-30 13:10:59 +0000113}
114
Derrick Stoleefc6609d2021-07-14 13:12:25 +0000115static int index_has_unmerged_entries(struct index_state *istate)
116{
117 int i;
118 for (i = 0; i < istate->cache_nr; i++) {
119 if (ce_stage(istate->cache[i]))
120 return 1;
121 }
122
123 return 0;
124}
125
/*
 * Can "istate" be converted into a sparse index? Requires cone-mode
 * sparse-checkout with parseable cone patterns. Unless the
 * SPARSE_INDEX_MEMORY_ONLY flag is set, additionally requires that no
 * split index is in use and that index.sparse is enabled (possibly
 * forced by the GIT_TEST_SPARSE_INDEX environment variable).
 * Returns 1 if allowed, 0 otherwise.
 */
int is_sparse_index_allowed(struct index_state *istate, int flags)
{
	if (!core_apply_sparse_checkout || !core_sparse_checkout_cone)
		return 0;

	if (!istate->repo)
		istate->repo = the_repository;

	if (!(flags & SPARSE_INDEX_MEMORY_ONLY)) {
		int test_env;

		/*
		 * The sparse index is not (yet) integrated with a split index.
		 */
		if (istate->split_index || git_env_bool("GIT_TEST_SPLIT_INDEX", 0))
			return 0;
		/*
		 * The GIT_TEST_SPARSE_INDEX environment variable triggers the
		 * index.sparse config variable to be on.
		 */
		test_env = git_env_bool("GIT_TEST_SPARSE_INDEX", -1);
		if (test_env >= 0)
			set_sparse_index_config(istate->repo, test_env);

		/*
		 * Only convert to sparse if index.sparse is set.
		 */
		prepare_repo_settings(istate->repo);
		if (!istate->repo->settings.sparse_index)
			return 0;
	}

	/* A non-zero return means the patterns could not be loaded. */
	if (init_sparse_checkout_patterns(istate))
		return 0;

	/*
	 * We need cone-mode patterns to use sparse-index. If a user edits
	 * their sparse-checkout file manually, then we can detect during
	 * parsing that they are not actually using cone-mode patterns and
	 * hence we need to abort this conversion _without error_. Warnings
	 * already exist in the pattern parsing to inform the user of their
	 * bad patterns.
	 */
	if (!istate->sparse_checkout_patterns->use_cone_patterns)
		return 0;

	return 1;
}
174
/*
 * Collapse "istate" into a sparse index, replacing regions that are
 * entirely sparse with single sparse-directory entries. Always returns
 * 0, including when conversion is skipped (not allowed, empty index,
 * unmerged entries, or cache-tree trouble).
 */
int convert_to_sparse(struct index_state *istate, int flags)
{
	/*
	 * If the index is already sparse, empty, or otherwise
	 * cannot be converted to sparse, do not convert.
	 */
	if (istate->sparse_index == INDEX_COLLAPSED || !istate->cache_nr ||
	    !is_sparse_index_allowed(istate, flags))
		return 0;

	/*
	 * NEEDSWORK: If we have unmerged entries, then stay full.
	 * Unmerged entries prevent the cache-tree extension from working.
	 */
	if (index_has_unmerged_entries(istate))
		return 0;

	if (!cache_tree_fully_valid(istate->cache_tree)) {
		/* Clear and recompute the cache-tree */
		cache_tree_free(&istate->cache_tree);

		/*
		 * Silently return if there is a problem with the cache tree update,
		 * which might just be due to a conflict state in some entry.
		 *
		 * This might create new tree objects, so be sure to use
		 * WRITE_TREE_MISSING_OK.
		 */
		if (cache_tree_update(istate, WRITE_TREE_MISSING_OK))
			return 0;
	}

	/* The fsmonitor extension is dropped before collapsing entries. */
	remove_fsmonitor(istate);

	trace2_region_enter("index", "convert_to_sparse", istate->repo);
	istate->cache_nr = convert_to_sparse_rec(istate,
						 0, 0, istate->cache_nr,
						 "", 0, istate->cache_tree);

	/* Clear and recompute the cache-tree */
	cache_tree_free(&istate->cache_tree);
	cache_tree_update(istate, 0);

	/* Entry positions changed, so reset fsmonitor bookkeeping. */
	istate->fsmonitor_has_run_once = 0;
	FREE_AND_NULL(istate->fsmonitor_dirty);
	FREE_AND_NULL(istate->fsmonitor_last_update);

	istate->sparse_index = INDEX_COLLAPSED;
	trace2_region_leave("index", "convert_to_sparse", istate->repo);
	return 0;
}
Derrick Stolee4300f842021-03-30 13:10:48 +0000226
227static void set_index_entry(struct index_state *istate, int nr, struct cache_entry *ce)
228{
229 ALLOC_GROW(istate->cache, nr + 1, istate->cache_alloc);
230
231 istate->cache[nr] = ce;
232 add_name_hash(istate, ce);
233}
234
/*
 * read_tree_at() callback: add the tree entry "{base}{path}" to the
 * index in ctx->write. Directories either trigger recursion (when
 * ctx->pl is NULL, or when they match the pattern list) or become
 * sparse-directory entries. Returns READ_TREE_RECURSIVE to descend
 * into a directory, 0 otherwise. "base" is restored before returning.
 */
static int add_path_to_index(const struct object_id *oid,
			     struct strbuf *base, const char *path,
			     unsigned int mode, void *context)
{
	struct modify_index_context *ctx = (struct modify_index_context *)context;
	struct cache_entry *ce;
	size_t len = base->len;

	if (S_ISDIR(mode)) {
		int dtype;
		size_t baselen = base->len;
		/* No pattern list: full expansion, recurse unconditionally. */
		if (!ctx->pl)
			return READ_TREE_RECURSIVE;

		/*
		 * Have we expanded to a point outside of the sparse-checkout?
		 *
		 * Artificially pad the path name with a slash "/" to
		 * indicate it as a directory, and add an arbitrary file
		 * name ("-") so we can consider base->buf as a file name
		 * to match against the cone-mode patterns.
		 *
		 * If we compared just "path", then we would expand more
		 * than we should. Since every file at root is always
		 * included, we would expand every directory at root at
		 * least one level deep instead of using sparse directory
		 * entries.
		 */
		strbuf_addstr(base, path);
		strbuf_add(base, "/-", 2);

		if (path_matches_pattern_list(base->buf, base->len,
					      NULL, &dtype,
					      ctx->pl, ctx->write)) {
			/* Inside the cone: undo the padding and recurse. */
			strbuf_setlen(base, baselen);
			return READ_TREE_RECURSIVE;
		}

		/*
		 * The path "{base}{path}/" is a sparse directory. Create the correct
		 * name for inserting the entry into the index.
		 */
		strbuf_setlen(base, base->len - 1);
	} else {
		strbuf_addstr(base, path);
	}

	ce = make_cache_entry(ctx->write, mode, oid, base->buf, 0, 0);
	ce->ce_flags |= CE_SKIP_WORKTREE | CE_EXTENDED;
	set_index_entry(ctx->write, ctx->write->cache_nr++, ce);

	/* Restore "base" so the caller's traversal continues unaffected. */
	strbuf_setlen(base, len);
	return 0;
}
Derrick Stolee3964fc22021-03-30 13:10:47 +0000289
/*
 * Expand the sparse-directory entries of "istate" against pattern list
 * "pl": sparse directories that still match "pl" stay collapsed; all
 * others are populated from their tree objects into individual file
 * entries. A NULL "pl" (or a non-cone-mode "pl") requests a full
 * expansion of the index.
 */
void expand_index(struct index_state *istate, struct pattern_list *pl)
{
	int i;
	struct index_state *full;
	struct strbuf base = STRBUF_INIT;
	const char *tr_region;
	struct modify_index_context ctx;

	/*
	 * If the index is already full, then keep it full. We will convert
	 * it to a sparse index on write, if possible.
	 */
	if (!istate || istate->sparse_index == INDEX_EXPANDED)
		return;

	/*
	 * If our index is sparse, but our new pattern set does not use
	 * cone mode patterns, then we need to expand the index before we
	 * continue. A NULL pattern set indicates a full expansion to a
	 * full index.
	 */
	if (pl && !pl->use_cone_patterns) {
		pl = NULL;
	} else {
		/*
		 * We might contract file entries into sparse-directory
		 * entries, and for that we will need the cache tree to
		 * be recomputed.
		 */
		cache_tree_free(&istate->cache_tree);

		/*
		 * If there is a problem creating the cache tree, then we
		 * need to expand to a full index since we cannot satisfy
		 * the current request as a sparse index.
		 */
		if (cache_tree_update(istate, 0))
			pl = NULL;
	}

	if (!istate->repo)
		istate->repo = the_repository;

	/*
	 * A NULL pattern set indicates we are expanding a full index, so
	 * we use a special region name that indicates the full expansion.
	 * This is used by test cases, but also helps to differentiate the
	 * two cases.
	 */
	tr_region = pl ? "expand_index" : "ensure_full_index";
	trace2_region_enter("index", tr_region, istate->repo);

	/* initialize basics of new index */
	full = xcalloc(1, sizeof(struct index_state));
	memcpy(full, istate, sizeof(struct index_state));

	/*
	 * This slightly-misnamed 'full' index might still be sparse if we
	 * are only modifying the list of sparse directories. This hinges
	 * on whether we have a non-NULL pattern list.
	 */
	full->sparse_index = pl ? INDEX_PARTIALLY_SPARSE : INDEX_EXPANDED;

	/* then change the necessary things */
	full->cache_alloc = (3 * istate->cache_alloc) / 2;
	full->cache_nr = 0;
	ALLOC_ARRAY(full->cache, full->cache_alloc);

	ctx.write = full;
	ctx.pl = pl;

	for (i = 0; i < istate->cache_nr; i++) {
		struct cache_entry *ce = istate->cache[i];
		struct tree *tree;
		struct pathspec ps;
		int dtype;

		if (!S_ISSPARSEDIR(ce->ce_mode)) {
			/* Regular entries are carried over unchanged. */
			set_index_entry(full, full->cache_nr++, ce);
			continue;
		}

		/* We now have a sparse directory entry. Should we expand? */
		if (pl &&
		    path_matches_pattern_list(ce->name, ce->ce_namelen,
					      NULL, &dtype,
					      pl, istate) == NOT_MATCHED) {
			/* Still outside the new cone: keep it collapsed. */
			set_index_entry(full, full->cache_nr++, ce);
			continue;
		}

		if (!(ce->ce_flags & CE_SKIP_WORKTREE))
			warning(_("index entry is a directory, but not sparse (%08x)"),
				ce->ce_flags);

		/* recursively walk into cd->name */
		tree = lookup_tree(istate->repo, &ce->oid);

		memset(&ps, 0, sizeof(ps));
		ps.recursive = 1;
		ps.has_wildcard = 1;
		ps.max_depth = -1;

		strbuf_setlen(&base, 0);
		strbuf_add(&base, ce->name, strlen(ce->name));

		read_tree_at(istate->repo, tree, &base, &ps,
			     add_path_to_index, &ctx);

		/* free directory entries. full entries are re-used */
		discard_cache_entry(ce);
	}

	/* Copy back into original index. */
	memcpy(&istate->name_hash, &full->name_hash, sizeof(full->name_hash));
	memcpy(&istate->dir_hash, &full->dir_hash, sizeof(full->dir_hash));
	istate->sparse_index = pl ? INDEX_PARTIALLY_SPARSE : INDEX_EXPANDED;
	free(istate->cache);
	istate->cache = full->cache;
	istate->cache_nr = full->cache_nr;
	istate->cache_alloc = full->cache_alloc;
	/* Entry positions changed, so reset fsmonitor bookkeeping. */
	istate->fsmonitor_has_run_once = 0;
	FREE_AND_NULL(istate->fsmonitor_dirty);
	FREE_AND_NULL(istate->fsmonitor_last_update);

	strbuf_release(&base);
	free(full);

	/* Clear and recompute the cache-tree */
	cache_tree_free(&istate->cache_tree);
	cache_tree_update(istate, 0);

	trace2_region_leave("index", tr_region, istate->repo);
}
424
425void ensure_full_index(struct index_state *istate)
426{
427 expand_index(istate, NULL);
Derrick Stolee3964fc22021-03-30 13:10:47 +0000428}
Derrick Stolee71f82d02021-04-12 21:08:16 +0000429
/*
 * If the index can be sparse, make it sparse. Otherwise,
 * ensure the index is full.
 */
void ensure_correct_sparsity(struct index_state *istate)
{
	if (!is_sparse_index_allowed(istate, 0)) {
		ensure_full_index(istate);
		return;
	}
	convert_to_sparse(istate, 0);
}
441
/*
 * Determine whether "path" exists on disk, caching information about
 * its parent directory across calls so that runs of cache entries in
 * the same directory avoid repeated lstat() calls.
 *
 * The cache is carried in the caller's variables: *dirname points at
 * the most recently examined parent-directory prefix (it aliases a
 * previously passed "path"; nothing is allocated), *dir_len is that
 * prefix's length including the trailing '/', and *dir_found records
 * whether the directory exists. Callers must initialize them to
 * NULL / 0 / 1 respectively.
 *
 * Returns 1 if "path" exists, 0 otherwise.
 */
static int path_found(const char *path, const char **dirname, size_t *dir_len,
		      int *dir_found)
{
	struct stat st;
	char *newdir;
	char *tmp;

	/*
	 * If dirname corresponds to a directory that doesn't exist, and this
	 * path starts with dirname, then path can't exist.
	 */
	if (!*dir_found && !memcmp(path, *dirname, *dir_len))
		return 0;

	/*
	 * If path itself exists, return 1.
	 */
	if (!lstat(path, &st))
		return 1;

	/*
	 * Otherwise, path does not exist so we'll return 0...but we'll first
	 * determine some info about its parent directory so we can avoid
	 * lstat calls for future cache entries.
	 */
	newdir = strrchr(path, '/');
	if (!newdir)
		return 0; /* Didn't find a parent dir; just return 0 now. */

	/*
	 * If path starts with directory (which we already lstat'ed and found),
	 * then no need to lstat parent directory again.
	 *
	 * Fix: this must test that the prefixes MATCH (!memcmp). The previous
	 * code returned early on a MISmatch, which skipped the cache update
	 * below (so *dirname never advanced) while re-stat'ing directories
	 * already known to exist — defeating the cache entirely.
	 */
	if (*dir_found && *dirname && !memcmp(path, *dirname, *dir_len))
		return 0;

	/* Cache path's dirname (the pointer aliases "path"; no allocation). */
	*dirname = path;
	*dir_len = newdir - path + 1;

	tmp = xstrndup(path, *dir_len);
	*dir_found = !lstat(tmp, &st);
	free(tmp);

	return 0;
}
488
/*
 * Drop the CE_SKIP_WORKTREE bit from any cache entry whose file is
 * actually present on disk. If a sparse-directory entry turns out to
 * exist in the worktree, the index is fully expanded and the scan
 * restarts once (a second occurrence is a BUG).
 */
void clear_skip_worktree_from_present_files(struct index_state *istate)
{
	/* lstat cache for path_found(); see that function's contract. */
	const char *last_dirname = NULL;
	size_t dir_len = 0;
	int dir_found = 1;

	int i;
	/* [0] = skip-worktree paths seen initially, [1] = after restart. */
	int path_count[2] = {0, 0};
	int restarted = 0;

	if (!core_apply_sparse_checkout ||
	    sparse_expect_files_outside_of_patterns)
		return;

	trace2_region_enter("index", "clear_skip_worktree_from_present_files",
			    istate->repo);
restart:
	for (i = 0; i < istate->cache_nr; i++) {
		struct cache_entry *ce = istate->cache[i];

		if (ce_skip_worktree(ce)) {
			path_count[restarted]++;
			if (path_found(ce->name, &last_dirname, &dir_len, &dir_found)) {
				if (S_ISSPARSEDIR(ce->ce_mode)) {
					if (restarted)
						BUG("ensure-full-index did not fully flatten?");
					/* Sparse dir exists on disk: expand and rescan. */
					ensure_full_index(istate);
					restarted = 1;
					goto restart;
				}
				ce->ce_flags &= ~CE_SKIP_WORKTREE;
			}
		}
	}

	if (path_count[0])
		trace2_data_intmax("index", istate->repo,
				   "sparse_path_count", path_count[0]);
	if (restarted)
		trace2_data_intmax("index", istate->repo,
				   "sparse_path_count_full", path_count[1]);
	trace2_region_leave("index", "clear_skip_worktree_from_present_files",
			    istate->repo);
}
533
/*
 * This static global helps avoid infinite recursion between
 * expand_to_path() and index_file_exists().
 */
static int in_expand_to_path = 0;

/*
 * Expand a sparse index just enough that "path" (length "pathlen",
 * possibly absent from the index) can be looked up: if some parent
 * directory of "path" exists as a sparse-directory entry, the index is
 * fully expanded. "icase" requests case-insensitive name-hash lookups.
 */
void expand_to_path(struct index_state *istate,
		    const char *path, size_t pathlen, int icase)
{
	struct strbuf path_mutable = STRBUF_INIT;
	size_t substr_len;

	/* prevent extra recursion */
	if (in_expand_to_path)
		return;

	if (!istate->sparse_index)
		return;

	if (!istate->repo)
		istate->repo = the_repository;

	in_expand_to_path = 1;

	/*
	 * We only need to actually expand a region if the
	 * following are both true:
	 *
	 * 1. 'path' is not already in the index.
	 * 2. Some parent directory of 'path' is a sparse directory.
	 */

	if (index_file_exists(istate, path, pathlen, icase))
		goto cleanup;

	strbuf_add(&path_mutable, path, pathlen);
	strbuf_addch(&path_mutable, '/');

	/* Check the name hash for all parent directories */
	substr_len = 0;
	while (substr_len < pathlen) {
		char temp;
		char *replace = strchr(path_mutable.buf + substr_len, '/');

		if (!replace)
			break;

		/* replace the character _after_ the slash */
		replace++;
		temp = *replace;
		*replace = '\0';
		if (index_file_exists(istate, path_mutable.buf,
				      path_mutable.len, icase)) {
			/*
			 * We found a parent directory in the name-hash
			 * hashtable, because only sparse directory entries
			 * have a trailing '/' character.  Since "path" wasn't
			 * in the index, perhaps it exists within this
			 * sparse-directory.  Expand accordingly.
			 */
			ensure_full_index(istate);
			break;
		}

		/* Restore the byte and continue with the next component. */
		*replace = temp;
		substr_len = replace - path_mutable.buf;
	}

cleanup:
	strbuf_release(&path_mutable);
	in_expand_to_path = 0;
}