block alloc: add lifecycle APIs for cache_entry structs
It has been observed that the time spent loading an index with a large
number of entries is partly dominated by malloc() calls. This change
is in preparation for using memory pools to reduce the number of
malloc() calls made to allocate cache entries when loading an index.
Add an API to allocate and discard cache entries, abstracting the
details of managing the memory backing the cache entries. This commit
does not actually change how the memory is managed - that will be done
in a later commit in the series.
This change distinguishes between cache entries that are associated
with an index and cache entries that are not. The main use of cache
entries is with an index, and we can optimize the memory management
around this case. We still have other cases where a cache entry is not
persisted with an index, so we need to handle the "transient" use case
as well.
To keep the cognitive overhead of managing cache entries low, there
will only be a single discard function. This means enough information
must be kept with each cache entry so that we know how to discard it.
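One plausible way to keep that information, sketched below, is a flag
on the entry recording whether it was handed out by a memory pool. This
is an assumption for illustration (the mem_pool_allocated field is
hypothetical here), not the mechanism introduced by this patch:

    void discard_cache_entry(struct cache_entry *ce)
    {
            /*
             * Illustrative sketch only: assume the entry carries a
             * mem_pool_allocated flag recording its origin.
             */
            if (!ce)
                    return;
            if (ce->mem_pool_allocated)
                    return; /* pool-backed memory is freed with the pool */
            free(ce);
    }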
A summary of the main functions in the API is:
make_cache_entry: Create a cache entry for use in an index. Uses the
                  specified parameters to populate the cache_entry
                  fields.
make_empty_cache_entry: Create an empty cache entry for use in an
                        index. Returns a cache entry with empty fields.
make_transient_cache_entry: Create a cache entry that is not used in an
                            index. Uses the specified parameters to
                            populate the cache_entry fields.
make_empty_transient_cache_entry: Create a cache entry that is not used
                                  in an index. Returns a cache entry
                                  with empty fields.
discard_cache_entry: A single function that knows how to discard a
                     cache entry regardless of how it was allocated.
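For reference, the API surface looks roughly like the sketch below. The
make_empty_* and discard_cache_entry() signatures can be read off the
call sites in the diff that follows; the parameter lists of
make_cache_entry() and make_transient_cache_entry() are illustrative
assumptions, not taken from this diff:

    /* Entries associated with an index. */
    struct cache_entry *make_cache_entry(struct index_state *istate,
                                         unsigned int mode,
                                         const struct object_id *oid,
                                         const char *path, int stage,
                                         unsigned int refresh_options);
    struct cache_entry *make_empty_cache_entry(struct index_state *istate,
                                               size_t name_len);

    /* "Transient" entries that are never persisted with an index. */
    struct cache_entry *make_transient_cache_entry(unsigned int mode,
                                                   const struct object_id *oid,
                                                   const char *path,
                                                   int stage);
    struct cache_entry *make_empty_transient_cache_entry(size_t name_len);

    /* Single discard path, regardless of how the entry was allocated. */
    void discard_cache_entry(struct cache_entry *ce);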
Signed-off-by: Jameson Miller <jamill@microsoft.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
diff --git a/unpack-trees.c b/unpack-trees.c
index 3a85a02..33cba55 100644
--- a/unpack-trees.c
+++ b/unpack-trees.c
@@ -203,10 +203,10 @@ static int do_add_entry(struct unpack_trees_options *o, struct cache_entry *ce,
ADD_CACHE_OK_TO_ADD | ADD_CACHE_OK_TO_REPLACE);
}
-static struct cache_entry *dup_entry(const struct cache_entry *ce)
+static struct cache_entry *dup_entry(const struct cache_entry *ce, struct index_state *istate)
{
unsigned int size = ce_size(ce);
- struct cache_entry *new_entry = xmalloc(size);
+ struct cache_entry *new_entry = make_empty_cache_entry(istate, ce_namelen(ce));
memcpy(new_entry, ce, size);
return new_entry;
@@ -216,7 +216,7 @@ static void add_entry(struct unpack_trees_options *o,
const struct cache_entry *ce,
unsigned int set, unsigned int clear)
{
- do_add_entry(o, dup_entry(ce), set, clear);
+ do_add_entry(o, dup_entry(ce, &o->result), set, clear);
}
/*
@@ -797,10 +797,17 @@ static int ce_in_traverse_path(const struct cache_entry *ce,
return (info->pathlen < ce_namelen(ce));
}
-static struct cache_entry *create_ce_entry(const struct traverse_info *info, const struct name_entry *n, int stage)
+static struct cache_entry *create_ce_entry(const struct traverse_info *info,
+ const struct name_entry *n,
+ int stage,
+ struct index_state *istate,
+ int is_transient)
{
int len = traverse_path_len(info, n);
- struct cache_entry *ce = xcalloc(1, cache_entry_size(len));
+ struct cache_entry *ce =
+ is_transient ?
+ make_empty_transient_cache_entry(len) :
+ make_empty_cache_entry(istate, len);
ce->ce_mode = create_ce_mode(n->mode);
ce->ce_flags = create_ce_flags(stage);
@@ -846,7 +853,15 @@ static int unpack_nondirectories(int n, unsigned long mask,
stage = 3;
else
stage = 2;
- src[i + o->merge] = create_ce_entry(info, names + i, stage);
+
+ /*
+ * If the merge bit is set, then the cache entries are
+ * discarded in the following block. In this case,
+ * construct "transient" cache_entries, as they are
+ * not stored in the index. Otherwise, construct the
+ * cache entry using the index-aware logic.
+ */
+ src[i + o->merge] = create_ce_entry(info, names + i, stage, &o->result, o->merge);
}
if (o->merge) {
@@ -855,7 +870,7 @@ static int unpack_nondirectories(int n, unsigned long mask,
for (i = 0; i < n; i++) {
struct cache_entry *ce = src[i + o->merge];
if (ce != o->df_conflict_entry)
- free(ce);
+ discard_cache_entry(ce);
}
return rc;
}
@@ -1787,7 +1802,7 @@ static int merged_entry(const struct cache_entry *ce,
struct unpack_trees_options *o)
{
int update = CE_UPDATE;
- struct cache_entry *merge = dup_entry(ce);
+ struct cache_entry *merge = dup_entry(ce, &o->result);
if (!old) {
/*
@@ -1807,7 +1822,7 @@ static int merged_entry(const struct cache_entry *ce,
if (verify_absent(merge,
ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o)) {
- free(merge);
+ discard_cache_entry(merge);
return -1;
}
invalidate_ce_path(merge, o);
@@ -1833,7 +1848,7 @@ static int merged_entry(const struct cache_entry *ce,
update = 0;
} else {
if (verify_uptodate(old, o)) {
- free(merge);
+ discard_cache_entry(merge);
return -1;
}
/* Migrate old flags over */