/*
 * alloc.c - specialized allocator for internal objects
 *
 * Copyright (C) 2006 Linus Torvalds
 *
 * The standard malloc/free wastes too much space for these objects: partly
 * because it keeps its own per-allocation bookkeeping, but even more because
 * it rounds every allocation up to maximal alignment, since it cannot know
 * what alignment the new object actually needs.
 */
#include "git-compat-util.h"
#include "object.h"
#include "blob.h"
#include "tree.h"
#include "commit.h"
#include "repository.h"
#include "tag.h"
#include "alloc.h"

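/* Number of nodes in each slab allocated by alloc_node(). */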
#define BLOCKING 1024

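/* A node large enough to hold any object type; see alloc_object_node(). */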
union any_object {
	struct object object;
	struct blob blob;
	struct tree tree;
	struct commit commit;
	struct tag tag;
};

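/*
 * Per-object-type allocator state: nodes are handed out from the current
 * slab, and every slab ever allocated is tracked so clear_alloc_state()
 * can release them all at once.
 */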
struct alloc_state {
	int nr;		/* number of nodes left in current allocation */
	void *p;	/* first free node in current allocation */

	/* bookkeeping of allocations */
	void **slabs;
	int slab_nr, slab_alloc;
};

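/*
 * Constructor for one allocator arena.  Each repository keeps one of these
 * per object type in r->parsed_objects, which the alloc_*_node() helpers
 * below draw from.
 */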
struct alloc_state *allocate_alloc_state(void)
{
	return xcalloc(1, sizeof(struct alloc_state));
}

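/*
 * Release every slab owned by this allocator.  Individual nodes are never
 * freed; the whole arena is torn down at once.
 */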
void clear_alloc_state(struct alloc_state *s)
{
	while (s->slab_nr > 0) {
		s->slab_nr--;
		free(s->slabs[s->slab_nr]);
	}

	FREE_AND_NULL(s->slabs);
}

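/*
 * Return one zeroed node of node_size bytes.  When the current slab is
 * exhausted, a fresh slab of BLOCKING nodes is allocated and recorded in
 * s->slabs so clear_alloc_state() can free it later.
 */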
static inline void *alloc_node(struct alloc_state *s, size_t node_size)
{
	void *ret;

	if (!s->nr) {
		s->nr = BLOCKING;
		s->p = xmalloc(BLOCKING * node_size);

		ALLOC_GROW(s->slabs, s->slab_nr + 1, s->slab_alloc);
		s->slabs[s->slab_nr++] = s->p;
	}
	s->nr--;
	ret = s->p;
	s->p = (char *)s->p + node_size;
	memset(ret, 0, node_size);

	return ret;
}

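/*
 * Typed front ends: each hands out a node from the matching per-repository
 * state and tags it with its object type.  A minimal usage sketch (in the
 * rest of Git these are normally reached through object.c's lookup_*
 * helpers rather than called directly):
 *
 *	struct blob *b = alloc_blob_node(the_repository);
 */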
void *alloc_blob_node(struct repository *r)
{
	struct blob *b = alloc_node(r->parsed_objects->blob_state, sizeof(struct blob));
	b->object.type = OBJ_BLOB;
	return b;
}

void *alloc_tree_node(struct repository *r)
{
	struct tree *t = alloc_node(r->parsed_objects->tree_state, sizeof(struct tree));
	t->object.type = OBJ_TREE;
	return t;
}

void *alloc_tag_node(struct repository *r)
{
	struct tag *t = alloc_node(r->parsed_objects->tag_state, sizeof(struct tag));
	t->object.type = OBJ_TAG;
	return t;
}

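/*
 * Objects whose type is not yet known are allocated at the size of union
 * any_object, so the same node can later serve as whichever concrete type
 * the object turns out to be.
 */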
void *alloc_object_node(struct repository *r)
{
	struct object *obj = alloc_node(r->parsed_objects->object_state, sizeof(union any_object));
	obj->type = OBJ_NONE;
	return obj;
}

/*
 * The returned count is used as an index into commit slabs, which are
 * *NOT* maintained per repository; that is why a single global counter
 * is used.
 */
static unsigned int alloc_commit_index(void)
{
	static unsigned int parsed_commits_count;
	return parsed_commits_count++;
}

void init_commit_node(struct commit *c)
{
	c->object.type = OBJ_COMMIT;
	c->index = alloc_commit_index();
}

void *alloc_commit_node(struct repository *r)
{
	struct commit *c = alloc_node(r->parsed_objects->commit_state, sizeof(struct commit));
	init_commit_node(c);
	return c;
}