/*
 * alloc.c - specialized allocator for internal objects
 *
 * Copyright (C) 2006 Linus Torvalds
 *
 * The standard malloc/free wastes too much space for objects, partly because
 * it maintains all the allocation infrastructure, but even more because it ends
 * up with maximal alignment because it doesn't know what the object alignment
 * for the new allocation is.
 */
#include "cache.h"
#include "object.h"
#include "blob.h"
#include "tree.h"
#include "commit.h"
#include "tag.h"
#include "alloc.h"

#define BLOCKING 1024
/*
 * Large enough to hold any of the parsed object types; used to size
 * the nodes handed out by alloc_object_node() so an object of unknown
 * type can later be filled in as any of them.
 */
union any_object {
	struct object object;
	struct blob blob;
	struct tree tree;
	struct commit commit;
	struct tag tag;
};
28
/*
 * Per-object-type allocator state: nodes are carved sequentially out of
 * slabs of BLOCKING nodes each, and every slab is remembered so it can
 * be released in clear_alloc_state().
 */
struct alloc_state {
	int nr; /* number of nodes left in current allocation */
	void *p; /* first free node in current allocation */

	/* bookkeeping of allocations */
	void **slabs;
	int slab_nr, slab_alloc;
};
37
Elijah Newren17313102018-08-15 10:54:06 -070038struct alloc_state *allocate_alloc_state(void)
Stefan Beller14ba97f2018-05-15 14:48:42 -070039{
40 return xcalloc(1, sizeof(struct alloc_state));
41}
42
43void clear_alloc_state(struct alloc_state *s)
44{
45 while (s->slab_nr > 0) {
46 s->slab_nr--;
47 free(s->slabs[s->slab_nr]);
48 }
49
50 FREE_AND_NULL(s->slabs);
51}
52
Ramsay Jones225ea222014-07-13 02:41:41 -040053static inline void *alloc_node(struct alloc_state *s, size_t node_size)
54{
55 void *ret;
56
57 if (!s->nr) {
58 s->nr = BLOCKING;
59 s->p = xmalloc(BLOCKING * node_size);
Stefan Beller14ba97f2018-05-15 14:48:42 -070060
61 ALLOC_GROW(s->slabs, s->slab_nr + 1, s->slab_alloc);
62 s->slabs[s->slab_nr++] = s->p;
Ramsay Jones225ea222014-07-13 02:41:41 -040063 }
64 s->nr--;
Ramsay Jones225ea222014-07-13 02:41:41 -040065 ret = s->p;
66 s->p = (char *)s->p + node_size;
67 memset(ret, 0, node_size);
Stefan Beller14ba97f2018-05-15 14:48:42 -070068
Ramsay Jones225ea222014-07-13 02:41:41 -040069 return ret;
70}
71
Stefan Beller14ba97f2018-05-15 14:48:42 -070072void *alloc_blob_node(struct repository *r)
Jeff King600e2a62014-07-13 02:41:51 -040073{
Stefan Beller14ba97f2018-05-15 14:48:42 -070074 struct blob *b = alloc_node(r->parsed_objects->blob_state, sizeof(struct blob));
Jeff Kingd36f51c2014-07-13 02:41:55 -040075 b->object.type = OBJ_BLOB;
Jeff King600e2a62014-07-13 02:41:51 -040076 return b;
77}
78
Stefan Beller14ba97f2018-05-15 14:48:42 -070079void *alloc_tree_node(struct repository *r)
Jeff King600e2a62014-07-13 02:41:51 -040080{
Stefan Beller14ba97f2018-05-15 14:48:42 -070081 struct tree *t = alloc_node(r->parsed_objects->tree_state, sizeof(struct tree));
Jeff Kingd36f51c2014-07-13 02:41:55 -040082 t->object.type = OBJ_TREE;
Jeff King600e2a62014-07-13 02:41:51 -040083 return t;
84}
85
Stefan Beller14ba97f2018-05-15 14:48:42 -070086void *alloc_tag_node(struct repository *r)
Jeff King600e2a62014-07-13 02:41:51 -040087{
Stefan Beller14ba97f2018-05-15 14:48:42 -070088 struct tag *t = alloc_node(r->parsed_objects->tag_state, sizeof(struct tag));
Jeff Kingd36f51c2014-07-13 02:41:55 -040089 t->object.type = OBJ_TAG;
Jeff King600e2a62014-07-13 02:41:51 -040090 return t;
91}
92
Stefan Beller14ba97f2018-05-15 14:48:42 -070093void *alloc_object_node(struct repository *r)
Jeff King600e2a62014-07-13 02:41:51 -040094{
Stefan Beller14ba97f2018-05-15 14:48:42 -070095 struct object *obj = alloc_node(r->parsed_objects->object_state, sizeof(union any_object));
Jeff Kingd36f51c2014-07-13 02:41:55 -040096 obj->type = OBJ_NONE;
Jeff King600e2a62014-07-13 02:41:51 -040097 return obj;
98}
Linus Torvalds855419f2006-06-19 10:44:15 -070099
/*
 * The returned count is to be used as an index into commit slabs,
 * that are *NOT* maintained per repository, and that is why a single
 * global counter is used.
 */
static unsigned int alloc_commit_index(void)
{
	static unsigned int parsed_commits_count;
	unsigned int index = parsed_commits_count;

	parsed_commits_count++;
	return index;
}
110
Abhishek Kumar6da43d92020-06-17 14:44:08 +0530111void init_commit_node(struct commit *c)
SZEDER Gábor4468d442019-01-27 14:08:32 +0100112{
113 c->object.type = OBJ_COMMIT;
Abhishek Kumar6da43d92020-06-17 14:44:08 +0530114 c->index = alloc_commit_index();
SZEDER Gábor4468d442019-01-27 14:08:32 +0100115}
116
Stefan Beller14ba97f2018-05-15 14:48:42 -0700117void *alloc_commit_node(struct repository *r)
Jeff King969eba62014-06-10 17:39:04 -0400118{
Stefan Beller14ba97f2018-05-15 14:48:42 -0700119 struct commit *c = alloc_node(r->parsed_objects->commit_state, sizeof(struct commit));
Abhishek Kumar6da43d92020-06-17 14:44:08 +0530120 init_commit_node(c);
Jeff King969eba62014-06-10 17:39:04 -0400121 return c;
122}