#include "cache.h"
#include "cache-tree.h"
#include "tree.h"
#include "blob.h"
#include "commit.h"
#include "tag.h"
#include "tree-walk.h"

const char *tree_type = "tree";

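/*
 * Turn one tree entry into a cache entry at the given stage and add it
 * to the index with the given add_cache_entry() flags; directories are
 * reported back to the tree walk so that it recurses into them.
 */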
static int read_one_entry_opt(const unsigned char *sha1, const char *base, int baselen, const char *pathname, unsigned mode, int stage, int opt)
{
	int len;
	unsigned int size;
	struct cache_entry *ce;

	if (S_ISDIR(mode))
		return READ_TREE_RECURSIVE;

	len = strlen(pathname);
	size = cache_entry_size(baselen + len);
	ce = xcalloc(1, size);

	ce->ce_mode = create_ce_mode(mode);
	ce->ce_flags = create_ce_flags(stage);
	ce->ce_namelen = baselen + len;
	memcpy(ce->name, base, baselen);
	memcpy(ce->name + baselen, pathname, len+1);
	hashcpy(ce->sha1, sha1);
	return add_cache_entry(ce, opt);
}

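/*
 * Callback for the ordinary case: entries go through the regular
 * add_cache_entry() checks, with the D/F conflict check skipped.
 */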
static int read_one_entry(const unsigned char *sha1, struct strbuf *base,
			  const char *pathname, unsigned mode, int stage,
			  void *context)
{
	return read_one_entry_opt(sha1, base->buf, base->len, pathname,
				  mode, stage,
				  ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);
}

/*
 * This is used when the caller knows there are no existing entries at
 * the stage that will conflict with the entry being added.
 */
static int read_one_entry_quick(const unsigned char *sha1, struct strbuf *base,
				const char *pathname, unsigned mode, int stage,
				void *context)
{
	return read_one_entry_opt(sha1, base->buf, base->len, pathname,
				  mode, stage,
				  ADD_CACHE_JUST_APPEND);
}

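/*
 * Walk one tree, calling fn on every entry that matches the pathspec
 * and recursing into subtrees (and submodule commits) when fn asks for
 * it; "base" carries the path of the current tree and is restored
 * before returning.
 */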
static int read_tree_1(struct tree *tree, struct strbuf *base,
		       int stage, const struct pathspec *pathspec,
		       read_tree_fn_t fn, void *context)
{
	struct tree_desc desc;
	struct name_entry entry;
	unsigned char sha1[20];
	int len, oldlen = base->len;
	enum interesting retval = entry_not_interesting;

	if (parse_tree(tree))
		return -1;

	init_tree_desc(&desc, tree->buffer, tree->size);

	while (tree_entry(&desc, &entry)) {
		if (retval != all_entries_interesting) {
			retval = tree_entry_interesting(&entry, base, 0, pathspec);
			if (retval == all_entries_not_interesting)
				break;
			if (retval == entry_not_interesting)
				continue;
		}

		switch (fn(entry.sha1, base,
			   entry.path, entry.mode, stage, context)) {
		case 0:
			continue;
		case READ_TREE_RECURSIVE:
			break;
		default:
			return -1;
		}

		if (S_ISDIR(entry.mode))
			hashcpy(sha1, entry.sha1);
		else if (S_ISGITLINK(entry.mode)) {
			struct commit *commit;

			commit = lookup_commit(entry.sha1);
			if (!commit)
				die("Commit %s in submodule path %s%s not found",
				    sha1_to_hex(entry.sha1),
				    base->buf, entry.path);

			if (parse_commit(commit))
				die("Invalid commit %s in submodule path %s%s",
				    sha1_to_hex(entry.sha1),
				    base->buf, entry.path);

			hashcpy(sha1, commit->tree->object.sha1);
		}
		else
			continue;

		len = tree_entry_len(&entry);
		strbuf_add(base, entry.path, len);
		strbuf_addch(base, '/');
		retval = read_tree_1(lookup_tree(sha1),
				     base, stage, pathspec,
				     fn, context);
		strbuf_setlen(base, oldlen);
		if (retval)
			return -1;
	}
	return 0;
}

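/*
 * Public wrapper around read_tree_1(): seed the path buffer from the
 * caller-supplied base and release it when the walk is done.
 */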
int read_tree_recursive(struct tree *tree,
			const char *base, int baselen,
			int stage, const struct pathspec *pathspec,
			read_tree_fn_t fn, void *context)
{
	struct strbuf sb = STRBUF_INIT;
	int ret;

	strbuf_add(&sb, base, baselen);
	ret = read_tree_1(tree, &sb, stage, pathspec, fn, context);
	strbuf_release(&sb);
	return ret;
}

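/* qsort() comparison function: order cache entries by name and stage. */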
static int cmp_cache_name_compare(const void *a_, const void *b_)
{
	const struct cache_entry *ce1, *ce2;

	ce1 = *((const struct cache_entry **)a_);
	ce2 = *((const struct cache_entry **)b_);
	return cache_name_stage_compare(ce1->name, ce1->ce_namelen, ce_stage(ce1),
					ce2->name, ce2->ce_namelen, ce_stage(ce2));
}

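/*
 * Read a whole tree into the index at the given stage, using the fast
 * append-then-sort path when no entries exist at that stage yet.
 */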
int read_tree(struct tree *tree, int stage, struct pathspec *match)
{
	read_tree_fn_t fn = NULL;
	int i, err;

	/*
	 * Currently the only existing callers of this function all
	 * call it with stage=1 and after making sure there is nothing
	 * at that stage; we could always use read_one_entry_quick().
	 *
	 * But when we decide to straighten out git-read-tree not to
	 * use unpack_trees() in some cases, this will probably start
	 * to matter.
	 */

	/*
	 * See if we have a cache entry at the stage.  If so,
	 * do it the original slow way, otherwise, append and then
	 * sort at the end.
	 */
	for (i = 0; !fn && i < active_nr; i++) {
		const struct cache_entry *ce = active_cache[i];
		if (ce_stage(ce) == stage)
			fn = read_one_entry;
	}

	if (!fn)
		fn = read_one_entry_quick;
	err = read_tree_recursive(tree, "", 0, stage, match, fn, NULL);
	if (fn == read_one_entry || err)
		return err;

	/*
	 * Sort the cache entries -- we need to nuke the cache tree, though.
	 */
	cache_tree_free(&active_cache_tree);
	qsort(active_cache, active_nr, sizeof(active_cache[0]),
	      cmp_cache_name_compare);
	return 0;
}

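/* Look up (or create) the in-core object for a tree with this SHA-1. */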
struct tree *lookup_tree(const unsigned char *sha1)
{
	struct object *obj = lookup_object(sha1);
	if (!obj)
		return create_object(sha1, alloc_tree_node());
	return object_as_type(obj, OBJ_TREE, 0);
}

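/* Attach an already-read tree buffer to the in-core tree object. */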
int parse_tree_buffer(struct tree *item, void *buffer, unsigned long size)
{
	if (item->object.parsed)
		return 0;
	item->object.parsed = 1;
	item->buffer = buffer;
	item->size = size;

	return 0;
}

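/*
 * Read the tree from the object database and attach its buffer to the
 * in-core object, unless it has been parsed already.
 */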
int parse_tree(struct tree *item)
{
	enum object_type type;
	void *buffer;
	unsigned long size;

	if (item->object.parsed)
		return 0;
	buffer = read_sha1_file(item->object.sha1, &type, &size);
	if (!buffer)
		return error("Could not read %s",
			     sha1_to_hex(item->object.sha1));
	if (type != OBJ_TREE) {
		free(buffer);
		return error("Object %s not a tree",
			     sha1_to_hex(item->object.sha1));
	}
	return parse_tree_buffer(item, buffer, size);
}

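/* Drop the cached tree buffer and mark the tree as unparsed. */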
void free_tree_buffer(struct tree *tree)
{
	free(tree->buffer);
	tree->buffer = NULL;
	tree->size = 0;
	tree->object.parsed = 0;
}

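/*
 * Peel the given object until a tree is reached: commits yield their
 * tree, tags their tagged object; anything else returns NULL.
 */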
struct tree *parse_tree_indirect(const unsigned char *sha1)
{
	struct object *obj = parse_object(sha1);
	do {
		if (!obj)
			return NULL;
		if (obj->type == OBJ_TREE)
			return (struct tree *) obj;
		else if (obj->type == OBJ_COMMIT)
			obj = &(((struct commit *) obj)->tree->object);
		else if (obj->type == OBJ_TAG)
			obj = ((struct tag *) obj)->tagged;
		else
			return NULL;
		if (!obj->parsed)
			parse_object(obj->sha1);
	} while (1);
}