blob: bc7e99020d96eee95b099e3f276aee9ca852c57b [file] [log] [blame]
Brandon Williams85ab50f2017-06-12 15:13:57 -07001#define NO_THE_INDEX_COMPATIBILITY_MACROS
Junio C Hamano8f1d2e62006-01-07 01:33:54 -08002#include "cache.h"
Junio C Hamanoaf3785d2007-08-09 13:42:50 -07003#include "cache-tree.h"
Daniel Barkalow175785e2005-04-18 11:39:48 -07004#include "tree.h"
Stefan Bellercbd53a22018-05-15 16:42:15 -07005#include "object-store.h"
Daniel Barkalow175785e2005-04-18 11:39:48 -07006#include "blob.h"
Daniel Barkalow77675e22005-09-05 02:03:51 -04007#include "commit.h"
8#include "tag.h"
Stefan Beller14ba97f2018-05-15 14:48:42 -07009#include "alloc.h"
Linus Torvalds136f2e52006-05-29 12:16:12 -070010#include "tree-walk.h"
Daniel Barkalow175785e2005-04-18 11:39:48 -070011
/* Canonical type name for tree objects. */
const char *tree_type = "tree";
13
Brandon Williams85ab50f2017-06-12 15:13:57 -070014static int read_one_entry_opt(struct index_state *istate,
brian m. carlsondf46d772018-03-12 02:27:26 +000015 const struct object_id *oid,
Brandon Williams85ab50f2017-06-12 15:13:57 -070016 const char *base, int baselen,
17 const char *pathname,
18 unsigned mode, int stage, int opt)
Linus Torvalds94537c72005-04-22 16:42:37 -070019{
Linus Torvalds3c5e8462005-11-26 09:38:20 -080020 int len;
21 unsigned int size;
22 struct cache_entry *ce;
23
24 if (S_ISDIR(mode))
25 return READ_TREE_RECURSIVE;
26
27 len = strlen(pathname);
28 size = cache_entry_size(baselen + len);
Peter Eriksen90321c12006-04-03 19:30:46 +010029 ce = xcalloc(1, size);
Linus Torvalds94537c72005-04-22 16:42:37 -070030
31 ce->ce_mode = create_ce_mode(mode);
Thomas Gummererb60e1882012-07-11 11:22:37 +020032 ce->ce_flags = create_ce_flags(stage);
33 ce->ce_namelen = baselen + len;
Linus Torvalds94537c72005-04-22 16:42:37 -070034 memcpy(ce->name, base, baselen);
35 memcpy(ce->name + baselen, pathname, len+1);
brian m. carlsondf46d772018-03-12 02:27:26 +000036 oidcpy(&ce->oid, oid);
Brandon Williams85ab50f2017-06-12 15:13:57 -070037 return add_index_entry(istate, ce, opt);
Junio C Hamanoaf3785d2007-08-09 13:42:50 -070038}
39
brian m. carlsondf46d772018-03-12 02:27:26 +000040static int read_one_entry(const struct object_id *oid, struct strbuf *base,
Nguyễn Thái Ngọc Duy6a0b0b62014-11-30 16:05:00 +070041 const char *pathname, unsigned mode, int stage,
42 void *context)
Junio C Hamanoaf3785d2007-08-09 13:42:50 -070043{
Brandon Williams85ab50f2017-06-12 15:13:57 -070044 struct index_state *istate = context;
brian m. carlsondf46d772018-03-12 02:27:26 +000045 return read_one_entry_opt(istate, oid, base->buf, base->len, pathname,
Nguyễn Thái Ngọc Duy6a0b0b62014-11-30 16:05:00 +070046 mode, stage,
Junio C Hamanoaf3785d2007-08-09 13:42:50 -070047 ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);
48}
49
/*
 * This is used when the caller knows there are no existing entries at
 * the stage that will conflict with the entry being added.
 */
brian m. carlsondf46d772018-03-12 02:27:26 +000054static int read_one_entry_quick(const struct object_id *oid, struct strbuf *base,
Nguyễn Thái Ngọc Duy6a0b0b62014-11-30 16:05:00 +070055 const char *pathname, unsigned mode, int stage,
56 void *context)
Junio C Hamanoaf3785d2007-08-09 13:42:50 -070057{
Brandon Williams85ab50f2017-06-12 15:13:57 -070058 struct index_state *istate = context;
brian m. carlsondf46d772018-03-12 02:27:26 +000059 return read_one_entry_opt(istate, oid, base->buf, base->len, pathname,
Nguyễn Thái Ngọc Duy6a0b0b62014-11-30 16:05:00 +070060 mode, stage,
Junio C Hamanoaf3785d2007-08-09 13:42:50 -070061 ADD_CACHE_JUST_APPEND);
Linus Torvalds94537c72005-04-22 16:42:37 -070062}
63
Nguyễn Thái Ngọc Duyffd31f62011-03-25 16:34:18 +070064static int read_tree_1(struct tree *tree, struct strbuf *base,
Nguyễn Thái Ngọc Duy18e4f402013-07-14 15:35:52 +070065 int stage, const struct pathspec *pathspec,
Nguyễn Thái Ngọc Duyffd31f62011-03-25 16:34:18 +070066 read_tree_fn_t fn, void *context)
Linus Torvalds0ca14a52005-07-14 11:26:31 -070067{
Nguyễn Thái Ngọc Duyffd31f62011-03-25 16:34:18 +070068 struct tree_desc desc;
69 struct name_entry entry;
brian m. carlsonf26efc52017-05-06 22:10:15 +000070 struct object_id oid;
Nguyễn Thái Ngọc Duyd688cf02011-10-24 17:36:10 +110071 int len, oldlen = base->len;
72 enum interesting retval = entry_not_interesting;
Linus Torvalds0ca14a52005-07-14 11:26:31 -070073
Nguyễn Thái Ngọc Duyffd31f62011-03-25 16:34:18 +070074 if (parse_tree(tree))
75 return -1;
Linus Torvalds0ca14a52005-07-14 11:26:31 -070076
Nguyễn Thái Ngọc Duyffd31f62011-03-25 16:34:18 +070077 init_tree_desc(&desc, tree->buffer, tree->size);
Linus Torvalds0ca14a52005-07-14 11:26:31 -070078
Nguyễn Thái Ngọc Duyffd31f62011-03-25 16:34:18 +070079 while (tree_entry(&desc, &entry)) {
Nguyễn Thái Ngọc Duyd688cf02011-10-24 17:36:10 +110080 if (retval != all_entries_interesting) {
Nguyễn Thái Ngọc Duyffd31f62011-03-25 16:34:18 +070081 retval = tree_entry_interesting(&entry, base, 0, pathspec);
Nguyễn Thái Ngọc Duyd688cf02011-10-24 17:36:10 +110082 if (retval == all_entries_not_interesting)
Nguyễn Thái Ngọc Duyffd31f62011-03-25 16:34:18 +070083 break;
Nguyễn Thái Ngọc Duyd688cf02011-10-24 17:36:10 +110084 if (retval == entry_not_interesting)
Linus Torvalds0ca14a52005-07-14 11:26:31 -070085 continue;
86 }
87
brian m. carlsondf46d772018-03-12 02:27:26 +000088 switch (fn(entry.oid, base,
Nguyễn Thái Ngọc Duyffd31f62011-03-25 16:34:18 +070089 entry.path, entry.mode, stage, context)) {
90 case 0:
91 continue;
92 case READ_TREE_RECURSIVE:
93 break;
94 default:
95 return -1;
96 }
97
98 if (S_ISDIR(entry.mode))
brian m. carlsonf26efc52017-05-06 22:10:15 +000099 oidcpy(&oid, entry.oid);
Nguyễn Thái Ngọc Duyffd31f62011-03-25 16:34:18 +0700100 else if (S_ISGITLINK(entry.mode)) {
101 struct commit *commit;
102
brian m. carlsonbc832662017-05-06 22:10:10 +0000103 commit = lookup_commit(entry.oid);
Nguyễn Thái Ngọc Duyffd31f62011-03-25 16:34:18 +0700104 if (!commit)
105 die("Commit %s in submodule path %s%s not found",
brian m. carlson7d924c92016-04-17 23:10:39 +0000106 oid_to_hex(entry.oid),
Nguyễn Thái Ngọc Duyffd31f62011-03-25 16:34:18 +0700107 base->buf, entry.path);
108
109 if (parse_commit(commit))
110 die("Invalid commit %s in submodule path %s%s",
brian m. carlson7d924c92016-04-17 23:10:39 +0000111 oid_to_hex(entry.oid),
Nguyễn Thái Ngọc Duyffd31f62011-03-25 16:34:18 +0700112 base->buf, entry.path);
113
Derrick Stolee2e27bd72018-04-06 19:09:38 +0000114 oidcpy(&oid, get_commit_tree_oid(commit));
Nguyễn Thái Ngọc Duyffd31f62011-03-25 16:34:18 +0700115 }
116 else
Linus Torvalds0ca14a52005-07-14 11:26:31 -0700117 continue;
Linus Torvalds3e587632005-07-14 11:39:27 -0700118
Nguyễn Thái Ngọc Duy0de16332011-10-24 17:36:09 +1100119 len = tree_entry_len(&entry);
Nguyễn Thái Ngọc Duyffd31f62011-03-25 16:34:18 +0700120 strbuf_add(base, entry.path, len);
121 strbuf_addch(base, '/');
brian m. carlson740ee052017-05-06 22:10:17 +0000122 retval = read_tree_1(lookup_tree(&oid),
Nguyễn Thái Ngọc Duyffd31f62011-03-25 16:34:18 +0700123 base, stage, pathspec,
124 fn, context);
125 strbuf_setlen(base, oldlen);
126 if (retval)
127 return -1;
Linus Torvalds0ca14a52005-07-14 11:26:31 -0700128 }
129 return 0;
130}
131
Daniel Barkalow521698b2006-01-26 01:13:36 -0500132int read_tree_recursive(struct tree *tree,
Linus Torvalds3c5e8462005-11-26 09:38:20 -0800133 const char *base, int baselen,
Nguyễn Thái Ngọc Duy18e4f402013-07-14 15:35:52 +0700134 int stage, const struct pathspec *pathspec,
René Scharfe671f0702008-07-14 21:22:12 +0200135 read_tree_fn_t fn, void *context)
Linus Torvalds94537c72005-04-22 16:42:37 -0700136{
Nguyễn Thái Ngọc Duyffd31f62011-03-25 16:34:18 +0700137 struct strbuf sb = STRBUF_INIT;
Nguyễn Thái Ngọc Duyf0096c02011-03-25 16:34:19 +0700138 int ret;
Linus Torvalds0790a422006-05-29 12:17:28 -0700139
Nguyễn Thái Ngọc Duyffd31f62011-03-25 16:34:18 +0700140 strbuf_add(&sb, base, baselen);
Nguyễn Thái Ngọc Duyf0096c02011-03-25 16:34:19 +0700141 ret = read_tree_1(tree, &sb, stage, pathspec, fn, context);
Nguyễn Thái Ngọc Duyffd31f62011-03-25 16:34:18 +0700142 strbuf_release(&sb);
Nguyễn Thái Ngọc Duyffd31f62011-03-25 16:34:18 +0700143 return ret;
Linus Torvalds94537c72005-04-22 16:42:37 -0700144}
145
Junio C Hamanoaf3785d2007-08-09 13:42:50 -0700146static int cmp_cache_name_compare(const void *a_, const void *b_)
147{
148 const struct cache_entry *ce1, *ce2;
149
150 ce1 = *((const struct cache_entry **)a_);
151 ce2 = *((const struct cache_entry **)b_);
Thomas Gummererb60e1882012-07-11 11:22:37 +0200152 return cache_name_stage_compare(ce1->name, ce1->ce_namelen, ce_stage(ce1),
153 ce2->name, ce2->ce_namelen, ce_stage(ce2));
Junio C Hamanoaf3785d2007-08-09 13:42:50 -0700154}
155
Brandon Williams85ab50f2017-06-12 15:13:57 -0700156int read_tree(struct tree *tree, int stage, struct pathspec *match,
157 struct index_state *istate)
Linus Torvalds94537c72005-04-22 16:42:37 -0700158{
Junio C Hamanoaf3785d2007-08-09 13:42:50 -0700159 read_tree_fn_t fn = NULL;
160 int i, err;
161
162 /*
163 * Currently the only existing callers of this function all
164 * call it with stage=1 and after making sure there is nothing
165 * at that stage; we could always use read_one_entry_quick().
166 *
167 * But when we decide to straighten out git-read-tree not to
168 * use unpack_trees() in some cases, this will probably start
169 * to matter.
170 */
171
172 /*
173 * See if we have cache entry at the stage. If so,
174 * do it the original slow way, otherwise, append and then
175 * sort at the end.
176 */
Brandon Williams85ab50f2017-06-12 15:13:57 -0700177 for (i = 0; !fn && i < istate->cache_nr; i++) {
178 const struct cache_entry *ce = istate->cache[i];
Junio C Hamanoaf3785d2007-08-09 13:42:50 -0700179 if (ce_stage(ce) == stage)
180 fn = read_one_entry;
181 }
182
183 if (!fn)
184 fn = read_one_entry_quick;
Brandon Williams85ab50f2017-06-12 15:13:57 -0700185 err = read_tree_recursive(tree, "", 0, stage, match, fn, istate);
Junio C Hamanoaf3785d2007-08-09 13:42:50 -0700186 if (fn == read_one_entry || err)
187 return err;
188
189 /*
190 * Sort the cache entry -- we need to nuke the cache tree, though.
191 */
Brandon Williams85ab50f2017-06-12 15:13:57 -0700192 cache_tree_free(&istate->cache_tree);
193 QSORT(istate->cache, istate->cache_nr, cmp_cache_name_compare);
Junio C Hamanoaf3785d2007-08-09 13:42:50 -0700194 return 0;
Linus Torvalds94537c72005-04-22 16:42:37 -0700195}
196
brian m. carlson740ee052017-05-06 22:10:17 +0000197struct tree *lookup_tree(const struct object_id *oid)
Daniel Barkalow175785e2005-04-18 11:39:48 -0700198{
brian m. carlson740ee052017-05-06 22:10:17 +0000199 struct object *obj = lookup_object(oid->hash);
Linus Torvalds100c5f32007-04-16 22:11:43 -0700200 if (!obj)
Stefan Beller68f95d32018-05-08 12:37:25 -0700201 return create_object(the_repository, oid->hash,
Stefan Bellercf7203b2018-05-08 12:37:28 -0700202 alloc_tree_node(the_repository));
Jeff King8ff226a2014-07-13 02:42:03 -0400203 return object_as_type(obj, OBJ_TREE, 0);
Daniel Barkalow175785e2005-04-18 11:39:48 -0700204}
205
Nicolas Pitrebd2c39f2005-05-06 13:48:34 -0400206int parse_tree_buffer(struct tree *item, void *buffer, unsigned long size)
Daniel Barkalow175785e2005-04-18 11:39:48 -0700207{
Daniel Barkalow175785e2005-04-18 11:39:48 -0700208 if (item->object.parsed)
209 return 0;
210 item->object.parsed = 1;
Linus Torvalds136f2e52006-05-29 12:16:12 -0700211 item->buffer = buffer;
212 item->size = size;
213
Linus Torvalds2d9c58c2006-05-29 12:18:33 -0700214 return 0;
215}
Linus Torvalds136f2e52006-05-29 12:16:12 -0700216
Jeff King9cc2b072015-06-01 05:56:26 -0400217int parse_tree_gently(struct tree *item, int quiet_on_missing)
Nicolas Pitrebd2c39f2005-05-06 13:48:34 -0400218{
Nicolas Pitre21666f12007-02-26 14:55:59 -0500219 enum object_type type;
Nicolas Pitrebd2c39f2005-05-06 13:48:34 -0400220 void *buffer;
221 unsigned long size;
Nicolas Pitrebd2c39f2005-05-06 13:48:34 -0400222
223 if (item->object.parsed)
224 return 0;
brian m. carlsonb4f5aca2018-03-12 02:27:53 +0000225 buffer = read_object_file(&item->object.oid, &type, &size);
Nicolas Pitrebd2c39f2005-05-06 13:48:34 -0400226 if (!buffer)
Jeff King9cc2b072015-06-01 05:56:26 -0400227 return quiet_on_missing ? -1 :
228 error("Could not read %s",
brian m. carlsonf2fd0762015-11-10 02:22:28 +0000229 oid_to_hex(&item->object.oid));
Nicolas Pitre21666f12007-02-26 14:55:59 -0500230 if (type != OBJ_TREE) {
Nicolas Pitrebd2c39f2005-05-06 13:48:34 -0400231 free(buffer);
232 return error("Object %s not a tree",
brian m. carlsonf2fd0762015-11-10 02:22:28 +0000233 oid_to_hex(&item->object.oid));
Nicolas Pitrebd2c39f2005-05-06 13:48:34 -0400234 }
Linus Torvalds136f2e52006-05-29 12:16:12 -0700235 return parse_tree_buffer(item, buffer, size);
Nicolas Pitrebd2c39f2005-05-06 13:48:34 -0400236}
Daniel Barkalow77675e22005-09-05 02:03:51 -0400237
Jeff King6e454b92013-06-05 18:37:39 -0400238void free_tree_buffer(struct tree *tree)
239{
Ævar Arnfjörð Bjarmason6a83d902017-06-15 23:15:46 +0000240 FREE_AND_NULL(tree->buffer);
Jeff King6e454b92013-06-05 18:37:39 -0400241 tree->size = 0;
242 tree->object.parsed = 0;
243}
244
brian m. carlsona9dbc172017-05-06 22:10:37 +0000245struct tree *parse_tree_indirect(const struct object_id *oid)
Daniel Barkalow77675e22005-09-05 02:03:51 -0400246{
brian m. carlsonc251c832017-05-06 22:10:38 +0000247 struct object *obj = parse_object(oid);
Daniel Barkalow77675e22005-09-05 02:03:51 -0400248 do {
249 if (!obj)
250 return NULL;
Linus Torvalds19746322006-07-11 20:45:31 -0700251 if (obj->type == OBJ_TREE)
Daniel Barkalow77675e22005-09-05 02:03:51 -0400252 return (struct tree *) obj;
Linus Torvalds19746322006-07-11 20:45:31 -0700253 else if (obj->type == OBJ_COMMIT)
Derrick Stolee2e27bd72018-04-06 19:09:38 +0000254 obj = &(get_commit_tree(((struct commit *)obj))->object);
Linus Torvalds19746322006-07-11 20:45:31 -0700255 else if (obj->type == OBJ_TAG)
Daniel Barkalow77675e22005-09-05 02:03:51 -0400256 obj = ((struct tag *) obj)->tagged;
257 else
258 return NULL;
259 if (!obj->parsed)
brian m. carlsonc251c832017-05-06 22:10:38 +0000260 parse_object(&obj->oid);
Daniel Barkalow77675e22005-09-05 02:03:51 -0400261 } while (1);
262}