/*
 * A wrapper around cbtree which stores oids
 * May be used to replace oid-array for prefix (abbreviation) matches
 */
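
/*
 * Example usage, as a minimal sketch; "oid" stands in for any
 * caller-supplied oid with oid->algo set (see oidtree_each() below
 * for prefix iteration):
 *
 *	struct oidtree ot;
 *
 *	oidtree_init(&ot);
 *	oidtree_insert(&ot, oid);
 *	if (oidtree_contains(&ot, oid))
 *		... exact match is present ...
 *	oidtree_clear(&ot);
 */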
#include "git-compat-util.h"
#include "oidtree.h"
#include "hash.h"

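/*
 * Private state threaded through the cb_each() callback below:
 * "fn" and "arg" are the user-supplied iterator and its cookie;
 * "algo", "last_nibble_at" and "last_byte" carry the filtering
 * criteria set up by oidtree_each().
 */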
struct oidtree_iter_data {
	oidtree_iter fn;
	void *arg;
	size_t *last_nibble_at;
	int algo;
	uint8_t last_byte;
};

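/* Prepare an empty tree backed by its own memory pool. */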
void oidtree_init(struct oidtree *ot)
{
	cb_init(&ot->tree);
	mem_pool_init(&ot->mem_pool, 0);
}

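/*
 * Discard every entry in one step by dropping the memory pool,
 * then reinitialize so the tree is immediately usable again.
 */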
void oidtree_clear(struct oidtree *ot)
{
	if (ot) {
		mem_pool_discard(&ot->mem_pool, 0);
		oidtree_init(ot);
	}
}

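/*
 * Add an oid to the tree.  The node and the key it embeds are
 * carved out of the tree's memory pool, so individual entries
 * cannot be freed; see the note before cb_insert() below.
 */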
void oidtree_insert(struct oidtree *ot, const struct object_id *oid)
{
	struct cb_node *on;
	struct object_id k;

	if (!oid->algo)
		BUG("oidtree_insert requires oid->algo");

	on = mem_pool_alloc(&ot->mem_pool, sizeof(*on) + sizeof(*oid));

	/*
	 * Clear the padding and copy the result in separate steps to
	 * respect the 4-byte alignment needed by struct object_id.
	 */
	oidcpy_with_padding(&k, oid);
	memcpy(on->k, &k, sizeof(k));

	/*
	 * n.b. Current callers won't get us duplicates here.  If a
	 * future caller causes duplicates, there'll be a small leak
	 * that won't be freed until oidtree_clear.  Currently it's
	 * not worth maintaining a free list.
	 */
	cb_insert(&ot->tree, on, sizeof(*oid));
}

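/*
 * Check for an exact match.  If oid->algo is GIT_HASH_UNKNOWN, the
 * trailing "algo" member is excluded from the comparison, so a
 * stored oid of any algorithm can match.
 */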
int oidtree_contains(struct oidtree *ot, const struct object_id *oid)
{
	struct object_id k;
	size_t klen = sizeof(k);

	oidcpy_with_padding(&k, oid);

	if (oid->algo == GIT_HASH_UNKNOWN)
		klen -= sizeof(oid->algo);

	/* cb_lookup relies on memcmp on the struct, so order matters: */
	klen += BUILD_ASSERT_OR_ZERO(offsetof(struct object_id, hash) <
				offsetof(struct object_id, algo));

	return cb_lookup(&ot->tree, (const uint8_t *)&k, klen) ? 1 : 0;
}

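/*
 * cb_each() callback: skip entries whose algorithm does not match
 * and, for odd-length prefixes, entries where the high nibble of
 * the byte holding the final hex digit differs; everything else is
 * forwarded to the user-supplied iterator.
 */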
static enum cb_next iter(struct cb_node *n, void *arg)
{
	struct oidtree_iter_data *x = arg;
	struct object_id k;

	/* Copy to provide 4-byte alignment needed by struct object_id. */
	memcpy(&k, n->k, sizeof(k));

	if (x->algo != GIT_HASH_UNKNOWN && x->algo != k.algo)
		return CB_CONTINUE;

	if (x->last_nibble_at) {
		if ((k.hash[*x->last_nibble_at] ^ x->last_byte) & 0xf0)
			return CB_CONTINUE;
	}

	return x->fn(&k, x->arg);
}

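/*
 * Call "fn" for each stored oid matching the first "oidhexsz" hex
 * digits of "oid".  cb_each() compares whole bytes only, so a
 * trailing odd nibble is filtered in iter() above via
 * last_nibble_at/last_byte.
 *
 * A minimal caller sketch (the callback name is hypothetical):
 *
 *	static enum cb_next show_oid(const struct object_id *oid, void *arg)
 *	{
 *		puts(oid_to_hex(oid));
 *		return CB_CONTINUE;
 *	}
 *
 *	oidtree_each(&ot, &prefix, 7, show_oid, NULL);
 */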
void oidtree_each(struct oidtree *ot, const struct object_id *oid,
		  size_t oidhexsz, oidtree_iter fn, void *arg)
{
	size_t klen = oidhexsz / 2;
	struct oidtree_iter_data x = { 0 };
	assert(oidhexsz <= GIT_MAX_HEXSZ);

	x.fn = fn;
	x.arg = arg;
	x.algo = oid->algo;
	if (oidhexsz & 1) {
		x.last_byte = oid->hash[klen];
		x.last_nibble_at = &klen;
	}
	cb_each(&ot->tree, (const uint8_t *)oid, klen, iter, &x);
}