diff options
| author | Tyge Løvset <[email protected]> | 2022-04-29 17:18:46 +0200 |
|---|---|---|
| committer | Tyge Løvset <[email protected]> | 2022-04-29 17:18:46 +0200 |
| commit | f46f405d4c28de4e087bc275edee1409e6596000 (patch) | |
| tree | daa100a5a5ba7cd47437b62ff84363b8c2ce274e /include | |
| parent | 93ad88cf99299c0bf9c73c0854b511e8df8e0268 (diff) | |
| download | STC-modified-f46f405d4c28de4e087bc275edee1409e6596000.tar.gz STC-modified-f46f405d4c28de4e087bc275edee1409e6596000.zip | |
cmap/csmap cleanup incl. docs.
Diffstat (limited to 'include')
| -rw-r--r-- | include/stc/cmap.h | 47 | ||||
| -rw-r--r-- | include/stc/csmap.h | 99 | ||||
| -rw-r--r-- | include/stc/template.h | 1 |
3 files changed, 72 insertions, 75 deletions
diff --git a/include/stc/cmap.h b/include/stc/cmap.h index 5f1ae3f2..259dea22 100644 --- a/include/stc/cmap.h +++ b/include/stc/cmap.h @@ -183,7 +183,7 @@ _cx_memb(_push)(_cx_self* self, _cx_value _val) { STC_INLINE _cx_iter
_cx_memb(_find)(const _cx_self* self, i_keyraw rkey) {
- _cx_size idx;
+ i_size idx;
if (!(self->size && self->_hashx[idx = _cx_memb(_bucket_)(self, &rkey).idx]))
idx = self->bucket_count;
return c_make(_cx_iter){self->table+idx, self->_hashx+idx};
@@ -191,7 +191,7 @@ _cx_memb(_find)(const _cx_self* self, i_keyraw rkey) { STC_INLINE const _cx_value*
_cx_memb(_get)(const _cx_self* self, i_keyraw rkey) {
- _cx_size idx;
+ i_size idx;
return self->size && self->_hashx[idx = _cx_memb(_bucket_)(self, &rkey).idx] ?
self->table + idx : NULL;
}
@@ -295,7 +295,7 @@ STC_DEF void _cx_memb(_clear)(_cx_self* self) { STC_DEF chash_bucket_t
_cx_memb(_bucket_)(const _cx_self* self, const _cx_rawkey* rkeyptr) {
const uint64_t _hash = i_hash(rkeyptr);
- uint8_t _hx; _cx_size _cap = self->bucket_count;
+ uint8_t _hx; i_size _cap = self->bucket_count;
chash_bucket_t b = {c_paste(fastrange_,i_size)(_hash, _cap), (uint8_t)(_hash | 0x80)};
const uint8_t* _hashx = self->_hashx;
while ((_hx = _hashx[b.idx])) {
@@ -311,7 +311,7 @@ _cx_memb(_bucket_)(const _cx_self* self, const _cx_rawkey* rkeyptr) { STC_DEF _cx_result
_cx_memb(_insert_entry_)(_cx_self* self, i_keyraw rkey) {
- if (self->size + 1 >= (_cx_size) (self->bucket_count * self->max_load_factor))
+ if (self->size + 1 >= (i_size)(self->bucket_count*self->max_load_factor))
_cx_memb(_reserve)(self, ((size_t)self->size*3 >> 1) + 4);
chash_bucket_t b = _cx_memb(_bucket_)(self, &rkey);
_cx_result res = {&self->table[b.idx], !self->_hashx[b.idx]};
@@ -325,36 +325,27 @@ _cx_memb(_insert_entry_)(_cx_self* self, i_keyraw rkey) { #if !defined _i_no_clone
STC_DEF _cx_self
_cx_memb(_clone)(_cx_self m) {
- _cx_self clone = {
- c_alloc_n(_cx_value, m.bucket_count),
- (uint8_t *) memcpy(c_malloc(m.bucket_count + 1), m._hashx, m.bucket_count + 1),
- m.size, m.bucket_count,
- m.max_load_factor
- };
- _cx_value *e = m.table, *end = e + m.bucket_count, *dst = clone.table;
- for (uint8_t *hx = m._hashx; e != end; ++hx, ++e, ++dst)
- if (*hx) *dst = _cx_memb(_value_clone)(*e);
- return clone;
-}
-
-STC_DEF _cx_self
-_cx_memb(_clone2)(_cx_self m) {
- _cx_self clone = _cx_memb(_with_capacity)(m.size);
- c_foreach (i, _cx_self, m)
- _cx_memb(_push)(&clone, _cx_memb(_value_clone)(*i.ref));
- return clone;
+ _cx_value *t = c_alloc_n(_cx_value, m.bucket_count), *dst = t, *m_end = m.table + m.bucket_count;
+ uint8_t *h = (uint8_t *)memcpy(c_malloc(m.bucket_count + 1), m._hashx, m.bucket_count + 1);
+ if (!(t && h))
+ { c_free(t), c_free(h), t = 0, h = 0, m.bucket_count = 0; }
+ else
+ for (; m.table != m_end; ++m.table, ++m._hashx, ++dst)
+ if (*m._hashx) *dst = _cx_memb(_value_clone)(*m.table);
+ m.table = t, m._hashx = h;
+ return m;
}
#endif
STC_DEF bool
_cx_memb(_reserve)(_cx_self* self, const size_t _newcap) {
- if (_newcap < self->size) return true;
- const _cx_size _oldbuckets = self->bucket_count;
- const _cx_size _nbuckets = ((_cx_size)(_newcap/self->max_load_factor) + 2) | 1;
+ const i_size _oldbuckets = self->bucket_count;
+ const i_size _nbuckets = ((i_size)(_newcap/self->max_load_factor) + 2) | 1;
+ if (_newcap != self->size && _newcap <= _oldbuckets) return true;
_cx_self _tmp = {
c_alloc_n(_cx_value, _nbuckets),
(uint8_t *) c_calloc(_nbuckets + 1, sizeof(uint8_t)),
- self->size, (_cx_size) _nbuckets,
+ self->size, (i_size) _nbuckets,
self->max_load_factor
};
bool ret; /* Rehash: */
@@ -377,8 +368,8 @@ _cx_memb(_reserve)(_cx_self* self, const size_t _newcap) { STC_DEF void
_cx_memb(_erase_entry)(_cx_self* self, _cx_value* _val) {
- _cx_size i = _val - self->table, j = i, k;
- const _cx_size _cap = self->bucket_count;
+ i_size i = _val - self->table, j = i, k;
+ const i_size _cap = self->bucket_count;
_cx_value* _slot = self->table;
uint8_t* _hashx = self->_hashx;
_cx_memb(_value_drop)(_val);
diff --git a/include/stc/csmap.h b/include/stc/csmap.h index 009f39b3..75e7a5ee 100644 --- a/include/stc/csmap.h +++ b/include/stc/csmap.h @@ -83,7 +83,7 @@ _i_MAP_ONLY( struct _cx_value { _cx_mapped second;
}; )
struct _cx_node {
- _cx_size link[2];
+ i_size link[2];
int8_t level;
_cx_value value;
};
@@ -96,8 +96,6 @@ typedef _i_SET_ONLY( i_keyraw ) #if !defined _i_no_clone
STC_API _cx_self _cx_memb(_clone)(_cx_self tree);
-STC_API void _cx_memb(_copy)(_cx_self *self, _cx_self other);
-STC_API _cx_value _cx_memb(_value_clone)(_cx_value _val);
#if !defined _i_no_emplace
STC_API _cx_result _cx_memb(_emplace)(_cx_self* self, i_keyraw rkey _i_MAP_ONLY(, i_valraw rmapped));
#endif // !_i_no_emplace
@@ -157,6 +155,28 @@ _cx_memb(_value_drop)(_cx_value* val) { _i_MAP_ONLY( i_valdrop((&val->second)); )
}
+#if !defined _i_no_clone
+STC_INLINE _cx_value
+_cx_memb(_value_clone)(_cx_value _val) {
+ *_i_keyref(&_val) = i_keyclone((*_i_keyref(&_val)));
+ _i_MAP_ONLY( _val.second = i_valclone(_val.second); )
+ return _val;
+}
+
+STC_INLINE void
+_cx_memb(_copy)(_cx_self *self, _cx_self other) {
+ if (self->nodes == other.nodes) return;
+ _cx_memb(_drop)(self);
+ *self = _cx_memb(_clone)(other);
+}
+
+STC_INLINE void
+_cx_memb(_shrink_to_fit)(_cx_self *self) {
+ _cx_self tmp = _cx_memb(_clone)(*self);
+ _cx_memb(_drop)(self); *self = tmp;
+}
+#endif // !_i_no_clone
+
#ifndef _i_isset
#if !defined _i_no_clone && !defined _i_no_emplace
STC_API _cx_result _cx_memb(_emplace_or_assign)(_cx_self* self, i_keyraw rkey, i_valraw rmapped);
@@ -181,7 +201,7 @@ _cx_memb(_find)(const _cx_self* self, i_keyraw rkey) { STC_INLINE _cx_iter
_cx_memb(_begin)(const _cx_self* self) {
_cx_iter it; it._d = self->nodes, it._top = 0;
- it._tn = (_cx_size) _csmap_rep(self)->root;
+ it._tn = (i_size) _csmap_rep(self)->root;
if (it._tn) _cx_memb(_next)(&it);
return it;
}
@@ -214,7 +234,7 @@ _cx_memb(_init)(void) { STC_DEF _cx_value*
_cx_memb(_front)(const _cx_self* self) {
_cx_node *d = self->nodes;
- _cx_size tn = (_cx_size) _csmap_rep(self)->root;
+ i_size tn = (i_size) _csmap_rep(self)->root;
while (d[tn].link[0]) tn = d[tn].link[0];
return &d[tn].value;
}
@@ -222,7 +242,7 @@ _cx_memb(_front)(const _cx_self* self) { STC_DEF _cx_value*
_cx_memb(_back)(const _cx_self* self) {
_cx_node *d = self->nodes;
- _cx_size tn = (_cx_size) _csmap_rep(self)->root;
+ i_size tn = (i_size) _csmap_rep(self)->root;
while (d[tn].link[1]) tn = d[tn].link[1];
return &d[tn].value;
}
@@ -244,7 +264,7 @@ _cx_memb(_reserve)(_cx_self* self, const size_t cap) { return true;
}
-static _cx_size
+static i_size
_cx_memb(_node_new_)(_cx_self* self, int level) {
size_t tn; struct csmap_rep *rep = _csmap_rep(self);
if (rep->disp) {
@@ -256,7 +276,7 @@ _cx_memb(_node_new_)(_cx_self* self, int level) { }
_cx_node* dn = &self->nodes[tn];
dn->link[0] = dn->link[1] = 0; dn->level = level;
- return (_cx_size) tn;
+ return (i_size) tn;
}
static _cx_result _cx_memb(_insert_entry_)(_cx_self* self, i_keyraw rkey);
@@ -298,7 +318,7 @@ _cx_memb(_push)(_cx_self* self, _cx_value _val) { STC_DEF _cx_value*
_cx_memb(_find_it)(const _cx_self* self, i_keyraw rkey, _cx_iter* out) {
- _cx_size tn = _csmap_rep(self)->root;
+ i_size tn = _csmap_rep(self)->root;
_cx_node *d = out->_d = self->nodes;
out->_top = 0;
while (tn) {
@@ -318,7 +338,7 @@ _cx_memb(_lower_bound)(const _cx_self* self, i_keyraw rkey) { _cx_iter it;
_cx_memb(_find_it)(self, rkey, &it);
if (!it.ref && it._top) {
- _cx_size tn = it._st[--it._top];
+ i_size tn = it._st[--it._top];
it._tn = it._d[tn].link[1];
it.ref = &it._d[tn].value;
}
@@ -327,7 +347,7 @@ _cx_memb(_lower_bound)(const _cx_self* self, i_keyraw rkey) { STC_DEF void
_cx_memb(_next)(_cx_iter *it) {
- _cx_size tn = it->_tn;
+ i_size tn = it->_tn;
if (it->_top || tn) {
while (tn) {
it->_st[it->_top++] = tn;
@@ -340,10 +360,10 @@ _cx_memb(_next)(_cx_iter *it) { it->ref = NULL;
}
-STC_DEF _cx_size
-_cx_memb(_skew_)(_cx_node *d, _cx_size tn) {
+STC_DEF i_size
+_cx_memb(_skew_)(_cx_node *d, i_size tn) {
if (tn && d[d[tn].link[0]].level == d[tn].level) {
- _cx_size tmp = d[tn].link[0];
+ i_size tmp = d[tn].link[0];
d[tn].link[0] = d[tmp].link[1];
d[tmp].link[1] = tn;
tn = tmp;
@@ -351,10 +371,10 @@ _cx_memb(_skew_)(_cx_node *d, _cx_size tn) { return tn;
}
-STC_DEF _cx_size
-_cx_memb(_split_)(_cx_node *d, _cx_size tn) {
+STC_DEF i_size
+_cx_memb(_split_)(_cx_node *d, i_size tn) {
if (d[d[d[tn].link[1]].link[1]].level == d[tn].level) {
- _cx_size tmp = d[tn].link[1];
+ i_size tmp = d[tn].link[1];
d[tn].link[1] = d[tmp].link[0];
d[tmp].link[0] = tn;
tn = tmp;
@@ -363,9 +383,9 @@ _cx_memb(_split_)(_cx_node *d, _cx_size tn) { return tn;
}
-static _cx_size
-_cx_memb(_insert_entry_i_)(_cx_self* self, _cx_size tn, const _cx_rawkey* rkey, _cx_result* res) {
- _cx_size up[64], tx = tn;
+static i_size
+_cx_memb(_insert_entry_i_)(_cx_self* self, i_size tn, const _cx_rawkey* rkey, _cx_result* res) {
+ i_size up[64], tx = tn;
_cx_node* d = self->nodes;
int c, top = 0, dir = 0;
while (tx) {
@@ -391,18 +411,18 @@ _cx_memb(_insert_entry_i_)(_cx_self* self, _cx_size tn, const _cx_rawkey* rkey, static _cx_result
_cx_memb(_insert_entry_)(_cx_self* self, i_keyraw rkey) {
_cx_result res = {NULL, false};
- _cx_size tn = _cx_memb(_insert_entry_i_)(self, (_cx_size) _csmap_rep(self)->root, &rkey, &res);
+ i_size tn = _cx_memb(_insert_entry_i_)(self, (i_size) _csmap_rep(self)->root, &rkey, &res);
_csmap_rep(self)->root = tn;
_csmap_rep(self)->size += res.inserted;
return res;
}
-static _cx_size
-_cx_memb(_erase_r_)(_cx_node *d, _cx_size tn, const _cx_rawkey* rkey, int *erased) {
+static i_size
+_cx_memb(_erase_r_)(_cx_node *d, i_size tn, const _cx_rawkey* rkey, int *erased) {
if (tn == 0)
return 0;
i_keyraw raw = i_keyto(_i_keyref(&d[tn].value));
- _cx_size tx; int c = i_cmp((&raw), rkey);
+ i_size tx; int c = i_cmp((&raw), rkey);
if (c != 0)
d[tn].link[c < 0] = _cx_memb(_erase_r_)(d, d[tn].link[c < 0], rkey, erased);
else {
@@ -420,7 +440,7 @@ _cx_memb(_erase_r_)(_cx_node *d, _cx_size tn, const _cx_rawkey* rkey, int *erase tn = d[tn].link[ d[tn].link[0] == 0 ];
/* move it to disposed nodes list */
struct csmap_rep *rep = c_container_of(d, struct csmap_rep, nodes);
- d[tx].link[1] = (_cx_size) rep->disp;
+ d[tx].link[1] = (i_size) rep->disp;
rep->disp = tx;
}
}
@@ -440,7 +460,7 @@ _cx_memb(_erase_r_)(_cx_node *d, _cx_size tn, const _cx_rawkey* rkey, int *erase STC_DEF int
_cx_memb(_erase)(_cx_self* self, i_keyraw rkey) {
int erased = 0;
- _cx_size root = _cx_memb(_erase_r_)(self->nodes, (_cx_size) _csmap_rep(self)->root, &rkey, &erased);
+ i_size root = _cx_memb(_erase_r_)(self->nodes, (i_size) _csmap_rep(self)->root, &rkey, &erased);
return erased ? (_csmap_rep(self)->root = root, --_csmap_rep(self)->size, 1) : 0;
}
@@ -469,24 +489,10 @@ _cx_memb(_erase_range)(_cx_self* self, _cx_iter it1, _cx_iter it2) { }
#if !defined _i_no_clone
-STC_DEF void
-_cx_memb(_copy)(_cx_self *self, _cx_self other) {
- if (self->nodes == other.nodes) return;
- _cx_memb(_drop)(self); *self = _cx_memb(_clone)(other);
-}
-
-STC_DEF _cx_value
-_cx_memb(_value_clone)(_cx_value _val) {
- *_i_keyref(&_val) = i_keyclone((*_i_keyref(&_val)));
- _i_MAP_ONLY( _val.second = i_valclone(_val.second); )
- return _val;
-}
-
-
-static _cx_size
-_cx_memb(_clone_r_)(_cx_self* self, _cx_node* src, _cx_size sn) {
+static i_size
+_cx_memb(_clone_r_)(_cx_self* self, _cx_node* src, i_size sn) {
if (sn == 0) return 0;
- _cx_size tx, tn = _cx_memb(_node_new_)(self, src[sn].level);
+ i_size tx, tn = _cx_memb(_node_new_)(self, src[sn].level);
self->nodes[tn].value = _cx_memb(_value_clone)(src[sn].value);
tx = _cx_memb(_clone_r_)(self, src, src[sn].link[0]); self->nodes[tn].link[0] = tx;
tx = _cx_memb(_clone_r_)(self, src, src[sn].link[1]); self->nodes[tn].link[1] = tx;
@@ -496,11 +502,12 @@ _cx_memb(_clone_r_)(_cx_self* self, _cx_node* src, _cx_size sn) { STC_DEF _cx_self
_cx_memb(_clone)(_cx_self tree) {
_cx_self clone = _cx_memb(_with_capacity)(_csmap_rep(&tree)->size);
- _cx_size root = _cx_memb(_clone_r_)(&clone, tree.nodes, (_cx_size) _csmap_rep(&tree)->root);
+ i_size root = _cx_memb(_clone_r_)(&clone, tree.nodes, (i_size) _csmap_rep(&tree)->root);
_csmap_rep(&clone)->root = root;
_csmap_rep(&clone)->size = _csmap_rep(&tree)->size;
return clone;
}
+
#if !defined _i_no_emplace
STC_DEF _cx_result
_cx_memb(_emplace)(_cx_self* self, i_keyraw rkey _i_MAP_ONLY(, i_valraw rmapped)) {
@@ -515,7 +522,7 @@ _cx_memb(_emplace)(_cx_self* self, i_keyraw rkey _i_MAP_ONLY(, i_valraw rmapped) #endif // !_i_no_clone
static void
-_cx_memb(_drop_r_)(_cx_node* d, _cx_size tn) {
+_cx_memb(_drop_r_)(_cx_node* d, i_size tn) {
if (tn) {
_cx_memb(_drop_r_)(d, d[tn].link[0]);
_cx_memb(_drop_r_)(d, d[tn].link[1]);
@@ -528,7 +535,7 @@ _cx_memb(_drop)(_cx_self* self) { struct csmap_rep* rep = _csmap_rep(self);
// second test is bogus, but suppresses gcc warning:
if (rep->cap && rep != &_csmap_sentinel) {
- _cx_memb(_drop_r_)(self->nodes, (_cx_size) rep->root);
+ _cx_memb(_drop_r_)(self->nodes, (i_size) rep->root);
c_free(rep); // correct, but may give warning
}
}
diff --git a/include/stc/template.h b/include/stc/template.h index a0cec3a8..1c38616e 100644 --- a/include/stc/template.h +++ b/include/stc/template.h @@ -37,7 +37,6 @@ #define _cx_iter _cx_memb(_iter)
#define _cx_result _cx_memb(_result)
#define _cx_node _cx_memb(_node)
- #define _cx_size _cx_memb(_size_t)
#endif
#ifdef i_type
|
