summaryrefslogtreecommitdiffhomepage
path: root/include/stc/cspan.h
diff options
context:
space:
mode:
authorTyge Løvset <[email protected]>2023-01-31 12:53:46 +0100
committerTyge Løvset <[email protected]>2023-01-31 12:53:46 +0100
commit5bbcae2a3add163ea3b7a91d65fda6836c18f410 (patch)
tree304ab8ca8f632f56e53ee2bc568fb834da91b13c /include/stc/cspan.h
parent209bf743e0c1253a4bc81d2ffb6897f657a84c8a (diff)
downloadSTC-modified-5bbcae2a3add163ea3b7a91d65fda6836c18f410.tar.gz
STC-modified-5bbcae2a3add163ea3b7a91d65fda6836c18f410.zip
Updates, and prepare for the big unsigned ==> signed transformation.
Diffstat (limited to 'include/stc/cspan.h')
-rw-r--r--include/stc/cspan.h110
1 file changed, 63 insertions, 47 deletions
diff --git a/include/stc/cspan.h b/include/stc/cspan.h
index 7886c9f6..a5f3fdc7 100644
--- a/include/stc/cspan.h
+++ b/include/stc/cspan.h
@@ -32,8 +32,8 @@ int demo1() {
float raw[4*5];
Span2f ms = cspan_md(raw, 4, 5);
- for (size_t i=0; i<ms.dim[0]; i++)
- for (size_t j=0; j<ms.dim[1]; j++)
+ for (int i=0; i<ms.dim[0]; i++)
+ for (int j=0; j<ms.dim[1]; j++)
*cspan_at(&ms, i, j) = i*1000 + j;
printf("%f\n", *cspan_at(&ms, 3, 4));
@@ -60,17 +60,19 @@ int demo2() {
#include "ccommon.h"
-#define using_cspan(Self, T, RANK) \
+#define using_cspan(...) c_MACRO_OVERLOAD(using_cspan, __VA_ARGS__)
+#define using_cspan_2(Self, T) using_cspan_3(Self, T, 1)
+#define using_cspan_3(Self, T, RANK) \
typedef T Self##_value; typedef T Self##_raw; \
typedef struct { \
Self##_value *data; \
- uint32_t dim[RANK]; \
+ int32_t dim[RANK]; \
cspan_idx##RANK stride; \
} Self; \
- typedef struct { Self##_value *ref; uint32_t pos[RANK]; const Self *_s; } Self##_iter; \
+ typedef struct { Self##_value *ref; int32_t pos[RANK]; const Self *_s; } Self##_iter; \
\
- STC_INLINE Self Self##_from_n(Self##_raw* raw, const size_t n) { \
- return (Self){.data=raw, .dim={(uint32_t)n}}; \
+ STC_INLINE Self Self##_from_n(Self##_raw* raw, const intptr_t n) { \
+ return (Self){.data=raw, .dim={(int32_t)n}}; \
} \
STC_INLINE Self##_iter Self##_begin(const Self* self) { \
Self##_iter it = {.ref=self->data, .pos={0}, ._s=self}; \
@@ -86,13 +88,15 @@ int demo2() {
} \
struct stc_nostruct
-#define using_cspan2(Self, T) using_cspan(Self, T, 1); using_cspan(Self##2, T, 2)
-#define using_cspan3(Self, T) using_cspan2(Self, T); using_cspan(Self##3, T, 3)
-#define using_cspan4(Self, T) using_cspan3(Self, T); using_cspan(Self##4, T, 4)
-typedef struct { uint32_t d[1]; } cspan_idx1;
-typedef struct { uint32_t d[2]; } cspan_idx2;
-typedef struct { uint32_t d[3]; } cspan_idx3;
-typedef struct { uint32_t d[4]; } cspan_idx4;
+#define using_cspan2(Self, T) using_cspan_3(Self, T, 1); using_cspan_3(Self##2, T, 2)
+#define using_cspan3(Self, T) using_cspan2(Self, T); using_cspan_3(Self##3, T, 3)
+#define using_cspan4(Self, T) using_cspan3(Self, T); using_cspan_3(Self##4, T, 4)
/* Declare span types of every rank 1..5: Self, Self2, Self3, Self4, Self5.
 * Fix: the rank-5 entry must invoke using_cspan_3 (using_cspan_4 does not
 * exist) and must name the type Self##5, not Self##4 (which using_cspan4
 * already declared). Follows the pattern of using_cspan2/3/4 above. */
#define using_cspan5(Self, T) using_cspan4(Self, T); using_cspan_3(Self##5, T, 5)
+typedef struct { int32_t d[1]; } cspan_idx1;
+typedef struct { int32_t d[2]; } cspan_idx2;
+typedef struct { int32_t d[3]; } cspan_idx3;
+typedef struct { int32_t d[4]; } cspan_idx4;
+typedef struct { int32_t d[5]; } cspan_idx5;
#define cspan_md(array, ...) \
{.data=array, .dim={__VA_ARGS__}, .stride={.d={__VA_ARGS__}}}
@@ -103,7 +107,7 @@ typedef struct { uint32_t d[4]; } cspan_idx4;
/* create a cspan from a cvec, cstack, cdeq, cqueue, or cpque (heap) */
#define cspan_from(container) \
- {.data=(container)->data, .dim={(uint32_t)(container)->_len}}
+ {.data=(container)->data, .dim={(int32_t)(container)->_len}}
#define cspan_from_array(array) \
{.data=(array) + c_static_assert(sizeof(array) != sizeof(void*)), .dim={c_ARRAYLEN(array)}}
@@ -117,20 +121,16 @@ typedef struct { uint32_t d[4]; } cspan_idx4;
#define cspan_front(self) ((self)->data)
#define cspan_back(self) ((self)->data + cspan_size(self) - 1)
-// cspan_subspanN:
+// cspan_subspanN. for N > 3, use cspan_slice(&ms4, {offset, offset + count}, {0}, {0}, {0});
#define cspan_subspan(self, offset, count) \
{.data=cspan_at(self, offset), .dim={count}}
#define cspan_subspan2(self, offset, count) \
{.data=cspan_at(self, offset, 0), .dim={count, (self)->dim[1]}, .stride={(self)->stride}}
#define cspan_subspan3(self, offset, count) \
- {.data=cspan_at(self, offset, 0, 0), .dim={count, (self)->dim[1], (self)->dim[2]}, \
- .stride={(self)->stride}}
-#define cspan_subspan4(self, offset, count) \
- {.data=cspan_at(self, offset, 0, 0, 0), .dim={count, (self)->dim[1], (self)->dim[2], (self)->dim[3]}, \
- .stride={(self)->stride}}
+ {.data=cspan_at(self, offset, 0, 0), .dim={count, (self)->dim[1], (self)->dim[2]}, .stride={(self)->stride}}
-// cspan_submdN:
+// cspan_submdN: return reduced rank
#define cspan_submd4(...) c_MACRO_OVERLOAD(cspan_submd4, __VA_ARGS__)
#define cspan_submd3(...) c_MACRO_OVERLOAD(cspan_submd3, __VA_ARGS__)
@@ -145,40 +145,55 @@ typedef struct { uint32_t d[4]; } cspan_idx4;
{.data=cspan_at(self, x, 0, 0, 0), .dim={(self)->dim[1], (self)->dim[2], (self)->dim[3]}, \
.stride={.d={0, (self)->stride.d[2], (self)->stride.d[3]}}}
#define cspan_submd4_3(self, x, y) \
- {.data=cspan_at(self, x, y, 0, 0), .dim={(self)->dim[2], (self)->dim[3]}, \
- .stride={.d={0, (self)->stride.d[3]}}}
+ {.data=cspan_at(self, x, y, 0, 0), .dim={(self)->dim[2], (self)->dim[3]}, .stride={.d={0, (self)->stride.d[3]}}}
#define cspan_submd4_4(self, x, y, z) \
{.data=cspan_at(self, x, y, z, 0), .dim={(self)->dim[3]}}
+#define cspan_submd5_2(self, x) \
+ {.data=cspan_at(self, x, 0, 0, 0, 0), .dim={(self)->dim[1], (self)->dim[2], (self)->dim[3], (self)->dim[4]}, \
+ .stride={.d={0, (self)->stride.d[2], (self)->stride.d[3], (self)->stride.d[4]}}}
+#define cspan_submd5_3(self, x, y) \
+ {.data=cspan_at(self, x, y, 0, 0, 0), .dim={(self)->dim[2], (self)->dim[3], (self)->dim[4]}, \
+ .stride={.d={0, (self)->stride.d[3], (self)->stride.d[4]}}}
+#define cspan_submd5_4(self, x, y, z) \
+ {.data=cspan_at(self, x, y, z, 0, 0), .dim={(self)->dim[3], (self)->dim[4]}, .stride={.d={0, (self)->stride.d[4]}}}
+#define cspan_submd5_5(self, x, y, z, w) \
+ {.data=cspan_at(self, x, y, z, w, 0), .dim={(self)->dim[4]}}
+
// cspan_slice:
// e.g.: cspan_slice(&ms3, {1,3}, {0}, {1,4});
#define cspan_slice(self, ...) \
((void)((self)->data += _cspan_slice(cspan_rank(self), (self)->dim, (self)->stride.d, \
- (const uint32_t[][2]){__VA_ARGS__}) + \
+ (const int32_t[][2]){__VA_ARGS__}) + \
c_static_assert(cspan_rank(self) == \
- sizeof((const uint32_t[][2]){__VA_ARGS__})/8)))
+ sizeof((const int32_t[][2]){__VA_ARGS__})/8)))
// FUNCTIONS
-STC_INLINE size_t _cspan_i1(const uint32_t dim[1], const cspan_idx1 stri, uint32_t x)
- { c_ASSERT(x < dim[0]); return x; }
+STC_INLINE intptr_t _cspan_i1(const int32_t dim[1], const cspan_idx1 stri, int32_t x)
+ { c_ASSERT(c_LTu(x, dim[0])); return x; }
-STC_INLINE size_t _cspan_i2(const uint32_t dim[2], const cspan_idx2 stri, uint32_t x, uint32_t y)
- { c_ASSERT(x < dim[0] && y < dim[1]); return stri.d[1]*x + y; }
+STC_INLINE intptr_t _cspan_i2(const int32_t dim[2], const cspan_idx2 stri, int32_t x, int32_t y)
+ { c_ASSERT(c_LTu(x, dim[0]) && c_LTu(y, dim[1])); return (intptr_t)stri.d[1]*x + y; }
-STC_INLINE size_t _cspan_i3(const uint32_t dim[3], const cspan_idx3 stri, uint32_t x, uint32_t y, uint32_t z) {
- c_ASSERT(x < dim[0] && y < dim[1] && z < dim[2]);
- return stri.d[2]*(stri.d[1]*x + y) + z;
+STC_INLINE intptr_t _cspan_i3(const int32_t dim[3], const cspan_idx3 stri, int32_t x, int32_t y, int32_t z) {
+ c_ASSERT(c_LTu(x, dim[0]) && c_LTu(y, dim[1]) && c_LTu(z, dim[2]));
+ return (intptr_t)stri.d[2]*(stri.d[1]*x + y) + z;
+}
+STC_INLINE intptr_t _cspan_i4(const int32_t dim[4], const cspan_idx4 stri, int32_t x, int32_t y,
+ int32_t z, int32_t w) {
+ c_ASSERT(c_LTu(x, dim[0]) && c_LTu(y, dim[1]) && c_LTu(z, dim[2]) && c_LTu(w, dim[3]));
+ return (intptr_t)stri.d[3]*(stri.d[2]*(stri.d[1]*x + y) + z) + w;
}
-STC_INLINE size_t _cspan_i4(const uint32_t dim[4], const cspan_idx4 stri, uint32_t x, uint32_t y, \
- uint32_t z, uint32_t w) {
- c_ASSERT(x < dim[0] && y < dim[1] && z < dim[2] && w < dim[3]);
- return stri.d[3]*(stri.d[2]*(stri.d[1]*x + y) + z) + w;
+STC_INLINE intptr_t _cspan_i5(const int32_t dim[4], const cspan_idx4 stri, int32_t x, int32_t y, int32_t z,
+ int32_t w, int32_t v) {
+ c_ASSERT(c_LTu(x, dim[0]) && c_LTu(y, dim[1]) && c_LTu(z, dim[2]) && c_LTu(w, dim[3]) && c_LTu(v, dim[4]));
+ return (intptr_t)stri.d[4]*(stri.d[3]*(stri.d[2]*(stri.d[1]*x + y) + z) + w) + v;
}
-STC_INLINE size_t _cspan_size(const uint32_t dim[], unsigned rank) {
- size_t sz = dim[0];
+STC_INLINE intptr_t _cspan_size(const int32_t dim[], int rank) {
+ intptr_t sz = dim[0];
while (rank-- > 1) sz *= dim[rank];
return sz;
}
@@ -186,30 +201,31 @@ STC_INLINE size_t _cspan_size(const uint32_t dim[], unsigned rank) {
#define _cspan_next_1(r, pos, d, s) (++pos[0], 1)
#define _cspan_next_3 _cspan_next_2
#define _cspan_next_4 _cspan_next_2
+#define _cspan_next_5 _cspan_next_2
-static inline size_t _cspan_next_2(int rank, uint32_t pos[], const uint32_t dim[], const uint32_t stride[]) {
- size_t off = 1, rs = 1;
+STC_INLINE intptr_t _cspan_next_2(int rank, int32_t pos[], const int32_t dim[], const int32_t stride[]) {
+ intptr_t off = 1, rs = 1;
++pos[rank - 1];
while (--rank && pos[rank] == dim[rank]) {
pos[rank] = 0, ++pos[rank - 1];
- const size_t ds = rs*dim[rank];
+ const intptr_t ds = rs*dim[rank];
rs *= stride[rank];
off += rs - ds;
}
return off;
}
-STC_INLINE size_t _cspan_slice(int rank, uint32_t dim[], const uint32_t stri[], const uint32_t a[][2]) {
- uint32_t t = a[0][1] ? a[0][1] : dim[0];
- c_ASSERT(t <= dim[0]);
+STC_INLINE intptr_t _cspan_slice(int rank, int32_t dim[], const int32_t stri[], const int32_t a[][2]) {
+ int32_t t = a[0][1] ? a[0][1] : dim[0];
+ c_ASSERT(!c_LTu(dim[0], t));
dim[0] = t - a[0][0];
- size_t off = a[0][0];
+ intptr_t off = a[0][0];
for (int i = 1; i < rank; ++i) {
off *= stri[i];
off += a[i][0];
t = a[i][1] ? a[i][1] : dim[i];
- c_ASSERT(t <= dim[i]);
+ c_ASSERT(!c_LTu(dim[i], t));
dim[i] = t - a[i][0];
}
return off;