/* -*-c-*-
 *
 * $Id$
 *
 * Portable bit-level manipulation macros
 *
 * (c) 1998 Straylight/Edgeware
 */

/*----- Licensing notice --------------------------------------------------*
 *
 * This file is part of the mLib utilities library.
 *
 * mLib is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Library General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * mLib is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License along with mLib; if not, write to the Free
 * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
 * MA 02111-1307, USA.
 */

#ifndef MLIB_BITS_H
#define MLIB_BITS_H

#ifdef __cplusplus
extern "C" {
#endif

/*----- Header files ------------------------------------------------------*/

#include <limits.h>
#include <stddef.h>
#if __STDC_VERSION__ >= 199900l
# include <stdint.h>
#endif

/*----- Decide on some types ----------------------------------------------*/

/* --- Make GNU C shut up --- */

#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 91)
# define MLIB_BITS_EXTENSION __extension__
#else
# define MLIB_BITS_EXTENSION
#endif

/* --- Decide on a 32-bit type --- *
 *
 * I want a type which is capable of expressing 32-bit numbers. Because some
 * implementations have 64-bit @long@s (infinitely preferable to the abortion
 * that is @long long@), using @unsigned long@ regardless is wasteful. So,
 * if @int@ appears to be good enough, then I'll go with that.
 */

#if UINT_MAX >= 0xffffffffu
typedef unsigned int uint32;
#else
typedef unsigned long uint32;
#endif

/* --- Decide on a 64-bit type --- *
 *
 * The test is quite subtle. Think about it. Note that (at least on my
 * machine), the 32-bit macros are *much* faster than GCC's @long long@
 * support.
 */

#if defined(ULONG_LONG_MAX) && !defined(ULLONG_MAX)
# define ULLONG_MAX ULONG_LONG_MAX
#endif

#if UINT_MAX >> 31 > 0xffffffff
# define HAVE_UINT64
typedef unsigned int uint64;
#elif ULONG_MAX >> 31 > 0xffffffff
# define HAVE_UINT64
typedef unsigned long uint64;
#elif defined(ULLONG_MAX)
# define HAVE_UINT64
MLIB_BITS_EXTENSION typedef unsigned long long uint64;
#endif
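
/* --- A reading of the test above --- *
 *
 * If an unsigned type has N value bits then its MAX, shifted right by 31,
 * is 2^(N-31) - 1, which exceeds 0xffffffff exactly when N >= 64. Phrasing
 * the test as a shift avoids writing a 64-bit constant, which an old
 * preprocessor might not be able to evaluate.
 */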

#ifdef DEBUG64
# undef HAVE_UINT64
#endif

#ifdef HAVE_UINT64
typedef struct { uint64 i; } kludge64;
#else
typedef struct { uint32 hi, lo; } kludge64;
#endif

/* --- Decide on a 24-bit type --- */

#if UINT_MAX >= 0x00ffffffu
typedef unsigned int uint24;
#else
typedef unsigned long uint24;
#endif

/* --- Decide on 16-bit and 8-bit types --- *
 *
 * This is more for brevity than anything else.
 */

typedef unsigned short uint16;
typedef unsigned char octet, uint8;

/* --- WARNING! --- *
 *
 * Never lose sight of the fact that the above types may be wider than the
 * names suggest. Some architectures have 32-bit @short@s for example.
 */
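
/* --- An illustration of the pitfall --- *
 *
 * Illustrative sketch (the values are arbitrary): if uint32 is really a
 * 64-bit @unsigned long@, then with uint32 x = 0, ~x has 64 set bits and
 * (~x == 0xffffffffu) is false. Truncate explicitly instead:
 *
 *   U32(~x) == MASK32       (true whatever the actual width of uint32)
 */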

/*----- Macros ------------------------------------------------------------*/

/* --- Useful masks --- */

#define MASK8 0xffu
#define MASK16 0xffffu
#define MASK16_L MASK16
#define MASK16_B MASK16
#define MASK24 0xffffffu
#define MASK24_L MASK24
#define MASK24_B MASK24
#define MASK32 0xffffffffu
#define MASK32_L MASK32
#define MASK32_B MASK32

#ifdef HAVE_UINT64
# define MASK64 MLIB_BITS_EXTENSION 0xffffffffffffffffu
# define MASK64_L MASK64
# define MASK64_B MASK64
#endif

/* --- Sizes --- */

#define SZ_8 1
#define SZ_16 2
#define SZ_16_L 2
#define SZ_16_B 2
#define SZ_24 3
#define SZ_24_L 3
#define SZ_24_B 3
#define SZ_32 4
#define SZ_32_L 4
#define SZ_32_B 4

#ifdef HAVE_UINT64
# define SZ_64 8
# define SZ_64_L 8
# define SZ_64_B 8
#endif

/* --- Type aliases --- */

#define TY_U8 octet
#define TY_U16 uint16
#define TY_U16_L uint16
#define TY_U16_B uint16
#define TY_U24 uint24
#define TY_U24_L uint24
#define TY_U24_B uint24
#define TY_U32 uint32
#define TY_U32_L uint32
#define TY_U32_B uint32

#ifdef HAVE_UINT64
# define TY_U64 uint64
# define TY_U64_L uint64
# define TY_U64_B uint64
#endif

/* --- List macros --- */

#ifdef HAVE_UINT64
# define DOUINTCONV(_) \
    _(8, 8, 8) \
    _(16, 16, 16) _(16, 16_L, 16l) _(16, 16_B, 16b) \
    _(24, 24, 24) _(24, 24_L, 24l) _(24, 24_B, 24b) \
    _(32, 32, 32) _(32, 32_L, 32l) _(32, 32_B, 32b) \
    _(64, 64, 64) _(64, 64_L, 64l) _(64, 64_B, 64b)
# define DOUINTSZ(_) _(8) _(16) _(24) _(32) _(64)
#else
# define DOUINTCONV(_) \
    _(8, 8, 8) \
    _(16, 16, 16) _(16, 16_L, 16l) _(16, 16_B, 16b) \
    _(24, 24, 24) _(24, 24_L, 24l) _(24, 24_B, 24b) \
    _(32, 32, 32) _(32, 32_L, 32l) _(32, 32_B, 32b)
# define DOUINTSZ(_) _(8) _(16) _(24) _(32)
#endif
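
/* --- Example: using DOUINTCONV --- *
 *
 * Illustrative sketch. DOUINTCONV is an `X macro' list: handing it a macro
 * of three arguments (width, uppercase suffix, lowercase suffix) stamps out
 * one copy per supported conversion. The names DECL and loadNN below are
 * hypothetical, invented for illustration only:
 *
 *   #define DECL(n, W, w) extern TY_U##W load##w(const void *p);
 *   DOUINTCONV(DECL)
 *   #undef DECL
 *
 * This expands to one declaration per width and byte order, for example
 *
 *   extern uint16 load16l(const void *p);
 */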

/* --- Type coercions --- */

#define U8(x) ((octet)((x) & MASK8))
#define U16(x) ((uint16)((x) & MASK16))
#define U24(x) ((uint24)((x) & MASK24))
#define U32(x) ((uint32)((x) & MASK32))

#ifdef HAVE_UINT64
# define U64(x) ((uint64)(x) & MASK64)
# define U64_(d, x) ((d).i = U64((x).i))
#else
# define U64_(d, x) ((d).hi = U32((x).hi), (d).lo = U32((x).lo))
#endif

/* --- Safe shifting macros --- */

#define LSL8(v, s) U8(U8(v) << ((s) & 7u))
#define LSR8(v, s) U8(U8(v) >> ((s) & 7u))
#define LSL16(v, s) U16(U16(v) << ((s) & 15u))
#define LSR16(v, s) U16(U16(v) >> ((s) & 15u))
#define LSL24(v, s) U24(U24(v) << ((s) % 24u))
#define LSR24(v, s) U24(U24(v) >> ((s) % 24u))
#define LSL32(v, s) U32(U32(v) << ((s) & 31u))
#define LSR32(v, s) U32(U32(v) >> ((s) & 31u))

#ifdef HAVE_UINT64
# define LSL64(v, s) U64(U64(v) << ((s) & 63u))
# define LSR64(v, s) U64(U64(v) >> ((s) & 63u))
# define LSL64_(d, v, s) ((d).i = LSL64((v).i, (s)))
# define LSR64_(d, v, s) ((d).i = LSR64((v).i, (s)))
#else
# define LSL64_(d, v, s) do { \
    unsigned _s = (s) & 63u; \
    uint32 _l = (v).lo, _h = (v).hi; \
    kludge64 *_d = &(d); \
    if (_s >= 32) { \
      _d->hi = LSL32(_l, _s - 32u); \
      _d->lo = 0; \
    } else if (!_s) { \
      _d->lo = _l; \
      _d->hi = _h; \
    } else { \
      _d->hi = LSL32(_h, _s) | LSR32(_l, 32u - _s); \
      _d->lo = LSL32(_l, _s); \
    } \
  } while (0)
# define LSR64_(d, v, s) do { \
    unsigned _s = (s) & 63u; \
    uint32 _l = (v).lo, _h = (v).hi; \
    kludge64 *_d = &(d); \
    if (_s >= 32) { \
      _d->lo = LSR32(_h, _s - 32u); \
      _d->hi = 0; \
    } else if (!_s) { \
      _d->lo = _l; \
      _d->hi = _h; \
    } else { \
      _d->lo = LSR32(_l, _s) | LSL32(_h, 32u - _s); \
      _d->hi = LSR32(_h, _s); \
    } \
  } while (0)
#endif
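
/* --- Example: safe shifts --- *
 *
 * Illustrative values. The shift amount is reduced modulo the operand
 * width and the result is masked back to that width, so the expressions
 * below are well defined even where a plain C shift would not be:
 *
 *   uint32 x = 0x80000000u;
 *   uint32 a = LSL32(x, 1);      (a == 0: the top bit is shifted out)
 *   uint32 b = LSR32(x, 31);     (b == 1)
 *   uint32 c = LSL32(x, 32);     (the count wraps to zero, so c == x)
 */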

/* --- Rotation macros --- */

#define ROL8(v, s) (LSL8((v), (s)) | (LSR8((v), 8u - (s))))
#define ROR8(v, s) (LSR8((v), (s)) | (LSL8((v), 8u - (s))))
#define ROL16(v, s) (LSL16((v), (s)) | (LSR16((v), 16u - (s))))
#define ROR16(v, s) (LSR16((v), (s)) | (LSL16((v), 16u - (s))))
#define ROL24(v, s) (LSL24((v), (s)) | (LSR24((v), 24u - (s))))
#define ROR24(v, s) (LSR24((v), (s)) | (LSL24((v), 24u - (s))))
#define ROL32(v, s) (LSL32((v), (s)) | (LSR32((v), 32u - (s))))
#define ROR32(v, s) (LSR32((v), (s)) | (LSL32((v), 32u - (s))))

#ifdef HAVE_UINT64
# define ROL64(v, s) (LSL64((v), (s)) | (LSR64((v), 64u - (s))))
# define ROR64(v, s) (LSR64((v), (s)) | (LSL64((v), 64u - (s))))
# define ROL64_(d, v, s) ((d).i = ROL64((v).i, (s)))
# define ROR64_(d, v, s) ((d).i = ROR64((v).i, (s)))
#else
# define ROL64_(d, v, s) do { \
    unsigned _s = (s) & 63u; \
    uint32 _l = (v).lo, _h = (v).hi; \
    kludge64 *_d = &(d); \
    if (_s > 32) { \
      _d->hi = LSL32(_l, _s - 32u) | LSR32(_h, 64u - _s); \
      _d->lo = LSL32(_h, _s - 32u) | LSR32(_l, 64u - _s); \
    } else if (!_s) { \
      _d->lo = _l; \
      _d->hi = _h; \
    } else if (_s == 32) { \
      _d->lo = _h; \
      _d->hi = _l; \
    } else { \
      _d->hi = LSL32(_h, _s) | LSR32(_l, 32u - _s); \
      _d->lo = LSL32(_l, _s) | LSR32(_h, 32u - _s); \
    } \
  } while (0)
# define ROR64_(d, v, s) do { \
    unsigned _s = (s) & 63u; \
    uint32 _l = (v).lo, _h = (v).hi; \
    kludge64 *_d = &(d); \
    if (_s > 32) { \
      _d->hi = LSR32(_l, _s - 32u) | LSL32(_h, 64u - _s); \
      _d->lo = LSR32(_h, _s - 32u) | LSL32(_l, 64u - _s); \
    } else if (!_s) { \
      _d->lo = _l; \
      _d->hi = _h; \
    } else if (_s == 32) { \
      _d->lo = _h; \
      _d->hi = _l; \
    } else { \
      _d->hi = LSR32(_h, _s) | LSL32(_l, 32u - _s); \
      _d->lo = LSR32(_l, _s) | LSL32(_h, 32u - _s); \
    } \
  } while (0)
#endif
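
/* --- Example: rotation --- *
 *
 * Illustrative values:
 *
 *   uint32 x = 0x12345678u;
 *   uint32 a = ROL32(x, 8);      (a == 0x34567812)
 *   uint32 b = ROR32(x, 8);      (b == 0x78123456)
 */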

/* --- Storage and retrieval --- */

#define GETBYTE(p, o) (((octet *)(p))[o] & MASK8)
#define PUTBYTE(p, o, v) (((octet *)(p))[o] = U8((v)))

#define LOAD8(p) (GETBYTE((p), 0))
#define STORE8(p, v) (PUTBYTE((p), 0, (v)))

#define LOAD16_B(p) \
  (((uint16)GETBYTE((p), 0) << 8) | \
   ((uint16)GETBYTE((p), 1) << 0))
#define LOAD16_L(p) \
  (((uint16)GETBYTE((p), 0) << 0) | \
   ((uint16)GETBYTE((p), 1) << 8))
#define LOAD16(p) LOAD16_B((p))

#define STORE16_B(p, v) \
  (PUTBYTE((p), 0, (uint16)(v) >> 8), \
   PUTBYTE((p), 1, (uint16)(v) >> 0))
#define STORE16_L(p, v) \
  (PUTBYTE((p), 0, (uint16)(v) >> 0), \
   PUTBYTE((p), 1, (uint16)(v) >> 8))
#define STORE16(p, v) STORE16_B((p), (v))

#define LOAD24_B(p) \
  (((uint24)GETBYTE((p), 0) << 16) | \
   ((uint24)GETBYTE((p), 1) << 8) | \
   ((uint24)GETBYTE((p), 2) << 0))
#define LOAD24_L(p) \
  (((uint24)GETBYTE((p), 0) << 0) | \
   ((uint24)GETBYTE((p), 1) << 8) | \
   ((uint24)GETBYTE((p), 2) << 16))
#define LOAD24(p) LOAD24_B((p))

#define STORE24_B(p, v) \
  (PUTBYTE((p), 0, (uint24)(v) >> 16), \
   PUTBYTE((p), 1, (uint24)(v) >> 8), \
   PUTBYTE((p), 2, (uint24)(v) >> 0))
#define STORE24_L(p, v) \
  (PUTBYTE((p), 0, (uint24)(v) >> 0), \
   PUTBYTE((p), 1, (uint24)(v) >> 8), \
   PUTBYTE((p), 2, (uint24)(v) >> 16))
#define STORE24(p, v) STORE24_B((p), (v))

#define LOAD32_B(p) \
  (((uint32)GETBYTE((p), 0) << 24) | \
   ((uint32)GETBYTE((p), 1) << 16) | \
   ((uint32)GETBYTE((p), 2) << 8) | \
   ((uint32)GETBYTE((p), 3) << 0))
#define LOAD32_L(p) \
  (((uint32)GETBYTE((p), 0) << 0) | \
   ((uint32)GETBYTE((p), 1) << 8) | \
   ((uint32)GETBYTE((p), 2) << 16) | \
   ((uint32)GETBYTE((p), 3) << 24))
#define LOAD32(p) LOAD32_B((p))

#define STORE32_B(p, v) \
  (PUTBYTE((p), 0, (uint32)(v) >> 24), \
   PUTBYTE((p), 1, (uint32)(v) >> 16), \
   PUTBYTE((p), 2, (uint32)(v) >> 8), \
   PUTBYTE((p), 3, (uint32)(v) >> 0))
#define STORE32_L(p, v) \
  (PUTBYTE((p), 0, (uint32)(v) >> 0), \
   PUTBYTE((p), 1, (uint32)(v) >> 8), \
   PUTBYTE((p), 2, (uint32)(v) >> 16), \
   PUTBYTE((p), 3, (uint32)(v) >> 24))
#define STORE32(p, v) STORE32_B((p), (v))
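
/* --- Example: byte-order-independent (de)serialization --- *
 *
 * Illustrative values. The unsuffixed macros default to big-endian
 * (network) order:
 *
 *   octet b[4];
 *   uint32 x = 0xdeadbeefu, y;
 *
 *   STORE32_B(b, x);       (b[] == { 0xde, 0xad, 0xbe, 0xef })
 *   y = LOAD32_L(b);       (y == 0xefbeadde: same bytes, read little-endian)
 */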

#ifdef HAVE_UINT64

# define LOAD64_B(p) \
    (((uint64)GETBYTE((p), 0) << 56) | \
     ((uint64)GETBYTE((p), 1) << 48) | \
     ((uint64)GETBYTE((p), 2) << 40) | \
     ((uint64)GETBYTE((p), 3) << 32) | \
     ((uint64)GETBYTE((p), 4) << 24) | \
     ((uint64)GETBYTE((p), 5) << 16) | \
     ((uint64)GETBYTE((p), 6) << 8) | \
     ((uint64)GETBYTE((p), 7) << 0))
# define LOAD64_L(p) \
    (((uint64)GETBYTE((p), 0) << 0) | \
     ((uint64)GETBYTE((p), 1) << 8) | \
     ((uint64)GETBYTE((p), 2) << 16) | \
     ((uint64)GETBYTE((p), 3) << 24) | \
     ((uint64)GETBYTE((p), 4) << 32) | \
     ((uint64)GETBYTE((p), 5) << 40) | \
     ((uint64)GETBYTE((p), 6) << 48) | \
     ((uint64)GETBYTE((p), 7) << 56))
# define LOAD64(p) LOAD64_B((p))
# define LOAD64_B_(d, p) ((d).i = LOAD64_B((p)))
# define LOAD64_L_(d, p) ((d).i = LOAD64_L((p)))
# define LOAD64_(d, p) LOAD64_B_((d), (p))

# define STORE64_B(p, v) \
    (PUTBYTE((p), 0, (uint64)(v) >> 56), \
     PUTBYTE((p), 1, (uint64)(v) >> 48), \
     PUTBYTE((p), 2, (uint64)(v) >> 40), \
     PUTBYTE((p), 3, (uint64)(v) >> 32), \
     PUTBYTE((p), 4, (uint64)(v) >> 24), \
     PUTBYTE((p), 5, (uint64)(v) >> 16), \
     PUTBYTE((p), 6, (uint64)(v) >> 8), \
     PUTBYTE((p), 7, (uint64)(v) >> 0))
# define STORE64_L(p, v) \
    (PUTBYTE((p), 0, (uint64)(v) >> 0), \
     PUTBYTE((p), 1, (uint64)(v) >> 8), \
     PUTBYTE((p), 2, (uint64)(v) >> 16), \
     PUTBYTE((p), 3, (uint64)(v) >> 24), \
     PUTBYTE((p), 4, (uint64)(v) >> 32), \
     PUTBYTE((p), 5, (uint64)(v) >> 40), \
     PUTBYTE((p), 6, (uint64)(v) >> 48), \
     PUTBYTE((p), 7, (uint64)(v) >> 56))
# define STORE64(p, v) STORE64_B((p), (v))
# define STORE64_B_(p, v) STORE64_B((p), (v).i)
# define STORE64_L_(p, v) STORE64_L((p), (v).i)
# define STORE64_(p, v) STORE64_B_((p), (v))

#else

# define LOAD64_B_(d, p) \
    ((d).hi = LOAD32_B((octet *)(p) + 0), \
     (d).lo = LOAD32_B((octet *)(p) + 4))
# define LOAD64_L_(d, p) \
    ((d).lo = LOAD32_L((octet *)(p) + 0), \
     (d).hi = LOAD32_L((octet *)(p) + 4))
# define LOAD64_(d, p) LOAD64_B_((d), (p))

# define STORE64_B_(p, v) \
    (STORE32_B((octet *)(p) + 0, (v).hi), \
     STORE32_B((octet *)(p) + 4, (v).lo))
# define STORE64_L_(p, v) \
    (STORE32_L((octet *)(p) + 0, (v).lo), \
     STORE32_L((octet *)(p) + 4, (v).hi))
# define STORE64_(p, v) STORE64_B_((p), (v))

#endif

/* --- Other operations on 64-bit integers --- */

#ifdef HAVE_UINT64
# define SET64(d, h, l) ((d).i = (U64((h)) << 32) | U64((l)))
# define ASSIGN64(d, x) ((d).i = U64((x)))
# define HI64(x) U32((x).i >> 32)
# define LO64(x) U32((x).i)
# define GET64(t, x) ((t)(x).i)
#else
# define SET64(d, h, l) ((d).hi = U32(h), (d).lo = U32(l))
# define ASSIGN64(d, x) \
    ((d).hi = (((x) & ~MASK32) >> 16) >> 16, (d).lo = U32(x))
# define HI64(x) U32((x).hi)
# define LO64(x) U32((x).lo)
# define GET64(t, x) (((((t)HI64(x) << 16) << 16) & ~MASK32) | (t)LO64(x))
#endif

#ifdef HAVE_UINT64
# define AND64(d, x, y) ((d).i = (x).i & (y).i)
# define OR64(d, x, y) ((d).i = (x).i | (y).i)
# define XOR64(d, x, y) ((d).i = (x).i ^ (y).i)
# define CPL64(d, x) ((d).i = ~(x).i)
# define ADD64(d, x, y) ((d).i = (x).i + (y).i)
# define SUB64(d, x, y) ((d).i = (x).i - (y).i)
# define CMP64(x, op, y) ((x).i op (y).i)
# define ZERO64(x) ((x).i == 0)
#else
# define AND64(d, x, y) ((d).lo = (x).lo & (y).lo, (d).hi = (x).hi & (y).hi)
# define OR64(d, x, y) ((d).lo = (x).lo | (y).lo, (d).hi = (x).hi | (y).hi)
# define XOR64(d, x, y) ((d).lo = (x).lo ^ (y).lo, (d).hi = (x).hi ^ (y).hi)
# define CPL64(d, x) ((d).lo = ~(x).lo, (d).hi = ~(x).hi)
# define ADD64(d, x, y) do { \
    uint32 _x = U32((x).lo + (y).lo); \
    (d).hi = (x).hi + (y).hi + (_x < (x).lo); \
    (d).lo = _x; \
  } while (0)
# define SUB64(d, x, y) do { \
    uint32 _x = U32((x).lo - (y).lo); \
    (d).hi = (x).hi - (y).hi - (_x > (x).lo); \
    (d).lo = _x; \
  } while (0)
# define CMP64(x, op, y) \
    ((x).hi == (y).hi ? (x).lo op (y).lo : (x).hi op (y).hi)
# define ZERO64(x) ((x).lo == 0 && (x).hi == 0)
#endif
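
/* --- Example: portable 64-bit arithmetic via kludge64 --- *
 *
 * Illustrative values. The same code works whether or not a native 64-bit
 * type exists:
 *
 *   kludge64 x, y, z;
 *
 *   SET64(x, 0x00000001, 0xffffffff);   (x == 0x00000001ffffffff)
 *   SET64(y, 0x00000000, 0x00000001);
 *   ADD64(z, x, y);                     (HI64(z) == 2, LO64(z) == 0)
 *
 * Afterwards CMP64(z, >, x) is true and ZERO64(z) is false.
 */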

/* --- Storing integers in tables --- */

#ifdef HAVE_UINT64
# define X64(x, y) { 0x##x##y }
#else
# define X64(x, y) { 0x##x, 0x##y }
#endif
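
/* --- Example: X64 in initializers --- *
 *
 * An illustrative table entry (the constant is arbitrary): X64 takes the
 * high and low 32-bit halves as separate hex digit groups, written without
 * the 0x prefix, and pastes them into whichever kludge64 representation is
 * in use:
 *
 *   static const kludge64 k[] = { X64(01234567, 89abcdef) };
 */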

/*----- That's all, folks -------------------------------------------------*/

#ifdef __cplusplus
}
#endif

#endif