/* -*-c-*-
 *
 * $Id: bits.h,v 1.8 2000/10/08 11:06:30 mdw Exp $
 *
 * Portable bit-level manipulation macros
 *
 * (c) 1998 Straylight/Edgeware
 */
/*----- Licensing notice --------------------------------------------------*
 *
 * This file is part of the mLib utilities library.
 *
 * mLib is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Library General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * mLib is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License along with mLib; if not, write to the Free
 * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
 * MA 02111-1307, USA.
 */
/*----- Revision history --------------------------------------------------*
 *
 * $Log: bits.h,v $
 * Revision 1.8  2000/10/08 11:06:30  mdw
 * Shut later versions of GCC up about use of @long long@.
 *
 * Revision 1.7  2000/07/22 09:48:26  mdw
 * Added macros for reading 64-bit values.
 *
 * Revision 1.6  2000/07/16 12:28:28  mdw
 * Add 64-bit support, with faked arithmetic on 32-bit hosts.
 *
 * Revision 1.5  2000/06/17 10:36:06  mdw
 * Support for 24-bit types.
 *
 * Revision 1.4  1999/12/10 23:42:04  mdw
 * Change header file guard names.
 *
 * Revision 1.3  1999/06/20 23:31:52  mdw
 * More portability enhancements.
 *
 * Revision 1.2  1999/06/17 00:12:46  mdw
 * Improve portability for shift and rotate macros.
 *
 * Revision 1.1  1999/06/01 09:46:19  mdw
 * New addition: bit manipulation macros.
 */
66 /*----- Header files ------------------------------------------------------*/
70 #if __STDC_VERSION__ >= 199900l
74 /*----- Decide on some types ----------------------------------------------*/
76 /* --- Make GNU C shut up --- */
78 #if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 91)
79 # define MLIB_BITS_EXTENSION __extension__
81 # define MLIB_BITS_EXTENSION
84 /* --- Decide on a 32-bit type --- *
86 * I want a type which is capable of expressing 32-bit numbers. Because some
87 * implementations have 64-bit @long@s (infinitely preferable to the abortion
88 * that is @long long@), using @unsigned long@ regardless is wasteful. So,
89 * if @int@ appears to be good enough, then I'll go with that.
92 #if UINT_MAX >= 0xffffffffu
93 typedef unsigned int uint32
;
95 typedef unsigned long uint32
;
98 /* --- Decide on a 64-bit type --- *
100 * The test is quite subtle. Think about it. Note that (at least on my
101 * machine), the 32-bit macros are *much* faster than GCC's @long long@
105 #if defined(ULONG_LONG_MAX) && !defined(ULLONG_MAX)
106 # define ULLONG_MAX ULONG_LONG_MAX
109 #if UINT_MAX >> 31 > 0xffffffff
111 typedef unsigned int uint64
;
112 #elif ULONG_MAX >> 31 > 0xffffffff
114 typedef unsigned long uint64
;
115 #elif defined(ULLONG_MAX)
117 MLIB_BITS_EXTENSION
typedef unsigned long long uint64
;
125 typedef struct { uint64 i
; } kludge64
;
127 typedef struct { uint32 hi
, lo
; } kludge64
;
130 /* --- Decide on a 24-bit type --- */
132 #if UINT_MAX >= 0x00ffffffu
133 typedef unsigned int uint24
;
135 typedef unsigned long uint24
;
138 /* --- Decide on 16-bit and 8-bit types --- *
140 * This is more for brevity than anything else.
143 typedef unsigned short uint16
;
144 typedef unsigned char octet
;
146 /* --- WARNING! --- *
148 * Never lose sight of the fact that the above types may be wider than the
149 * names suggest. Some architectures have 32-bit @short@s for example.
152 /*----- Macros ------------------------------------------------------------*/
154 /* --- Useful masks --- */
157 #define MASK16 0xffffu
158 #define MASK24 0xffffffu
159 #define MASK32 0xffffffffu
162 # define MASK64 MLIB_BITS_EXTENSION 0xffffffffffffffffu
165 /* --- Type coercions --- */
167 #define U8(x) ((octet)((x) & MASK8))
168 #define U16(x) ((uint16)((x) & MASK16))
169 #define U24(x) ((uint24)((x) & MASK24))
170 #define U32(x) ((uint32)((x) & MASK32))
173 # define U64(x) ((uint64)(x) & MASK64)
174 # define U64_(d, x) ((d).i = U64(x).i)
176 # define U64_(d, x) ((d).hi = U32((x).hi), (d).lo = U32((x).lo))
179 /* --- Safe shifting macros --- */
181 #define LSL8(v, s) U8(U8(v) << ((s) & 7u))
182 #define LSR8(v, s) U8(U8(v) >> ((s) & 7u))
183 #define LSL16(v, s) U16(U16(v) << ((s) & 15u))
184 #define LSR16(v, s) U16(U16(v) >> ((s) & 15u))
185 #define LSL24(v, s) U24(U24(v) << ((s) % 24u))
186 #define LSR24(v, s) U24(U24(v) >> ((s) % 24u))
187 #define LSL32(v, s) U32(U32(v) << ((s) & 31u))
188 #define LSR32(v, s) U32(U32(v) >> ((s) & 31u))
191 # define LSL64(v, s) U64(U64(v) << ((s) & 63u))
192 # define LSR64(v, s) U64(U64(v) >> ((s) & 63u))
193 # define LSL64_(d, v, s) ((d).i = LSL64((v).i, (s)))
194 # define LSR64_(d, v, s) ((d).i = LSR64((v).i, (s)))
196 # define LSL64_(d, v, s) do { \
197 unsigned _s = (s) & 63u; \
198 uint32 _l = (v).lo, _h = (v).hi; \
199 kludge64 *_d = &(d); \
201 _d->hi = LSL32(_l, _s - 32u); \
207 _d->hi = LSL32(_h, _s) | LSR32(_l, 32u - _s); \
208 _d->lo = LSL32(_l, _s); \
211 # define LSR64_(d, v, s) do { \
212 unsigned _s = (s) & 63u; \
213 uint32 _l = (v).lo, _h = (v).hi; \
214 kludge64 *_d = &(d); \
216 _d->lo = LSR32(_h, _s - 32u); \
222 _d->lo = LSR32(_l, _s) | LSL32(_h, 32u - _s); \
223 _d->hi = LSR32(_h, _s); \
228 /* --- Rotation macros --- */
230 #define ROL8(v, s) (LSL8((v), (s)) | (LSR8((v), 8u - (s))))
231 #define ROR8(v, s) (LSR8((v), (s)) | (LSL8((v), 8u - (s))))
232 #define ROL16(v, s) (LSL16((v), (s)) | (LSR16((v), 16u - (s))))
233 #define ROR16(v, s) (LSR16((v), (s)) | (LSL16((v), 16u - (s))))
234 #define ROL24(v, s) (LSL24((v), (s)) | (LSR24((v), 24u - (s))))
235 #define ROR24(v, s) (LSR24((v), (s)) | (LSL24((v), 24u - (s))))
236 #define ROL32(v, s) (LSL32((v), (s)) | (LSR32((v), 32u - (s))))
237 #define ROR32(v, s) (LSR32((v), (s)) | (LSL32((v), 32u - (s))))
240 # define ROL64(v, s) (LSL64((v), (s)) | (LSR64((v), 64u - (s))))
241 # define ROR64(v, s) (LSR64((v), (s)) | (LSL64((v), 64u - (s))))
242 # define ROL64_(d, v, s) ((d).i = ROL64((v).i, (s)))
243 # define ROR64_(d, v, s) ((d).i = ROR64((v).i, (s)))
245 # define ROL64_(d, v, s) do { \
246 unsigned _s = (s) & 63u; \
247 uint32 _l = (v).lo, _h = (v).hi; \
248 kludge64 *_d = &(d); \
250 _d->hi = LSL32(_l, _s - 32u) | LSR32(_h, 64u - _s); \
251 _d->lo = LSL32(_h, _s - 32u) | LSR32(_l, 64u - _s); \
256 _d->hi = LSL32(_h, _s) | LSR32(_l, 32u - _s); \
257 _d->lo = LSL32(_l, _s) | LSR32(_h, 32u - _s); \
260 # define ROR64_(d, v, s) do { \
261 unsigned _s = (s) & 63u; \
262 uint32 _l = (v).lo, _h = (v).hi; \
263 kludge64 *_d = &(d); \
265 _d->hi = LSR32(_l, _s - 32u) | LSL32(_h, 64u - _s); \
266 _d->lo = LSR32(_h, _s - 32u) | LSL32(_l, 64u - _s); \
271 _d->hi = LSR32(_h, _s) | LSL32(_l, 32u - _s); \
272 _d->lo = LSR32(_l, _s) | LSL32(_h, 32u - _s); \
277 /* --- Storage and retrieval --- */
279 #define GETBYTE(p, o) (((octet *)(p))[o] & MASK8)
280 #define PUTBYTE(p, o, v) (((octet *)(p))[o] = U8((v)))
282 #define LOAD8(p) (GETBYTE((p), 0))
283 #define STORE8(p, v) (PUTBYTE((p), 0, (v)))
285 #define LOAD16_B(p) \
286 (((uint16)GETBYTE((p), 0) << 8) | \
287 ((uint16)GETBYTE((p), 1) << 0))
288 #define LOAD16_L(p) \
289 (((uint16)GETBYTE((p), 0) << 0) | \
290 ((uint16)GETBYTE((p), 1) << 8))
291 #define LOAD16(p) LOAD16_B((p))
293 #define STORE16_B(p, v) \
294 (PUTBYTE((p), 0, (uint16)(v) >> 8), \
295 PUTBYTE((p), 1, (uint16)(v) >> 0))
296 #define STORE16_L(p, v) \
297 (PUTBYTE((p), 0, (uint16)(v) >> 0), \
298 PUTBYTE((p), 1, (uint16)(v) >> 8))
299 #define STORE16(p, v) STORE16_B((p), (v))
301 #define LOAD24_B(p) \
302 (((uint24)GETBYTE((p), 0) << 16) | \
303 ((uint24)GETBYTE((p), 1) << 8) | \
304 ((uint24)GETBYTE((p), 2) << 0))
305 #define LOAD24_L(p) \
306 (((uint24)GETBYTE((p), 0) << 0) | \
307 ((uint24)GETBYTE((p), 1) << 8) | \
308 ((uint24)GETBYTE((p), 2) << 16))
309 #define LOAD24(p) LOAD24_B((p))
311 #define STORE24_B(p, v) \
312 (PUTBYTE((p), 0, (uint24)(v) >> 16), \
313 PUTBYTE((p), 1, (uint24)(v) >> 8), \
314 PUTBYTE((p), 2, (uint24)(v) >> 0))
315 #define STORE24_L(p, v) \
316 (PUTBYTE((p), 0, (uint24)(v) >> 0), \
317 PUTBYTE((p), 1, (uint24)(v) >> 8), \
318 PUTBYTE((p), 2, (uint24)(v) >> 16))
319 #define STORE24(p, v) STORE24_B((p), (v))
321 #define LOAD32_B(p) \
322 (((uint32)GETBYTE((p), 0) << 24) | \
323 ((uint32)GETBYTE((p), 1) << 16) | \
324 ((uint32)GETBYTE((p), 2) << 8) | \
325 ((uint32)GETBYTE((p), 3) << 0))
326 #define LOAD32_L(p) \
327 (((uint32)GETBYTE((p), 0) << 0) | \
328 ((uint32)GETBYTE((p), 1) << 8) | \
329 ((uint32)GETBYTE((p), 2) << 16) | \
330 ((uint32)GETBYTE((p), 3) << 24))
331 #define LOAD32(p) LOAD32_B((p))
333 #define STORE32_B(p, v) \
334 (PUTBYTE((p), 0, (uint32)(v) >> 24), \
335 PUTBYTE((p), 1, (uint32)(v) >> 16), \
336 PUTBYTE((p), 2, (uint32)(v) >> 8), \
337 PUTBYTE((p), 3, (uint32)(v) >> 0))
338 #define STORE32_L(p, v) \
339 (PUTBYTE((p), 0, (uint32)(v) >> 0), \
340 PUTBYTE((p), 1, (uint32)(v) >> 8), \
341 PUTBYTE((p), 2, (uint32)(v) >> 16), \
342 PUTBYTE((p), 3, (uint32)(v) >> 24))
343 #define STORE32(p, v) STORE32_B((p), (v))
347 # define LOAD64_B(p) \
348 (((uint64)GETBYTE((p), 0) << 56) | \
349 ((uint64)GETBYTE((p), 1) << 48) | \
350 ((uint64)GETBYTE((p), 2) << 40) | \
351 ((uint64)GETBYTE((p), 3) << 32) | \
352 ((uint64)GETBYTE((p), 4) << 24) | \
353 ((uint64)GETBYTE((p), 5) << 16) | \
354 ((uint64)GETBYTE((p), 6) << 8) | \
355 ((uint64)GETBYTE((p), 7) << 0))
356 # define LOAD64_L(p) \
357 (((uint64)GETBYTE((p), 0) << 0) | \
358 ((uint64)GETBYTE((p), 1) << 8) | \
359 ((uint64)GETBYTE((p), 2) << 16) | \
360 ((uint64)GETBYTE((p), 3) << 24) | \
361 ((uint64)GETBYTE((p), 4) << 32) | \
362 ((uint64)GETBYTE((p), 5) << 40) | \
363 ((uint64)GETBYTE((p), 6) << 48) | \
364 ((uint64)GETBYTE((p), 7) << 56))
365 # define LOAD64(p) LOAD64_B((p))
366 # define LOAD64_B_(d, p) ((d).i = LOAD64_B((p)))
367 # define LOAD64_L_(d, p) ((d).i = LOAD64_L((p)))
368 # define LOAD64_(d, p) LOAD64_B_((d), (p))
370 # define STORE64_B(p, v) \
371 (PUTBYTE((p), 0, (uint64)(v) >> 56), \
372 PUTBYTE((p), 1, (uint64)(v) >> 48), \
373 PUTBYTE((p), 2, (uint64)(v) >> 40), \
374 PUTBYTE((p), 3, (uint64)(v) >> 32), \
375 PUTBYTE((p), 4, (uint64)(v) >> 24), \
376 PUTBYTE((p), 5, (uint64)(v) >> 16), \
377 PUTBYTE((p), 6, (uint64)(v) >> 8), \
378 PUTBYTE((p), 7, (uint64)(v) >> 0))
379 # define STORE64_L(p, v) \
380 (PUTBYTE((p), 0, (uint64)(v) >> 0), \
381 PUTBYTE((p), 1, (uint64)(v) >> 8), \
382 PUTBYTE((p), 2, (uint64)(v) >> 16), \
383 PUTBYTE((p), 3, (uint64)(v) >> 24), \
384 PUTBYTE((p), 4, (uint64)(v) >> 32), \
385 PUTBYTE((p), 5, (uint64)(v) >> 40), \
386 PUTBYTE((p), 6, (uint64)(v) >> 48), \
387 PUTBYTE((p), 7, (uint64)(v) >> 56))
388 # define STORE64(p, v) STORE64_B((p), (v))
389 # define STORE64_B_(p, v) STORE64_B((p), (v).i)
390 # define STORE64_L_(p, v) STORE64_L((p), (v).i)
391 # define STORE64_(p, v) STORE64_B_((p), (v))
395 # define LOAD64_B_(d, p) \
396 ((d).hi = LOAD32_B((octet *)(p) + 0), \
397 (d).lo = LOAD32_B((octet *)(p) + 4))
398 # define LOAD64_L_(d, p) \
399 ((d).lo = LOAD32_L((octet *)(p) + 0), \
400 (d).hi = LOAD32_L((octet *)(p) + 4))
401 # define LOAD64_(d, p) LOAD64_B_((d), (p))
403 # define STORE64_B_(p, v) \
404 (STORE32_B((octet *)(p) + 0, (v).hi), \
405 STORE32_B((octet *)(p) + 4, (v).lo))
406 # define STORE64_L_(p, v) \
407 (STORE32_L((octet *)(p) + 0, (v).lo), \
408 STORE32_L((octet *)(p) + 4, (v).hi))
409 # define STORE64_(p, v) STORE64_B_((p), (v))
413 /* --- Other operations on 64-bit integers --- */
416 # define SET64(d, h, l) ((d).i = (U64((h)) << 32) | U64((l)))
417 # define ASSIGN64(d, x) ((d).i = U64((x)))
418 # define HI64(x) U32((x).i >> 32)
419 # define LO64(x) U32((x).i)
420 # define GET64(t, x) ((t)(x).i)
422 # define SET64(d, h, l) ((d).hi = U32(h), (d).lo = U32(l))
423 # define ASSIGN64(d, x) \
424 ((d).hi = ((x & ~MASK32) >> 16) >> 16, (d).lo = U32(x))
425 # define HI64(x) U32((x).hi)
426 # define LO64(x) U32((x).lo)
427 # define GET64(t, x) (((((t)HI64(x) << 16) << 16) & ~MASK32) | (t)LO64(x))
431 # define AND64(d, x, y) ((d).i = (x).i & (y).i)
432 # define OR64(d, x, y) ((d).i = (x).i | (y).i)
433 # define XOR64(d, x, y) ((d).i = (x).i ^ (y).i)
434 # define CPL64(d, x) ((d).i = ~(x).i)
435 # define ADD64(d, x, y) ((d).i = (x).i + (y).i)
436 # define SUB64(d, x, y) ((d).i = (x).i - (y).i)
437 # define CMP64(x, op, y) ((x).i op (y).i)
438 # define ZERO64(x) ((x) == 0)
440 # define AND64(d, x, y) ((d).lo = (x).lo & (y).lo, (d).hi = (x).hi & (y).hi)
441 # define OR64(d, x, y) ((d).lo = (x).lo | (y).lo, (d).hi = (x).hi | (y).hi)
442 # define XOR64(d, x, y) ((d).lo = (x).lo ^ (y).lo, (d).hi = (x).hi ^ (y).hi)
443 # define CPL64(d, x) ((d).lo = ~(x).lo, (d).hi = ~(x).hi)
444 # define ADD64(d, x, y) do { \
445 uint32 _x = U32((x).lo + (y).lo); \
446 (d).hi = (x).hi + (y).hi + (_x < (x).lo); \
449 # define SUB64(d, x, y) do { \
450 uint32 _x = U32((x).lo - (y).lo); \
451 (d).hi = (x).hi - (y).hi - (_x > (x).lo); \
454 # define CMP64(x, op, y) \
455 ((x).hi == (y).hi ? (x).lo op (y).lo : (x).hi op (y).hi)
456 # define ZERO64(x) ((x).lo == 0 && (x).hi == 0)
459 /*----- That's all, folks -------------------------------------------------*/