'((author . "Straylight/Edgeware")
(full-title . "the mLib utilities library")
(library . "mLib")
- (licence-text . skelrc-lgpl))
+ (licence-text . "[[lgpl-2]]"))
skel-alist))
{
while (*p) {
switch (*p) {
- case ' ':
- DPUTC(d, '+');
+ case ' ': DPUTC(d, '+');
break;
default:
- if ((ctx->f & URLF_LAX) || isalnum((unsigned char)*p))
- goto safe;
- else
- goto unsafe;
- case '/':
- case '~':
- if (ctx->f & URLF_STRICT)
- goto unsafe;
- case '-':
- case '.':
- case '_':
- safe:
- DPUTC(d, *p);
- break;
- unsafe:
- case '+':
- case '%':
- case '=':
- case '&':
- case ';':
- dstr_putf(d, "%%%02x", *p);
- break;
+ if (isspace((unsigned char)*p)) goto unsafe;
+ else if (isalnum((unsigned char)*p)) goto safe;
+ else if (ctx->f&URLF_LAX) goto safe;
+ else goto unsafe;
+ case '/': case '~':
+ if (ctx->f&URLF_STRICT) goto unsafe; /* else fall through... */
+ safe: case '-': case '.': case '_':
+ DPUTC(d, *p); break;
+ unsafe: case '+': case '%': case '=': case '&': case ';':
+ dstr_putf(d, "%%%02x", *p); break;
}
p++;
}
+mlib (2.3.0) experimental; urgency=medium
+
+ * url: Always encode whitespace characters. Particularly egregiously,
+ mLib used to leave linefeeds unescaped in `lax' mode.
+ * bits: Document the many improvements since the original version nearly
+ 20 years ago.
+ * bits: Add macros for byte-swapping integers in-place.
+ * bits: Use compiler intrinsics where available.
+
+ -- Mark Wooding <mdw@distorted.org.uk> Tue, 12 Jun 2018 00:30:47 +0100
+
mlib (2.2.5) experimental; urgency=medium
* ident: Only close the socket once if connection fails early.
###--------------------------------------------------------------------------
### Component files.
+## Compiler checking.
+pkginclude_HEADERS += compiler.h
+LIBMANS += compiler.3
+
## Utility macros.
pkginclude_HEADERS += macros.h
LIBMANS += macros.3
.\" -*-nroff-*-
.TH bits 3 "20 June 1999" "Straylight/Edgeware" "mLib utilities library"
+.ie t \{\
+. ds ss \s8\u
+. ds se \d\s0
+.\}
+.el \{\
+. ds ss ^
+. ds se
+.\}
.SH NAME
bits \- portable bit manipulation macros
+.\" octet
+.\" uint16
+.\" uint24
+.\" uint32
+.\" uint64
+.\" kludge64
+.\"
+.\" MASK_8
+.\" MASK_16
+.\" MASK_16_L
+.\" MASK_16_B
+.\" MASK_24
+.\" MASK_24_L
+.\" MASK_24_B
+.\" MASK_32
+.\" MASK_32_L
+.\" MASK_32_B
+.\" MASK_64
+.\" MASK_64_L
+.\" MASK_64_B
+.\"
+.\" SZ_8
+.\" SZ_16
+.\" SZ_16_L
+.\" SZ_16_B
+.\" SZ_24
+.\" SZ_24_L
+.\" SZ_24_B
+.\" SZ_32
+.\" SZ_32_L
+.\" SZ_32_B
+.\" SZ_64
+.\" SZ_64_L
+.\" SZ_64_B
+.\"
+.\" TY_8
+.\" TY_16
+.\" TY_16_L
+.\" TY_16_B
+.\" TY_24
+.\" TY_24_L
+.\" TY_24_B
+.\" TY_32
+.\" TY_32_L
+.\" TY_32_B
+.\" TY_64
+.\" TY_64_L
+.\" TY_64_B
+.\"
+.\" DOUINTSZ
+.\" DOUINTCONV
+.\"
.\" @U8
.\" @U16
+.\" @U24
.\" @U32
+.\" @U64
+.\" @U64_
.\"
.\" @LSL8
.\" @LSR8
.\" @LSL16
.\" @LSR16
+.\" @LSL24
+.\" @LSR24
.\" @LSL32
.\" @LSR32
+.\" @LSL64
+.\" @LSR64
+.\" @LSL64_
+.\" @LSR64_
.\"
.\" @ROL8
.\" @ROR8
.\" @ROL16
.\" @ROR16
+.\" @ROL24
+.\" @ROR24
.\" @ROL32
-.\" @ROR32
+.\" @ROR32
+.\" @ROL64
+.\" @ROR64
+.\" @ROL64_
+.\" @ROR64_
+.\"
+.\" ENDSWAP16
+.\" ENDSWAP32
+.\" ENDSWAP64
+.\"
+.\" BTOH16
+.\" LTOH16
+.\" HTOB16
+.\" HTOL16
+.\" BTOH32
+.\" LTOH32
+.\" HTOB32
+.\" HTOL32
+.\" BTOH64
+.\" LTOH64
+.\" HTOB64
+.\" HTOL64
+.\"
+.\" RAW8
+.\" RAW16
+.\" RAW32
+.\" RAW64
.\"
.\" @GETBYTE
.\" @PUTBYTE
.\" @STORE16_B
.\" @STORE16
.\"
+.\" @LOAD24_L
+.\" @LOAD24_B
+.\" @LOAD24
+.\" @STORE24_L
+.\" @STORE24_B
+.\" @STORE24
+.\"
.\" @LOAD32_L
.\" @LOAD32_B
.\" @LOAD32
.\" @STORE32_B
.\" @STORE32
.\"
+.\" @LOAD64_L
+.\" @LOAD64_B
+.\" @LOAD64
+.\" @STORE64_L
+.\" @STORE64_B
+.\" @STORE64
+.\"
+.\" @LOAD64_L_
+.\" @LOAD64_B_
+.\" @LOAD64_
+.\" @STORE64_L_
+.\" @STORE64_B_
+.\" @STORE64_
+.\"
+.\" @SET64
+.\" @X64
+.\" @ASSIGN64
+.\" @HI64
+.\" @LO64
+.\" @GET64
+.\" @AND64
+.\" @OR64
+.\" @XOR64
+.\" @CPL64
+.\" @ADD64
+.\" @SUB64
+.\" @CMP64
+.\" @ZERO64
.SH SYNOPSIS
.nf
.B "#include <mLib/bits.h>"
-.BI "octet U8(" v );
-.BI "uint16 U16(" v );
-.BI "uint32 U32(" v );
+.BR "typedef " ... " octet;"
+.BR "typedef " ... " uint16;"
+.BR "typedef " ... " uint24;"
+.BR "typedef " ... " uint32;"
+.BR "typedef " ... " uint64;"
+.BR "typedef " ... " kludge64;"
-.BI "octet LSL8(" v ", " s );
-.BI "octet LSR8(" v ", " s );
-.BI "uint16 LSL16(" v ", " s );
-.BI "uint16 LSR16(" v ", " s );
-.BI "uint32 LSL32(" v ", " s );
-.BI "uint32 LSR32(" v ", " s );
+.BI "#define TY_" we " " type
+.BI "#define SZ_" we " \fR..."
+.BI "#define MASK_" we " \fR..."
-.BI "octet ROL8(" v ", " s );
-.BI "octet ROR8(" v ", " s );
-.BI "uint16 ROL16(" v ", " s );
-.BI "uint16 ROR16(" v ", " s );
-.BI "uint32 ROL32(" v ", " s );
-.BI "uint32 ROR32(" v ", " s );
+.BI "#define DOUINTSZ(" f ") \fR..."
+.BI "#define DOUINTCONV(" f ") \fR..."
-.BI "octet GETBYTE(" p ", " o );
-.BI "void PUTBYTE(" p ", " o ", " v );
+.IB type " U" w ( v );
-.BI "octet LOAD8(" p );
-.BI "void STORE8(" p ", " v );
+.IB type " LSL" w ( type " " v ", int " s );
+.IB type " LSR" w ( type " " v ", int " s );
+.IB type " ROL" w ( type " " v ", int " s );
+.IB type " ROR" w ( type " " v ", int " s );
-.BI "uint16 LOAD16_B(" p );
-.BI "uint16 LOAD16_L(" p );
-.BI "uint16 LOAD16(" p );
-.BI "void STORE16_B(" p ", " v );
-.BI "void STORE16_L(" p ", " v );
-.BI "void STORE16(" p ", " v );
+.BI "octet GETBYTE(void *" p ", size_t " o );
+.BI "void PUTBYTE(void *" p ", size_t " o ", octet " v );
-.BI "uint32 LOAD32_B(" p );
-.BI "uint32 LOAD32_L(" p );
-.BI "uint32 LOAD32(" p );
-.BI "void STORE32_B(" p ", " v );
-.BI "void STORE32_L(" p ", " v );
-.BI "void STORE32(" p ", " v );
+.IB type " LOAD" we "(void *" p );
+.BI "void STORE" we "(void *" p ", " type " " v );
+
+.BI "void SET64(kludge64 &" d ", uint32 " h ", uint32 " l );
+.BI "kludge64 X64(" hexh ", " hexl );
+.BI "void ASSIGN64(kludge64 &" d ", " x );
+.BI "uint32 HI64(kludge64 " x );
+.BI "uint32 LO64(kludge64 " x );
+.IB ty " GET64(" ty ", kludge64 " x );
+.BI "void AND64(kludge64 &" d ", kludge64 " x ", kludge64 " y );
+.BI "void OR64(kludge64 &" d ", kludge64 " x ", kludge64 " y );
+.BI "void XOR64(kludge64 &" d ", kludge64 " x ", kludge64 " y );
+.BI "void CPL64(kludge64 &" d ", kludge64 " x );
+.BI "void ADD64(kludge64 &" d ", kludge64 " x ", kludge64 " y );
+.BI "void SUB64(kludge64 &" d ", kludge64 " x ", kludge64 " y );
+.BI "int CMP64(kludge64 " x ", " op ", kludge64 " y );
+.BI "int ZERO64(kludge64 " x );
.fi
.SH DESCRIPTION
The header file
.B <mLib/bits.h>
contains a number of useful definitions for portably dealing with bit-
-and byte-level manipulation of larger quantities. It declares three
-types:
+and byte-level manipulation of larger quantities. The various macros
+and types are named fairly systematically.
+.PP
+The header provides utilities for working with 64-bit quantities, but a
+64-bit integer type is not guaranteed to exist under C89 rules. This
+header takes two approaches. Firstly, if a 64-bit type is found, the
+header defines the macro
+.B HAVE_UINT64
+and defines the various
+.RB ... 64
+macros as described below. Secondly, it unconditionally defines a type
+.B kludge64
+and a family of macros for working with them. See below for details.
+.
+.SS "Type definitions"
+A number of types are defined.
.TP
.B octet
Equivalent to
always capable of representing any 16-bit unsigned value, but the actual
type may be wider than 16 bits and will require masking.
.TP
+.B uint24
+Equivalent to some (architecture-dependent) standard type. Capable of
+representing any unsigned 24-bit value, although the actual type may
+be wider than 24 bits.
+.TP
.B uint32
Equivalent to some (architecture-dependent) standard type. Capable of
representing any unsigned 32-bit value, although the the actual type may
be wider than 32 bits.
+.TP
+.B uint64
+Equivalent to some (architecture-dependent) standard type, if it exists.
+Capable of representing any unsigned 64-bit value, although the
+actual type may be wider than 64 bits.
+.
+.SS "Size/endianness suffixes"
+Let
+.I w
+be one of the size suffixes: 8, 16, 24, 32, and (if available) 64.
+Furthermore, let
+.I we
+be one of the size-and-endian suffixes
+.IR w ,
+or, where
+.IR w \~>\~8,
+.IB w _L
+or
+.IB w _B \fR,
+where
+.RB ` _L '
+denotes little-endian (Intel, VAX) representation, and
+.RB ` _B '
+denotes big-endian (IBM, network) representation; omitting an explicit
+suffix gives big-endian order by default, since this is most common in
+portable data formats.
.PP
-The constants
-.BR MASK8 ,
-.B MASK16
-and
-.B MASK32
-contain bitmasks appropriate for discarding additional bits from a value
-before use as an 8-, 16- or 32-bit quantity. The macros
-.BR U8 ,
-.B U16
-and
-.B U32
-perform masking and type-coercion on a value, and may be more useful in
-general. For example,
-.B U16(x)
-yields a value of type
-.B uint16
-which contains (only) the least-significant 16 bits of
-.BR x .
+The macro invocation
+.BI DOUINTSZ( f )
+invokes a given macro
+.I f
+repeatedly, as
+.IB f ( w )
+for each size suffix
+.I w
+listed above.
.PP
-The macros
-.BI LSL n
-and
-.BI LSR n
-perform left and right logical shift operations on values of width
-.IR n ,
+The macro invocation
+.BI DOUINTCONV( f )
+invokes a given macro
+.I f
+repeatedly, as
+.IR f ( w ", " we ", " suff )
where
-.I n
-is one of
-.BR 8 ,
-.B 16
+.I we
+ranges over size-and-endian suffixes as described above,
+.I w
+is just the corresponding bit width, as an integer, and
+.I suff
+is a suffix
+.IR w ,
+.IB w l\fR,
or
-.BR 32 .
-The first argument, written
-.IR v ,
-is the value to shift, and the second, written
-.IR s ,
-is the number of bits to shift. The value
-.I s
-is reduced modulo
-.I n
-before use. Similarly, the macros
-.BI ROL n
-and
-.BI ROR n
-perform left and right rotations (cyclic shifts) on values of width
-.IR n .
-These macros are all written in such a way as to maximize portability.
-A compiler may be able to spot that simple machine instructions will
-suffice in many cases, although that's not the main objective.
+.IB w b\fR,
+suitable for a C function name.
+.PP
+These macros are intended to be used to define families of related
+functions.
+.
+.SS "Utility macros"
+For each size-and-endian suffix
+.IR we ,
+the following macros are defined.
+.TP
+.BI TY_ we
+A synonym for the appropriate one of the types
+.BR octet ,
+.BR uint32 ,
+etc.\& listed above.
+.TP
+.BI SZ_ we
+The number of octets needed to represent a value of the corresponding
+type; i.e., this is
+.IR w /8.
+.TP
+.BI MASK_ we
+The largest integer representable in the corresponding type; i.e., this
+is
+.RI 2\*(ss w \*(se\~\-\~1.
.PP
-The macros
-.BI LOAD n
+(Note that the endianness suffix is irrelevant in the above
+definitions.)
+.PP
+For each size suffix
+.IR w ,
+the macro invocation
+.BI U w ( x )
+coerces an integer
+.I x
+to the appropriate type; specifically, it returns the smallest
+nonnegative integer congruent to
+.I x
+(modulo
+.RI 2\*(ss w \*(se).
+.
+.SS "Shift and rotate"
+For each size suffix
+.IR w ,
+the macro invocations
+.BI LSL w ( x ", " n )
and
-.BI STORE n
-(where again
+.BI LSR w ( x ", " n )
+shift a
+.IR w -bit
+quantity
+.I x
+left or right, respectively, by
.I n
-is one of
-.BR 8 ,
-.B 16
-or
-.BR 32 )
-perform transformations between
-.IR n -bit
-quantities and arrays of octets. For example,
-.B LOAD32(q)
-returns the 32-bit value stored in the four octets starting at address
-.BR q ,
-and
-.B STORE16(q, x)
-stores the 16-bit value
-.B x
-in the 2 octets starting at address
-.BR q .
-Values are loaded and stored such that the most significant octet is
-assigned the lowest address (i.e., they use network, or big-endian byte
-order). Macros
-.BI LOAD n _L
+places; if
+.IR n \~\(>=\~ w
+then
+.I n
+is reduced modulo
+.IR w .
+(This behaviour is unfortunate, but (a) it's what a number of CPUs
+provide natively, and (b) it's a cheap way to prevent undefined
+behaviour.) Similarly,
+.BI ROL w ( x ", " n )
and
-.BI STORE n _L
-are also provided for
+.BI ROR w ( x ", " n )
+rotate a
+.IR w -bit
+quantity
+.I x
+left or right, respectively, by
.I n
-either
-.B 16
+places.
+.
+.SS "Byte order conversions"
+For each size suffix
+.IR w ,
+the macro invocation
+.BI ENDSWAP w ( x )
+returns the
+.IR w -bit
+value
+.IR x
+with its bytes reversed. The
+.B ENDSWAP8
+macro does nothing (except truncate its operand to 8 bits), but is
+provided for the sake of completeness.
+.PP
+A
+.I big-endian
+representation stores the most significant octet of an integer at the
+lowest address, with the following octets in decreasing order of
+significance. A
+.I little-endian
+representation instead stores the
+.I least
+significant octet at the lowest address, with the following octets in
+increasing order of significance. An environment has a preferred order
+for arranging the constituent octets of an integer of some given size in
+memory; this might be either the big- or little-endian representation
+just described, or something else strange.
+.PP
+It might be possible to rearrange the bits in an integer so that, when
+that integer is stored to memory in the environment's preferred manner,
+you end up with the big- or little-endian representation of the original
+integer; and, similarly, it might be possible to load a big- or
+little-endian representation of an integer into a variable using the
+environment's preferred ordering and then rearrange the bits so as to
+recover the integer value originally represented. If the environment is
+sufficiently strange, these things might not be possible, but this is
+actually quite rare.
+.PP
+Say that an integer has been converted to
+.I big-
+or
+.I "little-endian form"
+if, when it is stored in memory in the environment's preferred manner,
+one ends up with a big- or little-endian representation of the original
+integer. Equivalently, if one starts with a big- or little-endian
+representation of some integer, and loads it into a variable using the
+environment's preferred manner, one ends up with the big- or
+little-endian form of the original integer.
+.PP
+If these things are possible, then the following macros are defined.
+.TP
+.BI HTOL w ( x )
+Convert a
+.IR w -bit
+integer
+.I x
+to little-endian form.
+.TP
+.BI HTOB w ( x )
+Convert a
+.IR w -bit
+integer
+.I x
+to big-endian form.
+.TP
+.BI LTOH w ( x )
+Convert a
+.IR w -bit
+integer
+.I x
+from little-endian form.
+.TP
+.BI BTOH w ( x )
+Convert a
+.IR w -bit
+integer
+.I x
+from big-endian form.
+.
+.SS "Load and store"
+The macro invocation
+.BI GETBYTE( p ", " o )
+returns the
+.IR o th
+octet following the address
+.IR p .
+Conversely,
+.BI PUTBYTE( p ", " o ", " v )
+stores
+.I v
+in the
+.IR o th
+byte following the address
+.IR p .
+These macros always operate on byte offsets regardless of the type of
+the pointer
+.IR p .
+.PP
+For each size suffix
+.IR w ,
+there may be a macro such that the invocation
+.BI RAW w ( p )
+is an lvalue designating the
+.IR w /8
+octets starting at address
+.IR p ,
+interpreted according to the environment's preferred representation,
+except that
+.I p
+need not be aligned in any particular fashion. There are many reasons
+why this might not be possible; programmers are not normally expected to
+use these macros directly, and they are documented in case they are
+useful for special effects.
+.PP
+For each size-and-endian suffix
+.IR we ,
+the macro invocation
+.BI LOAD we ( p )
+loads and returns a value in the corresponding format at address
+.IR p ;
+similarly,
+.BI STORE we ( p ", " x )
+stores the value
+.I x
+at address
+.I p
+in the corresponding format.
+.
+.SS "64-bit support"
+For portability to environments without native 64-bit integers, the
+structure
+.B kludge64
+is defined. If the target platform is known to have an unsigned 64-bit
+integer type, then this structure merely encapsulates a native integer,
+and a decent optimizing compiler can be expected to handle this exactly
+as if it were the native type. Otherwise, it contains two 32-bit halves
+which are processed the hard way.
+.PP
+For each of the above macros with a suffix
+.BR 64 ,
+.BR 64_L ,
or
-.BR 32 :
-they use little-endian byte order. There are
-explicit big-endian macros
-.BI LOAD n _B
+.BR 64_B ,
+an additional `kludge' macro is defined, whose name has an additional
+final underscore; e.g., the kludge macro corresponding to
+.B ROR64
+is
+.BR ROR64_ ;
+and that corresponding to
+.B LOAD64_L
+is
+.BR LOAD64_L_ .
+If the original macro would have
+.I returned
+a value of type
+.BR uint64 ,
+then the kludge macro has an additional first argument, denoted
+.IR d ,
+which should be an lvalue of type
+.BR kludge64 ,
+and the kludge macro will store its result in
+.IR d .
+The kludge macro's remaining arguments are the same as the original
+macro, except that where the original macro accepts an argument of type
+.BR uint64 ,
+the kludge macro accepts an argument of type
+.B kludge64
+instead.
+.PP
+Finally, a number of additional macros are provided, to make working
+with
+.B kludge64
+somewhat less awful.
+.TP
+.BI SET64( d ", " h ", " l )
+Set the high 32 bits of
+.I d
+to be
+.IR h ,
+and the low 32 bits to be
+.IR l .
+Both
+.I h
+and
+.I l
+may be arbitrary integers.
+.TP
+.BI X64( hexh ", " hexl )
+Expands to an initializer for an object of type
+.B kludge64
+where
+.I hexh
+and
+.I hexl
+encode the high and low 32-bit halves in hexadecimal, without any
+.B 0x
+prefix.
+.TP
+.BI ASSIGN64( d ", " x )
+Make
+.I d
+be a copy of the
+.B kludge64
+.IR x .
+.TP
+.BI HI64( x )
+Return the high 32 bits of
+.IR x .
+.TP
+.BI LO64( x )
+Return the low 32 bits of
+.IR x .
+.TP
+.BI GET64( t ", " x )
+Return the value of
+.I x
+as a value of type
+.IR t .
+If
+.I t
+is an unsigned integer type, then the value will be truncated to fit as
+necessary; if
+.I t
+is a signed integer type, then the behaviour is undefined if the value
+of
+.I x
+is too large.
+.TP
+.BI AND64( d ", " x ", " y )
+Set
+.I d
+to be the bitwise-and of the two
+.B kludge64
+arguments
+.I x
+and
+.IR y .
+.TP
+.BI OR64( d ", " x ", " y )
+Set
+.I d
+to be the bitwise-or of the two
+.B kludge64
+arguments
+.I x
+and
+.IR y .
+.TP
+.BI XOR64( d ", " x ", " y )
+Set
+.I d
+to be the bitwise-exclusive-or of the two
+.B kludge64
+arguments
+.I x
+and
+.IR y .
+.TP
+.BI CPL64( d ", " x )
+Set
+.I d
+to be the bitwise complement of the
+.B kludge64
+argument
+.IR x .
+.TP
+.BI ADD64( d ", " x ", " y )
+Set
+.I d
+to be the sum of the two
+.B kludge64
+arguments
+.I x
and
-.BI STORE n _B
-too. The pointer arguments don't have to be pointers to octets; the
-value arguments don't have to be of the right type. The macros perform
-all appropriate type casting and masking. Again, these macros are
-written with portability foremost in mind, although it seems they don't
-actually perform at all badly in real use.
+.IR y .
+.TP
+.BI SUB64( d ", " x ", " y )
+Set
+.I d
+to be the difference of the two
+.B kludge64
+arguments
+.I x
+and
+.IR y .
+.TP
+.BI CMP64( x ", " op ", " y )
+Here,
+.I x
+and
+.I y
+should be arguments of type
+.B kludge64
+and
+.I op
+should be one of the relational operators
+.BR == ,
+.BR < ,
+.BR <= ,
+.BR > ,
+or
+.B >=
+\(en
+.I not
+.BR != .
+Evaluates nonzero if
+.IR x \~ op \~ y .
+.TP
+.BI ZERO64( x )
+Evaluates nonzero if the
+.B kludge64
+argument
+.I x
+is exactly zero.
.SH "SEE ALSO"
.BR mLib (3).
.SH AUTHOR
# include <stdint.h>
#endif
+#ifndef MLIB_COMPILER_H
+# include "compiler.h"
+#endif
+
/*----- Decide on some types ----------------------------------------------*/
/* --- Make GNU C shut up --- */
} while (0)
#endif
+/* --- Endianness swapping --- */
+
+#if GCC_VERSION_P(4, 8)
+# define ENDSWAP16(x) ((uint16)__builtin_bswap16(x))
+#endif
+#if GCC_VERSION_P(4, 3)
+# define ENDSWAP32(x) ((uint32)__builtin_bswap32(x))
+#endif
+#if GCC_VERSION_P(4, 3) && defined(HAVE_UINT64)
+# define ENDSWAP64(x) ((uint64)__builtin_bswap64(x))
+#endif
+
+#ifndef ENDSWAP8
+# define ENDSWAP8(x) U8(x)
+#endif
+#ifndef ENDSWAP16
+# define ENDSWAP16(x) \
+ ((((uint16)(x) >> 8)&0xff) | \
+ (((uint16)(x)&0xff) << 8))
+#endif
+#ifndef ENDSWAP24
+# define ENDSWAP24(x) \
+ ((((uint24)(x) >> 16)&0xff) | \
+ ((uint24)(x)&0xff00) | \
+ ((uint24)((x)&0xff) << 16))
+#endif
+#ifndef ENDSWAP32
+# define ENDSWAP32(x) \
+ (ENDSWAP16(((uint32)(x) >> 16)&0xffff) | \
+ ((uint32)ENDSWAP16((x)&0xffff) << 16))
+#endif
+#if defined(HAVE_UINT64) && !defined(ENDSWAP64)
+# define ENDSWAP64(x) \
+ (ENDSWAP32(((uint64)(x) >> 32)&0xffffffff) | \
+ ((uint64)ENDSWAP32((x)&0xffffffff) << 32))
+#endif
+#ifdef HAVE_UINT64
+# define ENDSWAP64_(z, x) \
+ ((z).i = ENDSWAP64((x).i))
+#else
+# define ENDSWAP64_(z, x) \
+ ((z).lo = ENDSWAP32((x).hi), \
+ (z).hi = ENDSWAP32((x).lo))
+#endif
+
+#define MLIB_LITTLE_ENDIAN 1234
+#define MLIB_BIG_ENDIAN 4321
+#if defined(__ORDER_LITTLE_ENDIAN__) && \
+ __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+# define MLIB_BYTE_ORDER MLIB_LITTLE_ENDIAN
+#elif defined(__ORDER_BIG_ENDIAN__) && \
+ __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+# define MLIB_BYTE_ORDER MLIB_BIG_ENDIAN
+#endif
+
+#if MLIB_BYTE_ORDER == MLIB_LITTLE_ENDIAN
+# define HTOL16(x) (x)
+# define LTOH16(x) (x)
+# define HTOB16(x) ENDSWAP16(x)
+# define BTOH16(x) ENDSWAP16(x)
+# define HTOL24(x) (x)
+# define LTOH24(x) (x)
+# define HTOB24(x) ENDSWAP24(x)
+# define BTOH24(x) ENDSWAP24(x)
+# define HTOL32(x) (x)
+# define LTOH32(x) (x)
+# define HTOB32(x) ENDSWAP32(x)
+# define BTOH32(x) ENDSWAP32(x)
+# ifdef HAVE_UINT64
+# define HTOL64(x) (x)
+# define LTOH64(x) (x)
+# define HTOB64(x) ENDSWAP64(x)
+# define BTOH64(x) ENDSWAP64(x)
+# endif
+# define HTOL64_(z, x) ASSIGN64(z, x)
+# define LTOH64_(z, x) ASSIGN64(z, x)
+# define HTOB64_(z, x) ENDSWAP64_(z, x)
+# define BTOH64_(z, x) ENDSWAP64_(z, x)
+#elif MLIB_BYTE_ORDER == MLIB_BIG_ENDIAN
+# define HTOL16(x) ENDSWAP16(x)
+# define LTOH16(x) ENDSWAP16(x)
+# define HTOB16(x) (x)
+# define BTOH16(x) (x)
+# define HTOL24(x) ENDSWAP24(x)
+# define LTOH24(x) ENDSWAP24(x)
+# define HTOB24(x) (x)
+# define BTOH24(x) (x)
+# define HTOL32(x) ENDSWAP32(x)
+# define LTOH32(x) ENDSWAP32(x)
+# define HTOB32(x) (x)
+# define BTOH32(x) (x)
+# ifdef HAVE_UINT64
+# define HTOL64(x) ENDSWAP64(x)
+# define LTOH64(x) ENDSWAP64(x)
+# define HTOB64(x) (x)
+# define BTOH64(x) (x)
+# endif
+# define HTOL64_(z, x) ENDSWAP64_(z, x)
+# define LTOH64_(z, x) ENDSWAP64_(z, x)
+# define HTOB64_(z, x) ASSIGN64(z, x)
+# define BTOH64_(z, x) ASSIGN64(z, x)
+#endif
+
+/* --- Unaligned access (GCC-specific) --- */
+
+#if GCC_VERSION_P(3, 3) && CHAR_BIT == 8
+# define MLIB_MISALIGNED __attribute__((aligned(1), may_alias))
+# if __SIZEOF_SHORT__ == 2
+ typedef MLIB_MISALIGNED unsigned short misaligned_uint16;
+# define RAW16(p) (*(misaligned_uint16 *)(p))
+# endif
+# if __SIZEOF_INT__ == 4
+ typedef MLIB_MISALIGNED unsigned int misaligned_uint32;
+# define RAW32(p) (*(misaligned_uint32 *)(p))
+# elif __SIZEOF_LONG__ == 4
+ typedef MLIB_MISALIGNED unsigned long misaligned_uint32;
+# define RAW32(p) (*(misaligned_uint32 *)(p))
+# endif
+# if __SIZEOF_LONG__ == 8
+ typedef MLIB_MISALIGNED unsigned long misaligned_uint64;
+# define RAW64(p) (*(misaligned_uint64 *)(p))
+# elif __SIZEOF_LONG_LONG__ == 8
+ typedef MLIB_MISALIGNED unsigned long long misaligned_uint64;
+# define RAW64(p) (*(misaligned_uint64 *)(p))
+# endif
+#endif
+
/* --- Storage and retrieval --- */
+#if defined(RAW16) && defined(LTOH16)
+# define LOAD16_L(p) LTOH16(RAW16(p))
+#endif
+#if defined(RAW16) && defined(HTOL16)
+# define STORE16_L(p, x) (RAW16(p) = HTOL16(x))
+#endif
+#if defined(RAW16) && defined(BTOH16)
+# define LOAD16_B(p) BTOH16(RAW16(p))
+#endif
+#if defined(RAW16) && defined(HTOB16)
+# define STORE16_B(p, x) (RAW16(p) = HTOB16(x))
+#endif
+
+#if defined(RAW32) && defined(LTOH32)
+# define LOAD32_L(p) LTOH32(RAW32(p))
+#endif
+#if defined(RAW32) && defined(HTOL32)
+# define STORE32_L(p, x) (RAW32(p) = HTOL32(x))
+#endif
+#if defined(RAW32) && defined(BTOH32)
+# define LOAD32_B(p) BTOH32(RAW32(p))
+#endif
+#if defined(RAW32) && defined(HTOB32)
+# define STORE32_B(p, x) (RAW32(p) = HTOB32(x))
+#endif
+
+#if defined(RAW64) && defined(LTOH64)
+# define LOAD64_L(p) LTOH64(RAW64(p))
+#endif
+#if defined(RAW64) && defined(HTOL64)
+# define STORE64_L(p, x) (RAW64(p) = HTOL64(x))
+#endif
+#if defined(RAW64) && defined(BTOH64)
+# define LOAD64_B(p) BTOH64(RAW64(p))
+#endif
+#if defined(RAW64) && defined(HTOB64)
+# define STORE64_B(p, x) (RAW64(p) = HTOB64(x))
+#endif
+
#define GETBYTE(p, o) (((octet *)(p))[o] & MASK8)
#define PUTBYTE(p, o, v) (((octet *)(p))[o] = U8((v)))
#define LOAD8(p) (GETBYTE((p), 0))
#define STORE8(p, v) (PUTBYTE((p), 0, (v)))
-#define LOAD16_B(p) \
- (((uint16)GETBYTE((p), 0) << 8) | \
- ((uint16)GETBYTE((p), 1) << 0))
-#define LOAD16_L(p) \
- (((uint16)GETBYTE((p), 0) << 0) | \
- ((uint16)GETBYTE((p), 1) << 8))
+#ifndef LOAD16_B
+# define LOAD16_B(p) \
+ (((uint16)GETBYTE((p), 0) << 8) | \
+ ((uint16)GETBYTE((p), 1) << 0))
+#endif
+#ifndef LOAD16_L
+# define LOAD16_L(p) \
+ (((uint16)GETBYTE((p), 0) << 0) | \
+ ((uint16)GETBYTE((p), 1) << 8))
+#endif
#define LOAD16(p) LOAD16_B((p))
-#define STORE16_B(p, v) \
- (PUTBYTE((p), 0, (uint16)(v) >> 8), \
- PUTBYTE((p), 1, (uint16)(v) >> 0))
-#define STORE16_L(p, v) \
- (PUTBYTE((p), 0, (uint16)(v) >> 0), \
- PUTBYTE((p), 1, (uint16)(v) >> 8))
+#ifndef STORE16_B
+# define STORE16_B(p, v) \
+ (PUTBYTE((p), 0, (uint16)(v) >> 8), \
+ PUTBYTE((p), 1, (uint16)(v) >> 0))
+#endif
+#ifndef STORE16_L
+# define STORE16_L(p, v) \
+ (PUTBYTE((p), 0, (uint16)(v) >> 0), \
+ PUTBYTE((p), 1, (uint16)(v) >> 8))
+#endif
#define STORE16(p, v) STORE16_B((p), (v))
-#define LOAD24_B(p) \
- (((uint24)GETBYTE((p), 0) << 16) | \
- ((uint24)GETBYTE((p), 1) << 8) | \
- ((uint24)GETBYTE((p), 2) << 0))
-#define LOAD24_L(p) \
- (((uint24)GETBYTE((p), 0) << 0) | \
- ((uint24)GETBYTE((p), 1) << 8) | \
- ((uint24)GETBYTE((p), 2) << 16))
+#ifndef LOAD24_B
+# define LOAD24_B(p) \
+ (((uint24)GETBYTE((p), 0) << 16) | \
+ ((uint24)LOAD16_B((octet *)(p) + 1) << 0))
+#endif
+#ifndef LOAD24_L
+# define LOAD24_L(p) \
+ (((uint24)LOAD16_L((octet *)(p) + 0) << 0) | \
+ ((uint24)GETBYTE((p), 2) << 16))
+#endif
#define LOAD24(p) LOAD24_B((p))
-#define STORE24_B(p, v) \
- (PUTBYTE((p), 0, (uint24)(v) >> 16), \
- PUTBYTE((p), 1, (uint24)(v) >> 8), \
- PUTBYTE((p), 2, (uint24)(v) >> 0))
-#define STORE24_L(p, v) \
- (PUTBYTE((p), 0, (uint24)(v) >> 0), \
- PUTBYTE((p), 1, (uint24)(v) >> 8), \
- PUTBYTE((p), 2, (uint24)(v) >> 16))
+#ifndef STORE24_B
+# define STORE24_B(p, v) \
+ (PUTBYTE((p), 0, (uint24)(v) >> 16), \
+ STORE16_B((octet *)(p) + 1, (uint24)(v) >> 0))
+#endif
+#ifndef STORE24_L
+# define STORE24_L(p, v) \
+ (STORE16_L((octet *)(p) + 0, (uint24)(v) >> 0), \
+ PUTBYTE((p), 2, (uint24)(v) >> 16))
+#endif
#define STORE24(p, v) STORE24_B((p), (v))
-#define LOAD32_B(p) \
- (((uint32)GETBYTE((p), 0) << 24) | \
- ((uint32)GETBYTE((p), 1) << 16) | \
- ((uint32)GETBYTE((p), 2) << 8) | \
- ((uint32)GETBYTE((p), 3) << 0))
-#define LOAD32_L(p) \
- (((uint32)GETBYTE((p), 0) << 0) | \
- ((uint32)GETBYTE((p), 1) << 8) | \
- ((uint32)GETBYTE((p), 2) << 16) | \
- ((uint32)GETBYTE((p), 3) << 24))
+#ifndef LOAD32_B
+# define LOAD32_B(p) \
+ (((uint32)LOAD16_B((octet *)(p) + 0) << 16) | \
+ ((uint32)LOAD16_B((octet *)(p) + 2) << 0))
+#endif
+#ifndef LOAD32_L
+# define LOAD32_L(p) \
+ (((uint32)LOAD16_L((octet *)(p) + 0) << 0) | \
+ ((uint32)LOAD16_L((octet *)(p) + 2) << 16))
+#endif
#define LOAD32(p) LOAD32_B((p))
-#define STORE32_B(p, v) \
- (PUTBYTE((p), 0, (uint32)(v) >> 24), \
- PUTBYTE((p), 1, (uint32)(v) >> 16), \
- PUTBYTE((p), 2, (uint32)(v) >> 8), \
- PUTBYTE((p), 3, (uint32)(v) >> 0))
-#define STORE32_L(p, v) \
- (PUTBYTE((p), 0, (uint32)(v) >> 0), \
- PUTBYTE((p), 1, (uint32)(v) >> 8), \
- PUTBYTE((p), 2, (uint32)(v) >> 16), \
- PUTBYTE((p), 3, (uint32)(v) >> 24))
+#ifndef STORE32_B
+# define STORE32_B(p, v) \
+ (STORE16_B((octet *)(p) + 0, (uint32)(v) >> 16), \
+ STORE16_B((octet *)(p) + 2, (uint32)(v) >> 0))
+#endif
+#ifndef STORE32_L
+# define STORE32_L(p, v) \
+ (STORE16_L((octet *)(p) + 0, (uint32)(v) >> 0), \
+ STORE16_L((octet *)(p) + 2, (uint32)(v) >> 16))
+#endif
#define STORE32(p, v) STORE32_B((p), (v))
#ifdef HAVE_UINT64
-# define LOAD64_B(p) \
- (((uint64)GETBYTE((p), 0) << 56) | \
- ((uint64)GETBYTE((p), 1) << 48) | \
- ((uint64)GETBYTE((p), 2) << 40) | \
- ((uint64)GETBYTE((p), 3) << 32) | \
- ((uint64)GETBYTE((p), 4) << 24) | \
- ((uint64)GETBYTE((p), 5) << 16) | \
- ((uint64)GETBYTE((p), 6) << 8) | \
- ((uint64)GETBYTE((p), 7) << 0))
-# define LOAD64_L(p) \
- (((uint64)GETBYTE((p), 0) << 0) | \
- ((uint64)GETBYTE((p), 1) << 8) | \
- ((uint64)GETBYTE((p), 2) << 16) | \
- ((uint64)GETBYTE((p), 3) << 24) | \
- ((uint64)GETBYTE((p), 4) << 32) | \
- ((uint64)GETBYTE((p), 5) << 40) | \
- ((uint64)GETBYTE((p), 6) << 48) | \
- ((uint64)GETBYTE((p), 7) << 56))
+# ifndef LOAD64_B
+# define LOAD64_B(p) \
+ (((uint64)LOAD32_B((octet *)(p) + 0) << 32) | \
+ ((uint64)LOAD32_B((octet *)(p) + 4) << 0))
+# endif
+# ifndef LOAD64_L
+# define LOAD64_L(p) \
+ (((uint64)LOAD32_L((octet *)(p) + 0) << 0) | \
+ ((uint64)LOAD32_L((octet *)(p) + 4) << 32))
+# endif
# define LOAD64(p) LOAD64_B((p))
# define LOAD64_B_(d, p) ((d).i = LOAD64_B((p)))
# define LOAD64_L_(d, p) ((d).i = LOAD64_L((p)))
# define LOAD64_(d, p) LOAD64_B_((d), (p))
-# define STORE64_B(p, v) \
- (PUTBYTE((p), 0, (uint64)(v) >> 56), \
- PUTBYTE((p), 1, (uint64)(v) >> 48), \
- PUTBYTE((p), 2, (uint64)(v) >> 40), \
- PUTBYTE((p), 3, (uint64)(v) >> 32), \
- PUTBYTE((p), 4, (uint64)(v) >> 24), \
- PUTBYTE((p), 5, (uint64)(v) >> 16), \
- PUTBYTE((p), 6, (uint64)(v) >> 8), \
- PUTBYTE((p), 7, (uint64)(v) >> 0))
-# define STORE64_L(p, v) \
- (PUTBYTE((p), 0, (uint64)(v) >> 0), \
- PUTBYTE((p), 1, (uint64)(v) >> 8), \
- PUTBYTE((p), 2, (uint64)(v) >> 16), \
- PUTBYTE((p), 3, (uint64)(v) >> 24), \
- PUTBYTE((p), 4, (uint64)(v) >> 32), \
- PUTBYTE((p), 5, (uint64)(v) >> 40), \
- PUTBYTE((p), 6, (uint64)(v) >> 48), \
- PUTBYTE((p), 7, (uint64)(v) >> 56))
+# ifndef STORE64_B
+# define STORE64_B(p, v) \
+ (STORE32_B((octet *)(p) + 0, (uint64)(v) >> 32), \
+ STORE32_B((octet *)(p) + 4, (uint64)(v) >> 0))
+# endif
+# ifndef STORE64_L
+# define STORE64_L(p, v) \
+ (STORE32_L((octet *)(p) + 0, (uint64)(v) >> 0), \
+ STORE32_L((octet *)(p) + 4, (uint64)(v) >> 32))
+# endif
# define STORE64(p, v) STORE64_B((p), (v))
# define STORE64_B_(p, v) STORE64_B((p), (v).i)
# define STORE64_L_(p, v) STORE64_L((p), (v).i)
#else
# define LOAD64_B_(d, p) \
- ((d).hi = LOAD32_B((octet *)(p) + 0), \
- (d).lo = LOAD32_B((octet *)(p) + 4))
+ ((d).hi = LOAD32_B((octet *)(p) + 0), \
+ (d).lo = LOAD32_B((octet *)(p) + 4))
# define LOAD64_L_(d, p) \
- ((d).lo = LOAD32_L((octet *)(p) + 0), \
- (d).hi = LOAD32_L((octet *)(p) + 4))
+ ((d).lo = LOAD32_L((octet *)(p) + 0), \
+ (d).hi = LOAD32_L((octet *)(p) + 4))
# define LOAD64_(d, p) LOAD64_B_((d), (p))
# define STORE64_B_(p, v) \
- (STORE32_B((octet *)(p) + 0, (v).hi), \
- STORE32_B((octet *)(p) + 4, (v).lo))
+ (STORE32_B((octet *)(p) + 0, (v).hi), \
+ STORE32_B((octet *)(p) + 4, (v).lo))
# define STORE64_L_(p, v) \
- (STORE32_L((octet *)(p) + 0, (v).lo), \
- STORE32_L((octet *)(p) + 4, (v).hi))
+ (STORE32_L((octet *)(p) + 0, (v).lo), \
+ STORE32_L((octet *)(p) + 4, (v).hi))
# define STORE64_(p, v) STORE64_B_((p), (v))
#endif
--- /dev/null
+.\" -*-nroff-*-
+.TH compiler 3 "26 May 2018" "Straylight/Edgeware" "mLib utilities library"
+.SH NAME
+compiler \- detect compiler version
+.\" @GCC_VERSION_P
+.\" @CLANG_VERSION_P
+.SH SYNOPSIS
+.nf
+.B "#include <mLib/compiler.h>"
+
+.BI "int GCC_VERSION_P(" maj ", " min ");"
+.BI "int CLANG_VERSION_P(" maj ", " min ");"
+.fi
+.SH DESCRIPTION
+The macro invocation
+.BI GCC_VERSION_P( maj ", " min )
+expands to a compile-time constant nonzero value if the present compiler
+is GCC version
+.IR maj . min
+or better, or claims compatibility with it.
+This is frequently imperfect, as many compilers claim compatibility
+without implementing all of the necessary features, but it works
+adequately if one takes care.
+.PP
+The macro invocation
+.BI CLANG_VERSION_P( maj ", " min )
+expands to a compile-time constant nonzero value if the present compiler
+is Clang version
+.IR maj . min
+or better (or claims compatibility with it, but this is less likely
+than for GCC).
+.SH "SEE ALSO"
+.BR mLib (3).
+.SH "AUTHOR"
+Mark Wooding, <mdw@distorted.org.uk>
--- /dev/null
+/* -*-c-*-
+ *
+ * Macros for determining the compiler version
+ *
+ * (c) 2018 Straylight/Edgeware
+ */
+
+/*----- Licensing notice --------------------------------------------------*
+ *
+ * This file is part of the mLib utilities library.
+ *
+ * mLib is free software: you can redistribute it and/or modify it under
+ * the terms of the GNU Library General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * mLib is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
+ * License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with mLib. If not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ * USA.
+ */
+
+#ifndef MLIB_COMPILER_H
+#define MLIB_COMPILER_H
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+/*----- Macros ------------------------------------------------------------*/
+
+#if defined(__GNUC__)
+# define GCC_VERSION_P(maj, min) \
+ (__GNUC__ > (maj) || (__GNUC__ == (maj) && __GNUC_MINOR__ >= (min)))
+#else
+# define GCC_VERSION_P(maj, min) 0
+#endif
+
+#ifdef __clang__
+# define CLANG_VERSION_P(maj, min) \
+ (__clang_major__ > (maj) || (__clang_major__ == (maj) && \
+ __clang_minor__ >= (min)))
+#else
+# define CLANG_VERSION_P(maj, min) 0
+#endif
+
+/*----- That's all, folks -------------------------------------------------*/
+
+#ifdef __cplusplus
+ }
+#endif
+
+#endif
.\" @MUFFLE_WARNINGS_DECL
.\" @MUFFLE_WARNINGS_EXPR
.\" @MUFFLE_WARNINGS_STMT
-.\" @GCC_VERSION_P
.\" @GCC_WARNING
.SH SYNOPSIS
.nf
.BI "MUFFLE_WARNINGS_EXPR(" warns ", " expr ")"
.BI "MUFFLE_WARNINGS_STMT(" warns ", " stmt ")"
-.BI "int GCC_VERSION_P(" maj ", " min ");"
.BI "GCC_WARNING(" option ")"
+.BI "CLANG_WARNING(" option ")"
.fi
.SH DESCRIPTION
.SS Utilities
.I option
naming a GCC warning option, e.g.,
.BR """\-Wdiv-by-zero""" .
+.PP
+The
+.B CLANG_WARNING
+is similar, except that it works with the Clang compiler.
+.PP
+Note that including
+.B <mLib/macros.h>
+also defines the compiler-test macros in
+.BR <mLib/compiler.h> ;
+see
+.BR compiler (3).
.SH "SEE ALSO"
-.BR mLib (3).
+.BR mLib (3),
+.BR compiler (3).
.SH "AUTHOR"
Mark Wooding, <mdw@distorted.org.uk>
extern "C" {
#endif
+/*----- Header files ------------------------------------------------------*/
+
+#ifndef MLIB_COMPILER_H
+# include "compiler.h"
+#endif
+
/*----- Miscellaneous utility macros --------------------------------------*/
#define N(v) (sizeof(v)/sizeof(*v))
/* --- Compiler-specific definitions --- */
-#if defined(__GNUC__)
-# define GCC_VERSION_P(maj, min) \
- (__GNUC__ > (maj) || (__GNUC__ == (maj) && __GNUC_MINOR__ >= (min)))
-#else
-# define GCC_VERSION_P(maj, min) 0
-#endif
-
-#ifdef __clang__
-# define CLANG_VERSION_P(maj, min) \
- (__clang_major__ > (maj) || (__clang_major__ == (maj) && \
- __clang_minor__ >= (min)))
-#else
-# define CLANG_VERSION_P(maj, min) 0
-#endif
-
#if GCC_VERSION_P(2, 5) || CLANG_VERSION_P(3, 3)
# define NORETURN __attribute__((noreturn))
# define PRINTF_LIKE(fix, aix) __attribute__((format(printf, fix, aix)))