X-Git-Url: https://git.distorted.org.uk/~mdw/disorder/blobdiff_plain/4dac3d2aee45debf137b1dfaa8751b406337ba68..544a9ec143de5670072bae8114834272ced98bcd:/server/speaker.c diff --git a/server/speaker.c b/server/speaker.c index 8d6629d..d3aea99 100644 --- a/server/speaker.c +++ b/server/speaker.c @@ -17,14 +17,35 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 * USA */ - -/* This program deliberately does not use the garbage collector even though it - * might be convenient to do so. This is for two reasons. Firstly some libao - * drivers are implemented using threads and we do not want to have to deal - * with potential interactions between threading and garbage collection. - * Secondly this process needs to be able to respond quickly and this is not - * compatible with the collector hanging the program even relatively - * briefly. */ +/** @file server/speaker.c + * @brief Speaker processs + * + * This program is responsible for transmitting a single coherent audio stream + * to its destination (over the network, to some sound API, to some + * subprocess). It receives connections from decoders via file descriptor + * passing from the main server and plays them in the right order. + * + * For the ALSA API, 8- and 16- bit + * stereo and mono are supported, with any sample rate (within the limits that + * ALSA can deal with.) + * + * When communicating with a subprocess, sox is invoked to convert the inbound + * data to a single consistent format. The same applies for network (RTP) + * play, though in that case currently only 44.1KHz 16-bit stereo is supported. + * + * The inbound data starts with a structure defining the data format. Note + * that this is NOT portable between different platforms or even necessarily + * between versions; the speaker is assumed to be built from the same source + * and run on the same host as the main server. + * + * This program deliberately does not use the garbage collector even though it + * might be convenient to do so. This is for two reasons. Firstly some sound + * APIs use thread threads and we do not want to have to deal with potential + * interactions between threading and garbage collection. Secondly this + * process needs to be able to respond quickly and this is not compatible with + * the collector hanging the program even relatively briefly. + */ #include #include "types.h" @@ -44,6 +65,10 @@ #include #include #include +#include +#include +#include +#include #include "configuration.h" #include "syscalls.h" @@ -52,6 +77,9 @@ #include "mem.h" #include "speaker.h" #include "user.h" +#include "addr.h" +#include "timeval.h" +#include "rtp.h" #if API_ALSA #include @@ -63,15 +91,32 @@ # define MACHINE_AO_FMT AO_FMT_LITTLE #endif -#define BUFFER_SECONDS 5 /* How many seconds of input to - * buffer. */ +/** @brief How many seconds of input to buffer + * + * While any given connection has this much audio buffered, no more reads will + * be issued for that connection. The decoder will have to wait. + */ +#define BUFFER_SECONDS 5 #define FRAMES 4096 /* Frame batch size */ -#define NFDS 256 /* Max FDs to poll for */ +/** @brief Bytes to send per network packet + * + * Don't make this too big or arithmetic will start to overflow. + */ +#define NETWORK_BYTES (1024+sizeof(struct rtp_header)) + +/** @brief Maximum RTP playahead (ms) */ +#define RTP_AHEAD_MS 1000 + +/** @brief Maximum number of FDs to poll for */ +#define NFDS 256 -/* Known tracks are kept in a linked list. 
We don't normally to have - * more than two - maybe three at the outside. */ +/** @brief Track structure + * + * Known tracks are kept in a linked list. Usually there will be at most two + * of these but rearranging the queue can cause there to be more. + */ static struct track { struct track *next; /* next track */ int fd; /* input FD */ @@ -88,18 +133,109 @@ static struct track { static time_t last_report; /* when we last reported */ static int paused; /* pause status */ -static ao_sample_format pcm_format; /* current format if aodev != 0 */ static size_t bpf; /* bytes per frame */ static struct pollfd fds[NFDS]; /* if we need more than that */ static int fdno; /* fd number */ static size_t bufsize; /* buffer size */ #if API_ALSA -static snd_pcm_t *pcm; /* current pcm handle */ +/** @brief The current PCM handle */ +static snd_pcm_t *pcm; static snd_pcm_uframes_t last_pcm_bufsize; /* last seen buffer size */ +static ao_sample_format pcm_format; /* current format if aodev != 0 */ #endif -static int ready; /* ready to send audio */ + +/** @brief Ready to send audio + * + * This is set when the destination is ready to receive audio. Generally + * this implies that the sound device is open. In the ALSA backend it + * does @b not necessarily imply that is has the right sample format. + */ +static int ready; + static int forceplay; /* frames to force play */ -static int kidfd = -1; /* child process input */ +static int cmdfd = -1; /* child process input */ +static int bfd = -1; /* broadcast FD */ + +/** @brief RTP timestamp + * + * This counts the number of samples played (NB not the number of frames + * played). + * + * The timestamp in the packet header is only 32 bits wide. With 44100Hz + * stereo, that only gives about half a day before wrapping, which is not + * particularly convenient for certain debugging purposes. Therefore the + * timestamp is maintained as a 64-bit integer, giving around six million years + * before wrapping, and truncated to 32 bits when transmitting. + */ +static uint64_t rtp_time; + +/** @brief RTP base timestamp + * + * This is the real time correspoding to an @ref rtp_time of 0. It is used + * to recalculate the timestamp after idle periods. + */ +static struct timeval rtp_time_0; + +static uint16_t rtp_seq; /* frame sequence number */ +static uint32_t rtp_id; /* RTP SSRC */ +static int idled; /* set when idled */ +static int audio_errors; /* audio error counter */ + +/** @brief Structure of a backend */ +struct speaker_backend { + /** @brief Which backend this is + * + * @c -1 terminates the list. + */ + int backend; + + /** @brief Flags + * + * Possible values + * - @ref FIXED_FORMAT + */ + unsigned flags; +/** @brief Lock to configured sample format */ +#define FIXED_FORMAT 0x0001 + + /** @brief Initialization + * + * Called once at startup. This is responsible for one-time setup + * operations, for instance opening a network socket to transmit to. + * + * When writing to a native sound API this might @b not imply opening the + * native sound device - that might be done by @c activate below. + */ + void (*init)(void); + + /** @brief Activation + * @return 0 on success, non-0 on error + * + * Called to activate the output device. + * + * After this function succeeds, @ref ready should be non-0. As well as + * opening the audio device, this function is responsible for reconfiguring + * if it necessary to cope with different samples formats (for backends that + * don't demand a single fixed sample format for the lifetime of the server). 
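+   *
+   * For a @ref FIXED_FORMAT backend there is usually very little to do
+   * here.  A minimal sketch (assuming the configured sample format is
+   * already what will be transmitted, as in the helper shared by the
+   * command and network backends) might be:
+   * @code
+   * static int generic_activate(void) {
+   *   if(!ready) {
+   *     bufsize = 3 * FRAMES;
+   *     bpf = bytes_per_frame(&config->sample_format);
+   *     ready = 1;
+   *   }
+   *   return 0;
+   * }
+   * @endcode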
+ */ + int (*activate)(void); + + /** @brief Play sound + * @param frames Number of frames to play + * @return Number of frames actually played + */ + size_t (*play)(size_t frames); + + /** @brief Deactivation + * + * Called to deactivate the sound device. This is the inverse of + * @c activate above. + */ + void (*deactivate)(void); +}; + +/** @brief Selected backend */ +static const struct speaker_backend *backend; static const struct option options[] = { { "help", no_argument, 0, 'h' }, @@ -133,12 +269,12 @@ static void version(void) { exit(0); } -/* Return the number of bytes per frame in FORMAT. */ +/** @brief Return the number of bytes per frame in @p format */ static size_t bytes_per_frame(const ao_sample_format *format) { return format->channels * format->bits / 8; } -/* Find track ID, maybe creating it if not found. */ +/** @brief Find track @p id, maybe creating it if not found */ static struct track *findtrack(const char *id, int create) { struct track *t; @@ -158,7 +294,7 @@ static struct track *findtrack(const char *id, int create) { return t; } -/* Remove track ID (but do not destroy it). */ +/** @brief Remove track @p id (but do not destroy it) */ static struct track *removetrack(const char *id) { struct track *t, **tt; @@ -170,7 +306,7 @@ static struct track *removetrack(const char *id) { return t; } -/* Destroy a track. */ +/** @brief Destroy a track */ static void destroy(struct track *t) { D(("destroy %s", t->id)); if(t->fd != -1) xclose(t->fd); @@ -178,7 +314,7 @@ static void destroy(struct track *t) { free(t); } -/* Notice a new FD. */ +/** @brief Notice a new connection */ static void acquire(struct track *t, int fd) { D(("acquire %s %d", t->id, fd)); if(t->fd != -1) @@ -187,7 +323,102 @@ static void acquire(struct track *t, int fd) { nonblock(fd); } -/* Read data into a sample buffer. Return 0 on success, -1 on EOF. */ +/** @brief Return true if A and B denote identical libao formats, else false */ +static int formats_equal(const ao_sample_format *a, + const ao_sample_format *b) { + return (a->bits == b->bits + && a->rate == b->rate + && a->channels == b->channels + && a->byte_format == b->byte_format); +} + +/** @brief Compute arguments to sox */ +static void soxargs(const char ***pp, char **qq, ao_sample_format *ao) { + int n; + + *(*pp)++ = "-t.raw"; + *(*pp)++ = "-s"; + *(*pp)++ = *qq; n = sprintf(*qq, "-r%d", ao->rate); *qq += n + 1; + *(*pp)++ = *qq; n = sprintf(*qq, "-c%d", ao->channels); *qq += n + 1; + /* sox 12.17.9 insists on -b etc; CVS sox insists on - etc; both are + * deployed! */ + switch(config->sox_generation) { + case 0: + if(ao->bits != 8 + && ao->byte_format != AO_FMT_NATIVE + && ao->byte_format != MACHINE_AO_FMT) { + *(*pp)++ = "-x"; + } + switch(ao->bits) { + case 8: *(*pp)++ = "-b"; break; + case 16: *(*pp)++ = "-w"; break; + case 32: *(*pp)++ = "-l"; break; + case 64: *(*pp)++ = "-d"; break; + default: fatal(0, "cannot handle sample size %d", (int)ao->bits); + } + break; + case 1: + switch(ao->byte_format) { + case AO_FMT_NATIVE: break; + case AO_FMT_BIG: *(*pp)++ = "-B"; break; + case AO_FMT_LITTLE: *(*pp)++ = "-L"; break; + } + *(*pp)++ = *qq; n = sprintf(*qq, "-%d", ao->bits/8); *qq += n + 1; + break; + } +} + +/** @brief Enable format translation + * + * If necessary, replaces a tracks inbound file descriptor with one connected + * to a sox invocation, which performs the required translation. 
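+ *
+ * As an illustration (hypothetical numbers), with @c sox_generation 0 a
+ * track arriving as 8-bit mono 22050Hz, against a configured format of
+ * 16-bit stereo 44100Hz native-endian, would be piped through roughly:
+ * @code
+ * sox -t.raw -s -r22050 -c1 -b - -t.raw -s -r44100 -c2 -w -
+ * @endcode
+ * with the decoder connection on sox's stdin and the converted stream read
+ * back from a pipe that replaces @c t->fd.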
+ */ +static void enable_translation(struct track *t) { + if((backend->flags & FIXED_FORMAT) + && !formats_equal(&t->format, &config->sample_format)) { + char argbuf[1024], *q = argbuf; + const char *av[18], **pp = av; + int soxpipe[2]; + pid_t soxkid; + + *pp++ = "sox"; + soxargs(&pp, &q, &t->format); + *pp++ = "-"; + soxargs(&pp, &q, &config->sample_format); + *pp++ = "-"; + *pp++ = 0; + if(debugging) { + for(pp = av; *pp; pp++) + D(("sox arg[%d] = %s", pp - av, *pp)); + D(("end args")); + } + xpipe(soxpipe); + soxkid = xfork(); + if(soxkid == 0) { + signal(SIGPIPE, SIG_DFL); + xdup2(t->fd, 0); + xdup2(soxpipe[1], 1); + fcntl(0, F_SETFL, fcntl(0, F_GETFL) & ~O_NONBLOCK); + close(soxpipe[0]); + close(soxpipe[1]); + close(t->fd); + execvp("sox", (char **)av); + _exit(1); + } + D(("forking sox for format conversion (kid = %d)", soxkid)); + close(t->fd); + close(soxpipe[1]); + t->fd = soxpipe[0]; + t->format = config->sample_format; + } +} + +/** @brief Read data into a sample buffer + * @param t Pointer to track + * @return 0 on success, -1 on EOF + * + * This is effectively the read callback on @c t->fd. + */ static int fill(struct track *t) { size_t where, left; int n; @@ -223,6 +454,8 @@ static int fill(struct track *t) { /* Check that our assumptions are met. */ if(t->format.bits & 7) fatal(0, "bits per sample not a multiple of 8"); + /* If the input format is unsuitable, arrange to translate it */ + enable_translation(t); /* Make a new buffer for audio data. */ t->size = bytes_per_frame(&t->format) * t->format.rate * BUFFER_SECONDS; t->buffer = xmalloc(t->size); @@ -234,37 +467,16 @@ static int fill(struct track *t) { return 0; } -/* Return true if A and B denote identical libao formats, else false. */ -static int formats_equal(const ao_sample_format *a, - const ao_sample_format *b) { - return (a->bits == b->bits - && a->rate == b->rate - && a->channels == b->channels - && a->byte_format == b->byte_format); -} - -/* Close the sound device. */ +/** @brief Close the sound device */ static void idle(void) { D(("idle")); -#if API_ALSA - if(pcm) { - int err; - - if((err = snd_pcm_nonblock(pcm, 0)) < 0) - fatal(0, "error calling snd_pcm_nonblock: %d", err); - D(("draining pcm")); - snd_pcm_drain(pcm); - D(("closing pcm")); - snd_pcm_close(pcm); - pcm = 0; - forceplay = 0; - D(("released audio device")); - } -#endif + if(backend->deactivate) + backend->deactivate(); + idled = 1; ready = 0; } -/* Abandon the current track */ +/** @brief Abandon the current track */ static void abandon(void) { struct speaker_message sm; @@ -280,6 +492,7 @@ static void abandon(void) { } #if API_ALSA +/** @brief Log ALSA parameters */ static void log_params(snd_pcm_hw_params_t *hwparams, snd_pcm_sw_params_t *swparams) { snd_pcm_uframes_t f; @@ -306,100 +519,284 @@ static void log_params(snd_pcm_hw_params_t *hwparams, } #endif -static void soxargs(const char ***pp, char **qq, ao_sample_format *ao) { - int n; - - *(*pp)++ = "-t.raw"; - *(*pp)++ = "-s"; - *(*pp)++ = *qq; n = sprintf(*qq, "-r%d", ao->rate); *qq += n + 1; - *(*pp)++ = *qq; n = sprintf(*qq, "-c%d", ao->channels); *qq += n + 1; - /* sox 12.17.9 insists on -b etc; CVS sox insists on - etc; both are - * deployed! 
*/ - switch(config->sox_generation) { - case 0: - if(ao->bits != 8 - && ao->byte_format != AO_FMT_NATIVE - && ao->byte_format != MACHINE_AO_FMT) { - *(*pp)++ = "-x"; - } - switch(ao->bits) { - case 8: *(*pp)++ = "-b"; break; - case 16: *(*pp)++ = "-w"; break; - case 32: *(*pp)++ = "-l"; break; - case 64: *(*pp)++ = "-d"; break; - default: fatal(0, "cannot handle sample size %d", (int)ao->bits); - } - break; - case 1: - switch(ao->byte_format) { - case AO_FMT_NATIVE: break; - case AO_FMT_BIG: *(*pp)++ = "-B"; break; - case AO_FMT_LITTLE: *(*pp)++ = "-L"; break; - } - *(*pp)++ = *qq; n = sprintf(*qq, "-%d", ao->bits/8); *qq += n + 1; - break; - } -} - -/* Make sure the sound device is open and has the right sample format. Return - * 0 on success and -1 on error. */ +/** @brief Enable sound output + * + * Makes sure the sound device is open and has the right sample format. Return + * 0 on success and -1 on error. + */ static int activate(void) { /* If we don't know the format yet we cannot start. */ if(!playing->got_format) { D((" - not got format for %s", playing->id)); return -1; } - if(kidfd >= 0) { - if(!formats_equal(&playing->format, &config->sample_format)) { - char argbuf[1024], *q = argbuf; - const char *av[18], **pp = av; - int soxpipe[2]; - pid_t soxkid; - *pp++ = "sox"; - soxargs(&pp, &q, &playing->format); - *pp++ = "-"; - soxargs(&pp, &q, &config->sample_format); - *pp++ = "-"; - *pp++ = 0; - if(debugging) { - for(pp = av; *pp; pp++) - D(("sox arg[%d] = %s", pp - av, *pp)); - D(("end args")); + return backend->activate(); +} + +/* Check to see whether the current track has finished playing */ +static void maybe_finished(void) { + if(playing + && playing->eof + && (!playing->got_format + || playing->used < bytes_per_frame(&playing->format))) + abandon(); +} + +static void fork_cmd(void) { + pid_t cmdpid; + int pfd[2]; + if(cmdfd != -1) close(cmdfd); + xpipe(pfd); + cmdpid = xfork(); + if(!cmdpid) { + signal(SIGPIPE, SIG_DFL); + xdup2(pfd[0], 0); + close(pfd[0]); + close(pfd[1]); + execl("/bin/sh", "sh", "-c", config->speaker_command, (char *)0); + fatal(errno, "error execing /bin/sh"); + } + close(pfd[0]); + cmdfd = pfd[1]; + D(("forked cmd %d, fd = %d", cmdpid, cmdfd)); +} + +static void play(size_t frames) { + size_t avail_frames, avail_bytes, write_bytes, written_frames; + ssize_t written_bytes; + struct rtp_header header; + struct iovec vec[2]; + + /* Make sure the output device is activated */ + if(activate()) { + if(playing) + forceplay = frames; + else + forceplay = 0; /* Must have called abandon() */ + return; + } + D(("play: play %zu/%zu%s %dHz %db %dc", frames, playing->used / bpf, + playing->eof ? " EOF" : "", + playing->format.rate, + playing->format.bits, + playing->format.channels)); + /* If we haven't got enough bytes yet wait until we have. Exception: when + * we are at eof. 
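+   * (At eof we must play out whatever is left, however little, so that the
+   * track can be finished and the next one started.)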
*/ + if(playing->used < frames * bpf && !playing->eof) { + forceplay = frames; + return; + } + /* We have got enough data so don't force play again */ + forceplay = 0; + /* Figure out how many frames there are available to write */ + if(playing->start + playing->used > playing->size) + /* The ring buffer is currently wrapped, only play up to the wrap point */ + avail_bytes = playing->size - playing->start; + else + /* The ring buffer is not wrapped, can play the lot */ + avail_bytes = playing->used; + avail_frames = avail_bytes / bpf; + /* Only play up to the requested amount */ + if(avail_frames > frames) + avail_frames = frames; + if(!avail_frames) + return; + + switch(config->speaker_backend) { +#if API_ALSA + case BACKEND_ALSA: { + written_frames = backend->play(avail_frames); + break; + } +#endif + case BACKEND_COMMAND: + if(avail_bytes > frames * bpf) + avail_bytes = frames * bpf; + written_bytes = write(cmdfd, playing->buffer + playing->start, + avail_bytes); + D(("actually play %zu bytes, wrote %d", + avail_bytes, (int)written_bytes)); + if(written_bytes < 0) { + switch(errno) { + case EPIPE: + error(0, "hmm, command died; trying another"); + fork_cmd(); + return; + case EAGAIN: + return; } - xpipe(soxpipe); - soxkid = xfork(); - if(soxkid == 0) { - xdup2(playing->fd, 0); - xdup2(soxpipe[1], 1); - fcntl(0, F_SETFL, fcntl(0, F_GETFL) & ~O_NONBLOCK); - close(soxpipe[0]); - close(soxpipe[1]); - close(playing->fd); - execvp("sox", (char **)av); - _exit(1); + } + written_frames = written_bytes / bpf; /* good enough */ + break; + case BACKEND_NETWORK: + /* We transmit using RTP (RFC3550) and attempt to conform to the internet + * AVT profile (RFC3551). */ + + if(idled) { + /* There may have been a gap. Fix up the RTP time accordingly. */ + struct timeval now; + uint64_t delta; + uint64_t target_rtp_time; + + /* Find the current time */ + xgettimeofday(&now, 0); + /* Find the number of microseconds elapsed since rtp_time=0 */ + delta = tvsub_us(now, rtp_time_0); + assert(delta <= UINT64_MAX / 88200); + target_rtp_time = (delta * playing->format.rate + * playing->format.channels) / 1000000; + /* Overflows at ~6 years uptime with 44100Hz stereo */ + + /* rtp_time is the number of samples we've played. NB that we play + * RTP_AHEAD_MS ahead of ourselves, so it may legitimately be ahead of + * the value we deduce from time comparison. + * + * Suppose we have 1s track started at t=0, and another track begins to + * play at t=2s. Suppose RTP_AHEAD_MS=1000 and 44100Hz stereo. In that + * case we'll send 1s of audio as fast as we can, giving rtp_time=88200. + * rtp_time stops at this point. + * + * At t=2s we'll have calculated target_rtp_time=176400. In this case we + * set rtp_time=176400 and the player can correctly conclude that it + * should leave 1s between the tracks. + * + * Suppose instead that the second track arrives at t=0.5s, and that + * we've managed to transmit the whole of the first track already. We'll + * have target_rtp_time=44100. + * + * The desired behaviour is to play the second track back to back with + * first. In this case therefore we do not modify rtp_time. + * + * Is it ever right to reduce rtp_time? No; for that would imply + * transmitting packets with overlapping timestamp ranges, which does not + * make sense. + */ + if(target_rtp_time > rtp_time) { + /* More time has elapsed than we've transmitted samples. That implies + * we've been 'sending' silence. 
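+       * Advance rtp_time to match the wall clock; the next packet will
+       * carry the RTP marker bit (set from the @c idled flag below),
+       * telling receivers that a gap preceded it.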
*/ + info("advancing rtp_time by %"PRIu64" samples", + target_rtp_time - rtp_time); + rtp_time = target_rtp_time; + } else if(target_rtp_time < rtp_time) { + const int64_t samples_ahead = ((uint64_t)RTP_AHEAD_MS + * config->sample_format.rate + * config->sample_format.channels + / 1000); + + if(target_rtp_time + samples_ahead < rtp_time) { + info("reversing rtp_time by %"PRIu64" samples", + rtp_time - target_rtp_time); + } } - D(("forking sox for format conversion (kid = %d)", soxkid)); - close(playing->fd); - close(soxpipe[1]); - playing->fd = soxpipe[0]; - playing->format = config->sample_format; - ready = 0; } - if(!ready) { - pcm_format = config->sample_format; - bufsize = 3 * FRAMES; - bpf = bytes_per_frame(&config->sample_format); - D(("acquired audio device")); - ready = 1; + header.vpxcc = 2 << 6; /* V=2, P=0, X=0, CC=0 */ + header.seq = htons(rtp_seq++); + header.timestamp = htonl((uint32_t)rtp_time); + header.ssrc = rtp_id; + header.mpt = (idled ? 0x80 : 0x00) | 10; + /* 10 = L16 = 16-bit x 2 x 44100KHz. We ought to deduce this value from + * the sample rate (in a library somewhere so that configuration.c can rule + * out invalid rates). + */ + idled = 0; + if(avail_bytes > NETWORK_BYTES - sizeof header) { + avail_bytes = NETWORK_BYTES - sizeof header; + /* Always send a whole number of frames */ + avail_bytes -= avail_bytes % bpf; } - return 0; + /* "The RTP clock rate used for generating the RTP timestamp is independent + * of the number of channels and the encoding; it equals the number of + * sampling periods per second. For N-channel encodings, each sampling + * period (say, 1/8000 of a second) generates N samples. (This terminology + * is standard, but somewhat confusing, as the total number of samples + * generated per second is then the sampling rate times the channel + * count.)" + */ + write_bytes = avail_bytes; + if(write_bytes) { + vec[0].iov_base = (void *)&header; + vec[0].iov_len = sizeof header; + vec[1].iov_base = playing->buffer + playing->start; + vec[1].iov_len = avail_bytes; + do { + written_bytes = writev(bfd, + vec, + 2); + } while(written_bytes < 0 && errno == EINTR); + if(written_bytes < 0) { + error(errno, "error transmitting audio data"); + ++audio_errors; + if(audio_errors == 10) + fatal(0, "too many audio errors"); + return; + } + } else + audio_errors /= 2; + written_bytes = avail_bytes; + written_frames = written_bytes / bpf; + /* Advance RTP's notion of the time */ + rtp_time += written_frames * playing->format.channels; + break; + default: + assert(!"reached"); + } + written_bytes = written_frames * bpf; + /* written_bytes and written_frames had better both be set and correct by + * this point */ + playing->start += written_bytes; + playing->used -= written_bytes; + playing->played += written_frames; + /* If the pointer is at the end of the buffer (or the buffer is completely + * empty) wrap it back to the start. */ + if(!playing->used || playing->start == playing->size) + playing->start = 0; + frames -= written_frames; +} + +/* Notify the server what we're up to. */ +static void report(void) { + struct speaker_message sm; + + if(playing && playing->buffer != (void *)&playing->format) { + memset(&sm, 0, sizeof sm); + sm.type = paused ? 
SM_PAUSED : SM_PLAYING; + strcpy(sm.id, playing->id); + sm.data = playing->played / playing->format.rate; + speaker_send(1, &sm, 0); } - if(config->speaker_command) + time(&last_report); +} + +static void reap(int __attribute__((unused)) sig) { + pid_t cmdpid; + int st; + + do + cmdpid = waitpid(-1, &st, WNOHANG); + while(cmdpid > 0); + signal(SIGCHLD, reap); +} + +static int addfd(int fd, int events) { + if(fdno < NFDS) { + fds[fdno].fd = fd; + fds[fdno].events = events; + return fdno++; + } else return -1; +} + #if API_ALSA +/** @brief ALSA backend initialization */ +static void alsa_init(void) { + info("selected ALSA backend"); +} + +/** @brief ALSA backend activation */ +static int alsa_activate(void) { /* If we need to change format then close the current device. */ if(pcm && !formats_equal(&playing->format, &pcm_format)) - idle(); + idle(); if(!pcm) { snd_pcm_hw_params_t *hwparams; snd_pcm_sw_params_t *swparams; @@ -496,166 +893,184 @@ error: snd_pcm_close(pcm); pcm = 0; } -#endif return -1; } -/* Check to see whether the current track has finished playing */ -static void maybe_finished(void) { - if(playing - && playing->eof - && (!playing->got_format - || playing->used < bytes_per_frame(&playing->format))) - abandon(); +/** @brief Play via ALSA */ +static size_t alsa_play(size_t frames) { + snd_pcm_sframes_t pcm_written_frames; + int err; + + pcm_written_frames = snd_pcm_writei(pcm, + playing->buffer + playing->start, + frames); + D(("actually play %zu frames, wrote %d", + frames, (int)pcm_written_frames)); + if(pcm_written_frames < 0) { + switch(pcm_written_frames) { + case -EPIPE: /* underrun */ + error(0, "snd_pcm_writei reports underrun"); + if((err = snd_pcm_prepare(pcm)) < 0) + fatal(0, "error calling snd_pcm_prepare: %d", err); + return 0; + case -EAGAIN: + return 0; + default: + fatal(0, "error calling snd_pcm_writei: %d", + (int)pcm_written_frames); + } + } else + return pcm_written_frames; } -static void fork_kid(void) { - pid_t kid; - int pfd[2]; - if(kidfd != -1) close(kidfd); - xpipe(pfd); - kid = xfork(); - if(!kid) { - xdup2(pfd[0], 0); - close(pfd[0]); - close(pfd[1]); - execl("/bin/sh", "sh", "-c", config->speaker_command, (char *)0); - fatal(errno, "error execing /bin/sh"); +/** @brief ALSA deactivation */ +static void alsa_deactivate(void) { + if(pcm) { + int err; + + if((err = snd_pcm_nonblock(pcm, 0)) < 0) + fatal(0, "error calling snd_pcm_nonblock: %d", err); + D(("draining pcm")); + snd_pcm_drain(pcm); + D(("closing pcm")); + snd_pcm_close(pcm); + pcm = 0; + forceplay = 0; + D(("released audio device")); } - close(pfd[0]); - kidfd = pfd[1]; - D(("forked kid %d, fd = %d", kid, kidfd)); } +#endif -static void play(size_t frames) { - size_t avail_bytes, written_frames; - ssize_t written_bytes; - - if(activate()) { - if(playing) - forceplay = frames; - else - forceplay = 0; /* Must have called abandon() */ - return; - } - D(("play: play %zu/%zu%s %dHz %db %dc", frames, playing->used / bpf, - playing->eof ? " EOF" : "", - playing->format.rate, - playing->format.bits, - playing->format.channels)); - /* If we haven't got enough bytes yet wait until we have. Exception: when - * we are at eof. 
*/ - if(playing->used < frames * bpf && !playing->eof) { - forceplay = frames; - return; - } - /* We have got enough data so don't force play again */ - forceplay = 0; - /* Figure out how many frames there are available to write */ - if(playing->start + playing->used > playing->size) - avail_bytes = playing->size - playing->start; - else - avail_bytes = playing->used; +/** @brief Command backend initialization */ +static void command_init(void) { + info("selected command backend"); + fork_cmd(); +} - if(!config->speaker_command) { -#if API_ALSA - snd_pcm_sframes_t pcm_written_frames; - size_t avail_frames; - int err; +/** @brief Play to a subprocess */ +static size_t command_play(size_t frames) { + return frames; +} - avail_frames = avail_bytes / bpf; - if(avail_frames > frames) - avail_frames = frames; - if(!avail_frames) - return; - pcm_written_frames = snd_pcm_writei(pcm, - playing->buffer + playing->start, - avail_frames); - D(("actually play %zu frames, wrote %d", - avail_frames, (int)pcm_written_frames)); - if(pcm_written_frames < 0) { - switch(pcm_written_frames) { - case -EPIPE: /* underrun */ - error(0, "snd_pcm_writei reports underrun"); - if((err = snd_pcm_prepare(pcm)) < 0) - fatal(0, "error calling snd_pcm_prepare: %d", err); - return; - case -EAGAIN: - return; - default: - fatal(0, "error calling snd_pcm_writei: %d", - (int)pcm_written_frames); - } - } - written_frames = pcm_written_frames; - written_bytes = written_frames * bpf; -#else - assert(!"reached"); -#endif - } else { - if(avail_bytes > frames * bpf) - avail_bytes = frames * bpf; - written_bytes = write(kidfd, playing->buffer + playing->start, - avail_bytes); - D(("actually play %zu bytes, wrote %d", - avail_bytes, (int)written_bytes)); - if(written_bytes < 0) { - switch(errno) { - case EPIPE: - error(0, "hmm, kid died; trying another"); - fork_kid(); - return; - case EAGAIN: - return; - } - } - written_frames = written_bytes / bpf; /* good enough */ +/** @brief Command/network backend activation */ +static int generic_activate(void) { + if(!ready) { + bufsize = 3 * FRAMES; + bpf = bytes_per_frame(&config->sample_format); + D(("acquired audio device")); + ready = 1; } - playing->start += written_bytes; - playing->used -= written_bytes; - playing->played += written_frames; - /* If the pointer is at the end of the buffer (or the buffer is completely - * empty) wrap it back to the start. */ - if(!playing->used || playing->start == playing->size) - playing->start = 0; - frames -= written_frames; + return 0; } -/* Notify the server what we're up to. */ -static void report(void) { - struct speaker_message sm; +/** @brief Network backend initialization */ +static void network_init(void) { + struct addrinfo *res, *sres; + static const struct addrinfo pref = { + 0, + PF_INET, + SOCK_DGRAM, + IPPROTO_UDP, + 0, + 0, + 0, + 0 + }; + static const struct addrinfo prefbind = { + AI_PASSIVE, + PF_INET, + SOCK_DGRAM, + IPPROTO_UDP, + 0, + 0, + 0, + 0 + }; + static const int one = 1; + int sndbuf, target_sndbuf = 131072; + socklen_t len; + char *sockname, *ssockname; - if(playing && playing->buffer != (void *)&playing->format) { - memset(&sm, 0, sizeof sm); - sm.type = paused ? 
SM_PAUSED : SM_PLAYING; - strcpy(sm.id, playing->id); - sm.data = playing->played / playing->format.rate; - speaker_send(1, &sm, 0); + res = get_address(&config->broadcast, &pref, &sockname); + if(!res) exit(-1); + if(config->broadcast_from.n) { + sres = get_address(&config->broadcast_from, &prefbind, &ssockname); + if(!sres) exit(-1); + } else + sres = 0; + if((bfd = socket(res->ai_family, + res->ai_socktype, + res->ai_protocol)) < 0) + fatal(errno, "error creating broadcast socket"); + if(setsockopt(bfd, SOL_SOCKET, SO_BROADCAST, &one, sizeof one) < 0) + fatal(errno, "error setting SO_BROADCAST on broadcast socket"); + len = sizeof sndbuf; + if(getsockopt(bfd, SOL_SOCKET, SO_SNDBUF, + &sndbuf, &len) < 0) + fatal(errno, "error getting SO_SNDBUF"); + if(target_sndbuf > sndbuf) { + if(setsockopt(bfd, SOL_SOCKET, SO_SNDBUF, + &target_sndbuf, sizeof target_sndbuf) < 0) + error(errno, "error setting SO_SNDBUF to %d", target_sndbuf); + else + info("changed socket send buffer size from %d to %d", + sndbuf, target_sndbuf); + } else + info("default socket send buffer is %d", + sndbuf); + /* We might well want to set additional broadcast- or multicast-related + * options here */ + if(sres && bind(bfd, sres->ai_addr, sres->ai_addrlen) < 0) + fatal(errno, "error binding broadcast socket to %s", ssockname); + if(connect(bfd, res->ai_addr, res->ai_addrlen) < 0) + fatal(errno, "error connecting broadcast socket to %s", sockname); + /* Select an SSRC */ + gcry_randomize(&rtp_id, sizeof rtp_id, GCRY_STRONG_RANDOM); + info("selected network backend, sending to %s", sockname); + if(config->sample_format.byte_format != AO_FMT_BIG) { + info("forcing big-endian sample format"); + config->sample_format.byte_format = AO_FMT_BIG; } - time(&last_report); } -static void reap(int __attribute__((unused)) sig) { - pid_t kid; - int st; - - do - kid = waitpid(-1, &st, WNOHANG); - while(kid > 0); - signal(SIGCHLD, reap); +/** @brief Play over the network */ +static size_t network_play(size_t frames) { + return frames; } -static int addfd(int fd, int events) { - if(fdno < NFDS) { - fds[fdno].fd = fd; - fds[fdno].events = events; - return fdno++; - } else - return -1; -} +/** @brief Table of speaker backends */ +static const struct speaker_backend backends[] = { +#if API_ALSA + { + BACKEND_ALSA, + 0, + alsa_init, + alsa_activate, + alsa_play, + alsa_deactivate + }, +#endif + { + BACKEND_COMMAND, + FIXED_FORMAT, + command_init, + generic_activate, + command_play, + 0 /* deactivate */ + }, + { + BACKEND_NETWORK, + FIXED_FORMAT, + network_init, + generic_activate, + network_play, + 0 /* deactivate */ + }, + { -1, 0, 0, 0, 0, 0 } +}; int main(int argc, char **argv) { - int n, fd, stdin_slot, alsa_slots, kid_slot; + int n, fd, stdin_slot, alsa_slots, cmdfd_slot, bfd_slot, poke, timeout; struct track *t; struct speaker_message sm; #if API_ALSA @@ -663,7 +1078,6 @@ int main(int argc, char **argv) { #endif set_progname(argv); - mem_init(0); if(!setlocale(LC_CTYPE, "")) fatal(errno, "error calling setlocale"); while((n = getopt_long(argc, argv, "hVc:dD", options, 0)) >= 0) { switch(n) { @@ -692,16 +1106,15 @@ int main(int argc, char **argv) { become_mortal(); /* make sure we're not root, whatever the config says */ if(getuid() == 0 || geteuid() == 0) fatal(0, "do not run as root"); - info("started"); - if(config->speaker_command) - fork_kid(); - else { -#if API_ALSA - /* ok */ -#else - fatal(0, "invoked speaker but no speaker_command and no known sound API"); - #endif - } + /* identify the backend used to play */ + for(n = 0; 
backends[n].backend != -1; ++n) + if(backends[n].backend == config->speaker_backend) + break; + if(backends[n].backend == -1) + fatal(0, "unsupported backend %d", config->speaker_backend); + backend = &backends[n]; + /* backend-specific initialization */ + backend->init(); while(getppid() != 1) { fdno = 0; /* Always ready for commands from the main server. */ @@ -715,13 +1128,60 @@ int main(int argc, char **argv) { /* If forceplay is set then wait until it succeeds before waiting on the * sound device. */ alsa_slots = -1; - kid_slot = -1; + cmdfd_slot = -1; + bfd_slot = -1; + /* By default we will wait up to a second before thinking about current + * state. */ + timeout = 1000; if(ready && !forceplay) { - if(config->speaker_command) { - if(kidfd >= 0) - kid_slot = addfd(kidfd, POLLOUT); - } else { + switch(config->speaker_backend) { + case BACKEND_COMMAND: + /* We send sample data to the subprocess as fast as it can accept it. + * This isn't ideal as pause latency can be very high as a result. */ + if(cmdfd >= 0) + cmdfd_slot = addfd(cmdfd, POLLOUT); + break; + case BACKEND_NETWORK: { + struct timeval now; + uint64_t target_us; + uint64_t target_rtp_time; + const int64_t samples_ahead = ((uint64_t)RTP_AHEAD_MS + * config->sample_format.rate + * config->sample_format.channels + / 1000); +#if 0 + static unsigned logit; +#endif + + /* If we're starting then initialize the base time */ + if(!rtp_time) + xgettimeofday(&rtp_time_0, 0); + /* We send audio data whenever we get RTP_AHEAD seconds or more + * behind */ + xgettimeofday(&now, 0); + target_us = tvsub_us(now, rtp_time_0); + assert(target_us <= UINT64_MAX / 88200); + target_rtp_time = (target_us * config->sample_format.rate + * config->sample_format.channels) + + / 1000000; +#if 0 + /* TODO remove logging guff */ + if(!(logit++ & 1023)) + info("rtp_time %llu target %llu difference %lld [%lld]", + rtp_time, target_rtp_time, + rtp_time - target_rtp_time, + samples_ahead); +#endif + if((int64_t)(rtp_time - target_rtp_time) < samples_ahead) + bfd_slot = addfd(bfd, POLLOUT); + break; + } #if API_ALSA + case BACKEND_ALSA: { + /* We send sample data to ALSA as fast as it can accept it, relying on + * the fact that it has a relatively small buffer to minimize pause + * latency. 
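+       * We wait on the descriptors ALSA hands back rather than calling
+       * snd_pcm_writei() blindly, so the loop stays responsive to commands
+       * arriving on stdin.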
*/ int retry = 3; alsa_slots = fdno; @@ -739,7 +1199,11 @@ int main(int argc, char **argv) { } while(retry-- > 0); if(alsa_nslots >= 0) fdno += alsa_nslots; + break; + } #endif + default: + assert(!"unknown backend"); } } /* If any other tracks don't have a full buffer, try to read sample data @@ -751,29 +1215,47 @@ int main(int argc, char **argv) { } else t->slot = -1; } - /* Wait up to a second before thinking about current state */ - n = poll(fds, fdno, 1000); + /* Wait for something interesting to happen */ + n = poll(fds, fdno, timeout); if(n < 0) { if(errno == EINTR) continue; fatal(errno, "error calling poll"); } /* Play some sound before doing anything else */ - if(alsa_slots != -1) { + poke = 0; + switch(config->speaker_backend) { #if API_ALSA - unsigned short alsa_revents; - - if((err = snd_pcm_poll_descriptors_revents(pcm, - &fds[alsa_slots], - alsa_nslots, - &alsa_revents)) < 0) - fatal(0, "error calling snd_pcm_poll_descriptors_revents: %d", err); - if(alsa_revents & (POLLOUT | POLLERR)) - play(3 * FRAMES); + case BACKEND_ALSA: + if(alsa_slots != -1) { + unsigned short alsa_revents; + + if((err = snd_pcm_poll_descriptors_revents(pcm, + &fds[alsa_slots], + alsa_nslots, + &alsa_revents)) < 0) + fatal(0, "error calling snd_pcm_poll_descriptors_revents: %d", err); + if(alsa_revents & (POLLOUT | POLLERR)) + play(3 * FRAMES); + } else + poke = 1; + break; #endif - } else if(kid_slot != -1) { - if(fds[kid_slot].revents & (POLLOUT | POLLERR)) - play(3 * FRAMES); - } else { + case BACKEND_COMMAND: + if(cmdfd_slot != -1) { + if(fds[cmdfd_slot].revents & (POLLOUT | POLLERR)) + play(3 * FRAMES); + } else + poke = 1; + break; + case BACKEND_NETWORK: + if(bfd_slot != -1) { + if(fds[bfd_slot].revents & (POLLOUT | POLLERR)) + play(3 * FRAMES); + } else + poke = 1; + break; + } + if(poke) { /* Some attempt to play must have failed */ if(playing && !paused) play(forceplay);