* USA
*/
/** @file server/speaker.c
- * @brief Speaker processs
+ * @brief Speaker process
*
* This program is responsible for transmitting a single coherent audio stream
* to its destination (over the network, to some sound API, to some
* subprocess). It receives connections from decoders via file descriptor
* passing from the main server and plays them in the right order.
*
- * For the <a href="http://www.alsa-project.org/">ALSA</a> API, 8- and 16- bit
- * stereo and mono are supported, with any sample rate (within the limits that
- * ALSA can deal with.)
+ * @b Encodings. For the <a href="http://www.alsa-project.org/">ALSA</a> API,
+ * 8- and 16- bit stereo and mono are supported, with any sample rate (within
+ * the limits that ALSA can deal with.)
*
* When communicating with a subprocess, <a
* href="http://sox.sourceforge.net/">sox</a> is invoked to convert the inbound
* between versions; the speaker is assumed to be built from the same source
* and run on the same host as the main server.
*
- * This program deliberately does not use the garbage collector even though it
- * might be convenient to do so. This is for two reasons. Firstly some sound
- * APIs use thread threads and we do not want to have to deal with potential
- * interactions between threading and garbage collection. Secondly this
- * process needs to be able to respond quickly and this is not compatible with
- * the collector hanging the program even relatively briefly.
+ * @b Garbage @b Collection. This program deliberately does not use the
+ * garbage collector even though it might be convenient to do so. This is for
+ * two reasons. Firstly some sound APIs use threads and we do not want
+ * to have to deal with potential interactions between threading and garbage
+ * collection. Secondly this process needs to be able to respond quickly and
+ * this is not compatible with the collector hanging the program even
+ * relatively briefly.
+ *
+ * @b Units. This program thinks at various times in three different units.
+ * Bytes are obvious. A sample is a single sample on a single channel. A
+ * frame is several samples on different channels at the same point in time.
+ * So (for instance) a 16-bit stereo frame is 4 bytes and consists of a pair of
+ * 2-byte samples.
*/
#include <config.h>
#include "log.h"
#include "defs.h"
#include "mem.h"
-#include "speaker.h"
+#include "speaker-protocol.h"
#include "user.h"
#include "addr.h"
#include "timeval.h"
#include "rtp.h"
+#include "speaker.h"
#if API_ALSA
#include <alsa/asoundlib.h>
#endif
-#ifdef WORDS_BIGENDIAN
-# define MACHINE_AO_FMT AO_FMT_BIG
-#else
-# define MACHINE_AO_FMT AO_FMT_LITTLE
-#endif
-
-/** @brief How many seconds of input to buffer
- *
- * While any given connection has this much audio buffered, no more reads will
- * be issued for that connection. The decoder will have to wait.
- */
-#define BUFFER_SECONDS 5
-
-#define FRAMES 4096 /* Frame batch size */
-
-/** @brief Bytes to send per network packet
- *
- * Don't make this too big or arithmetic will start to overflow.
- */
-#define NETWORK_BYTES (1024+sizeof(struct rtp_header))
-
-/** @brief Maximum RTP playahead (ms) */
-#define RTP_AHEAD_MS 1000
-
-/** @brief Maximum number of FDs to poll for */
-#define NFDS 256
+/** @brief Linked list of all prepared tracks */
+struct track *tracks;
-/** @brief Track structure
- *
- * Known tracks are kept in a linked list. Usually there will be at most two
- * of these but rearranging the queue can cause there to be more.
- */
-static struct track {
- struct track *next; /* next track */
- int fd; /* input FD */
- char id[24]; /* ID */
- size_t start, used; /* start + bytes used */
- int eof; /* input is at EOF */
- int got_format; /* got format yet? */
- ao_sample_format format; /* sample format */
- unsigned long long played; /* number of frames played */
- char *buffer; /* sample buffer */
- size_t size; /* sample buffer size */
- int slot; /* poll array slot */
-} *tracks, *playing; /* all tracks + playing track */
+/** @brief Playing track, or NULL */
+struct track *playing;
static time_t last_report; /* when we last reported */
static int paused; /* pause status */
static size_t bpf; /* bytes per frame */
static struct pollfd fds[NFDS]; /* if we need more than that */
static int fdno; /* fd number */
-static size_t bufsize; /* buffer size */
#if API_ALSA
/** @brief The current PCM handle */
static snd_pcm_t *pcm;
static snd_pcm_uframes_t last_pcm_bufsize; /* last seen buffer size */
-static ao_sample_format pcm_format; /* current format if aodev != 0 */
#endif
-/** @brief Ready to send audio
+/** @brief The current device state */
+enum device_states device_state;
+
+/** @brief The current device sample format
*
- * This is set when the destination is ready to receive audio. Generally
- * this implies that the sound device is open. In the ALSA backend it
- * does @b not necessarily imply that is has the right sample format.
+ * Only meaningful if @ref device_state = @ref device_open or perhaps @ref
+ * device_error. For @ref FIXED_FORMAT backends, this should always match @c
+ * config->sample_format.
*/
-static int ready;
+ao_sample_format device_format;
-static int forceplay; /* frames to force play */
-static int cmdfd = -1; /* child process input */
-static int bfd = -1; /* broadcast FD */
+/** @brief Pipe to subprocess
+ *
+ * This is the file descriptor to write to for @ref BACKEND_COMMAND.
+ */
+static int cmdfd = -1;
+
+/** @brief Network socket
+ *
+ * This is the file descriptor to write to for @ref BACKEND_NETWORK.
+ */
+static int bfd = -1;
/** @brief RTP timestamp
*
*/
static struct timeval rtp_time_0;
-static uint16_t rtp_seq; /* frame sequence number */
-static uint32_t rtp_id; /* RTP SSRC */
-static int idled; /* set when idled */
-static int audio_errors; /* audio error counter */
+/** @brief RTP packet sequence number */
+static uint16_t rtp_seq;
-/** @brief Structure of a backend */
-struct speaker_backend {
- /** @brief Which backend this is
- *
- * @c -1 terminates the list.
- */
- int backend;
+/** @brief RTP SSRC */
+static uint32_t rtp_id;
- /** @brief Flags
- *
- * Possible values
- * - @ref FIXED_FORMAT
- */
- unsigned flags;
-/** @brief Lock to configured sample format */
-#define FIXED_FORMAT 0x0001
-
- /** @brief Initialization
- *
- * Called once at startup. This is responsible for one-time setup
- * operations, for instance opening a network socket to transmit to.
- *
- * When writing to a native sound API this might @b not imply opening the
- * native sound device - that might be done by @c activate below.
- */
- void (*init)(void);
-
- /** @brief Activation
- * @return 0 on success, non-0 on error
- *
- * Called to activate the output device.
- *
- * After this function succeeds, @ref ready should be non-0. As well as
- * opening the audio device, this function is responsible for reconfiguring
- * if it necessary to cope with different samples formats (for backends that
- * don't demand a single fixed sample format for the lifetime of the server).
- */
- int (*activate)(void);
+/** @brief Set when idled
+ *
+ * This is set when the sound device is deliberately closed by idle().
+ */
+static int idled; /* set when idled */
- /** @brief Play sound
- * @param frames Number of frames to play
- * @return Number of frames actually played
- */
- size_t (*play)(size_t frames);
-
- /** @brief Deactivation
- *
- * Called to deactivate the sound device. This is the inverse of
- * @c activate above.
- */
- void (*deactivate)(void);
-};
+/** @brief Error counter */
+static int audio_errors;
/** @brief Selected backend */
static const struct speaker_backend *backend;
* @param t Pointer to track
* @return 0 on success, -1 on EOF
*
- * This is effectively the read callback on @c t->fd.
+ * This is effectively the read callback on @c t->fd. It is called from the
+ * main loop whenever the track's file descriptor is readable, assuming the
+ * buffer has not reached the maximum allowed occupancy.
*/
static int fill(struct track *t) {
size_t where, left;
return 0;
}
-/** @brief Close the sound device */
+/** @brief Close the sound device
+ *
+ * This is called to deactivate the output device when pausing, and also by the
+ * ALSA backend when changing encoding (in which case the sound device will be
+ * immediately reactivated).
+ */
static void idle(void) {
D(("idle"));
- if(backend->deactivate)
+ if(backend->deactivate)
backend->deactivate();
+ else
+ device_state = device_closed;
idled = 1;
- ready = 0;
}
/** @brief Abandon the current track */
removetrack(playing->id);
destroy(playing);
playing = 0;
- forceplay = 0;
-}
-
-#if API_ALSA
-/** @brief Log ALSA parameters */
-static void log_params(snd_pcm_hw_params_t *hwparams,
- snd_pcm_sw_params_t *swparams) {
- snd_pcm_uframes_t f;
- unsigned u;
-
- return; /* too verbose */
- if(hwparams) {
- /* TODO */
- }
- if(swparams) {
- snd_pcm_sw_params_get_silence_size(swparams, &f);
- info("sw silence_size=%lu", (unsigned long)f);
- snd_pcm_sw_params_get_silence_threshold(swparams, &f);
- info("sw silence_threshold=%lu", (unsigned long)f);
- snd_pcm_sw_params_get_sleep_min(swparams, &u);
- info("sw sleep_min=%lu", (unsigned long)u);
- snd_pcm_sw_params_get_start_threshold(swparams, &f);
- info("sw start_threshold=%lu", (unsigned long)f);
- snd_pcm_sw_params_get_stop_threshold(swparams, &f);
- info("sw stop_threshold=%lu", (unsigned long)f);
- snd_pcm_sw_params_get_xfer_align(swparams, &f);
- info("sw xfer_align=%lu", (unsigned long)f);
- }
}
-#endif
/** @brief Enable sound output
*
* Makes sure the sound device is open and has the right sample format. Return
* 0 on success and -1 on error.
*/
-static int activate(void) {
+static void activate(void) {
/* If we don't know the format yet we cannot start. */
if(!playing->got_format) {
D((" - not got format for %s", playing->id));
- return -1;
+ return;
+ }
+ if(backend->flags & FIXED_FORMAT)
+ device_format = config->sample_format;
+ if(backend->activate) {
+ backend->activate();
+ } else {
+ assert(backend->flags & FIXED_FORMAT);
+ /* ...otherwise device_format not set */
+ device_state = device_open;
}
- return backend->activate();
+ if(device_state == device_open)
+ bpf = bytes_per_frame(&device_format);
}
-/* Check to see whether the current track has finished playing */
+/** @brief Check whether the current track has finished
+ *
+ * The current track is determined to have finished either if the input stream
+ * eded before the format could be determined (i.e. it is malformed) or the
+ * input is at end of file and there is less than a frame left unplayed. (So
+ * it copes with decoders that crash mid-frame.)
+ */
static void maybe_finished(void) {
if(playing
&& playing->eof
abandon();
}
-static void fork_cmd(void) {
- pid_t cmdpid;
- int pfd[2];
- if(cmdfd != -1) close(cmdfd);
- xpipe(pfd);
- cmdpid = xfork();
- if(!cmdpid) {
- signal(SIGPIPE, SIG_DFL);
- xdup2(pfd[0], 0);
- close(pfd[0]);
- close(pfd[1]);
- execl("/bin/sh", "sh", "-c", config->speaker_command, (char *)0);
- fatal(errno, "error execing /bin/sh");
- }
- close(pfd[0]);
- cmdfd = pfd[1];
- D(("forked cmd %d, fd = %d", cmdpid, cmdfd));
-}
-
+/** @brief Play up to @p frames frames of audio
+ *
+ * It is always safe to call this function.
+ * - If @ref playing is 0 then it will just return
+ * - If @ref paused is non-0 then it will just return
+ * - If @ref device_state != @ref device_open then it will call activate() and
+ * return if it fails.
+ * - If there is not enough audio to play then it will play what is available.
+ *
+ * If there are not enough frames to play then whatever is available is played
+ * instead. It is up to mainloop() to ensure that play() is not called when
+ * only an unreasonably small amount of data is available to play.
+ */
static void play(size_t frames) {
- size_t avail_frames, avail_bytes, write_bytes, written_frames;
+ size_t avail_frames, avail_bytes, written_frames;
ssize_t written_bytes;
- struct rtp_header header;
- struct iovec vec[2];
- /* Make sure the output device is activated */
- if(activate()) {
- if(playing)
- forceplay = frames;
- else
- forceplay = 0; /* Must have called abandon() */
+ /* Make sure there's a track to play and it is not paused */
+ if(!playing || paused)
return;
+ /* Make sure the output device is open and has the right sample format */
+ if(device_state != device_open
+ || !formats_equal(&device_format, &playing->format)) {
+ activate();
+ if(device_state != device_open)
+ return;
}
D(("play: play %zu/%zu%s %dHz %db %dc", frames, playing->used / bpf,
playing->eof ? " EOF" : "",
playing->format.rate,
playing->format.bits,
playing->format.channels));
- /* If we haven't got enough bytes yet wait until we have. Exception: when
- * we are at eof. */
- if(playing->used < frames * bpf && !playing->eof) {
- forceplay = frames;
- return;
- }
- /* We have got enough data so don't force play again */
- forceplay = 0;
/* Figure out how many frames there are available to write */
if(playing->start + playing->used > playing->size)
/* The ring buffer is currently wrapped, only play up to the wrap point */
avail_frames = frames;
if(!avail_frames)
return;
-
- switch(config->speaker_backend) {
-#if API_ALSA
- case BACKEND_ALSA: {
- written_frames = backend->play(avail_frames);
- break;
- }
-#endif
- case BACKEND_COMMAND:
- if(avail_bytes > frames * bpf)
- avail_bytes = frames * bpf;
- written_bytes = write(cmdfd, playing->buffer + playing->start,
- avail_bytes);
- D(("actually play %zu bytes, wrote %d",
- avail_bytes, (int)written_bytes));
- if(written_bytes < 0) {
- switch(errno) {
- case EPIPE:
- error(0, "hmm, command died; trying another");
- fork_cmd();
- return;
- case EAGAIN:
- return;
- }
- }
- written_frames = written_bytes / bpf; /* good enough */
- break;
- case BACKEND_NETWORK:
- /* We transmit using RTP (RFC3550) and attempt to conform to the internet
- * AVT profile (RFC3551). */
-
- if(idled) {
- /* There may have been a gap. Fix up the RTP time accordingly. */
- struct timeval now;
- uint64_t delta;
- uint64_t target_rtp_time;
-
- /* Find the current time */
- xgettimeofday(&now, 0);
- /* Find the number of microseconds elapsed since rtp_time=0 */
- delta = tvsub_us(now, rtp_time_0);
- assert(delta <= UINT64_MAX / 88200);
- target_rtp_time = (delta * playing->format.rate
- * playing->format.channels) / 1000000;
- /* Overflows at ~6 years uptime with 44100Hz stereo */
-
- /* rtp_time is the number of samples we've played. NB that we play
- * RTP_AHEAD_MS ahead of ourselves, so it may legitimately be ahead of
- * the value we deduce from time comparison.
- *
- * Suppose we have 1s track started at t=0, and another track begins to
- * play at t=2s. Suppose RTP_AHEAD_MS=1000 and 44100Hz stereo. In that
- * case we'll send 1s of audio as fast as we can, giving rtp_time=88200.
- * rtp_time stops at this point.
- *
- * At t=2s we'll have calculated target_rtp_time=176400. In this case we
- * set rtp_time=176400 and the player can correctly conclude that it
- * should leave 1s between the tracks.
- *
- * Suppose instead that the second track arrives at t=0.5s, and that
- * we've managed to transmit the whole of the first track already. We'll
- * have target_rtp_time=44100.
- *
- * The desired behaviour is to play the second track back to back with
- * first. In this case therefore we do not modify rtp_time.
- *
- * Is it ever right to reduce rtp_time? No; for that would imply
- * transmitting packets with overlapping timestamp ranges, which does not
- * make sense.
- */
- if(target_rtp_time > rtp_time) {
- /* More time has elapsed than we've transmitted samples. That implies
- * we've been 'sending' silence. */
- info("advancing rtp_time by %"PRIu64" samples",
- target_rtp_time - rtp_time);
- rtp_time = target_rtp_time;
- } else if(target_rtp_time < rtp_time) {
- const int64_t samples_ahead = ((uint64_t)RTP_AHEAD_MS
- * config->sample_format.rate
- * config->sample_format.channels
- / 1000);
-
- if(target_rtp_time + samples_ahead < rtp_time) {
- info("reversing rtp_time by %"PRIu64" samples",
- rtp_time - target_rtp_time);
- }
- }
- }
- header.vpxcc = 2 << 6; /* V=2, P=0, X=0, CC=0 */
- header.seq = htons(rtp_seq++);
- header.timestamp = htonl((uint32_t)rtp_time);
- header.ssrc = rtp_id;
- header.mpt = (idled ? 0x80 : 0x00) | 10;
- /* 10 = L16 = 16-bit x 2 x 44100KHz. We ought to deduce this value from
- * the sample rate (in a library somewhere so that configuration.c can rule
- * out invalid rates).
- */
- idled = 0;
- if(avail_bytes > NETWORK_BYTES - sizeof header) {
- avail_bytes = NETWORK_BYTES - sizeof header;
- /* Always send a whole number of frames */
- avail_bytes -= avail_bytes % bpf;
- }
- /* "The RTP clock rate used for generating the RTP timestamp is independent
- * of the number of channels and the encoding; it equals the number of
- * sampling periods per second. For N-channel encodings, each sampling
- * period (say, 1/8000 of a second) generates N samples. (This terminology
- * is standard, but somewhat confusing, as the total number of samples
- * generated per second is then the sampling rate times the channel
- * count.)"
- */
- write_bytes = avail_bytes;
- if(write_bytes) {
- vec[0].iov_base = (void *)&header;
- vec[0].iov_len = sizeof header;
- vec[1].iov_base = playing->buffer + playing->start;
- vec[1].iov_len = avail_bytes;
- do {
- written_bytes = writev(bfd,
- vec,
- 2);
- } while(written_bytes < 0 && errno == EINTR);
- if(written_bytes < 0) {
- error(errno, "error transmitting audio data");
- ++audio_errors;
- if(audio_errors == 10)
- fatal(0, "too many audio errors");
- return;
- }
- } else
- audio_errors /= 2;
- written_bytes = avail_bytes;
- written_frames = written_bytes / bpf;
- /* Advance RTP's notion of the time */
- rtp_time += written_frames * playing->format.channels;
- break;
- default:
- assert(!"reached");
- }
+ /* Play it, Sam */
+ written_frames = backend->play(avail_frames);
written_bytes = written_frames * bpf;
/* written_bytes and written_frames had better both be set and correct by
* this point */
if(!playing->used || playing->start == playing->size)
playing->start = 0;
frames -= written_frames;
+ return;
}
/* Notify the server what we're up to. */
info("selected ALSA backend");
}
+/** @brief Log ALSA parameters */
+static void log_params(snd_pcm_hw_params_t *hwparams,
+ snd_pcm_sw_params_t *swparams) {
+ snd_pcm_uframes_t f;
+ unsigned u;
+
+ return; /* too verbose */
+ if(hwparams) {
+ /* TODO */
+ }
+ if(swparams) {
+ snd_pcm_sw_params_get_silence_size(swparams, &f);
+ info("sw silence_size=%lu", (unsigned long)f);
+ snd_pcm_sw_params_get_silence_threshold(swparams, &f);
+ info("sw silence_threshold=%lu", (unsigned long)f);
+ snd_pcm_sw_params_get_sleep_min(swparams, &u);
+ info("sw sleep_min=%lu", (unsigned long)u);
+ snd_pcm_sw_params_get_start_threshold(swparams, &f);
+ info("sw start_threshold=%lu", (unsigned long)f);
+ snd_pcm_sw_params_get_stop_threshold(swparams, &f);
+ info("sw stop_threshold=%lu", (unsigned long)f);
+ snd_pcm_sw_params_get_xfer_align(swparams, &f);
+ info("sw xfer_align=%lu", (unsigned long)f);
+ }
+}
+
+/** @brief ALSA deactivation */
+static void alsa_deactivate(void) {
+ if(pcm) {
+ int err;
+
+ if((err = snd_pcm_nonblock(pcm, 0)) < 0)
+ fatal(0, "error calling snd_pcm_nonblock: %d", err);
+ D(("draining pcm"));
+ snd_pcm_drain(pcm);
+ D(("closing pcm"));
+ snd_pcm_close(pcm);
+ pcm = 0;
+ device_state = device_closed;
+ D(("released audio device"));
+ }
+}
+
/** @brief ALSA backend activation */
-static int alsa_activate(void) {
+static void alsa_activate(void) {
/* If we need to change format then close the current device. */
- if(pcm && !formats_equal(&playing->format, &pcm_format))
- idle();
+ if(pcm && !formats_equal(&playing->format, &device_format))
+ alsa_deactivate();
+ /* Now if the sound device is open it must have the right format */
if(!pcm) {
snd_pcm_hw_params_t *hwparams;
snd_pcm_sw_params_t *swparams;
playing->format.channels, err);
goto fatal;
}
- bufsize = 3 * FRAMES;
- pcm_bufsize = bufsize;
+ pcm_bufsize = 3 * FRAMES;
if((err = snd_pcm_hw_params_set_buffer_size_near(pcm, hwparams,
&pcm_bufsize)) < 0)
fatal(0, "error from snd_pcm_hw_params_set_buffer_size (%d): %d",
FRAMES, err);
if((err = snd_pcm_sw_params(pcm, swparams)) < 0)
fatal(0, "error calling snd_pcm_sw_params: %d", err);
- pcm_format = playing->format;
- bpf = bytes_per_frame(&pcm_format);
+ device_format = playing->format;
D(("acquired audio device"));
log_params(hwparams, swparams);
- ready = 1;
+ device_state = device_open;
}
- return 0;
+ return;
fatal:
abandon();
error:
if(pcm) {
snd_pcm_close(pcm);
pcm = 0;
+ device_state = device_error;
}
- return -1;
+ return;
}
/** @brief Play via ALSA */
return pcm_written_frames;
}
-/** @brief ALSA deactivation */
-static void alsa_deactivate(void) {
- if(pcm) {
- int err;
-
- if((err = snd_pcm_nonblock(pcm, 0)) < 0)
- fatal(0, "error calling snd_pcm_nonblock: %d", err);
- D(("draining pcm"));
- snd_pcm_drain(pcm);
- D(("closing pcm"));
- snd_pcm_close(pcm);
- pcm = 0;
- forceplay = 0;
- D(("released audio device"));
- }
+static int alsa_slots, alsa_nslots = -1;
+
+/** @brief Fill in poll fd array for ALSA */
+static void alsa_beforepoll(void) {
+ /* We send sample data to ALSA as fast as it can accept it, relying on
+ * the fact that it has a relatively small buffer to minimize pause
+ * latency. */
+ int retry = 3, err;
+
+ alsa_slots = fdno;
+ do {
+ retry = 0;
+ alsa_nslots = snd_pcm_poll_descriptors(pcm, &fds[fdno], NFDS - fdno);
+ if((alsa_nslots <= 0
+ || !(fds[alsa_slots].events & POLLOUT))
+ && snd_pcm_state(pcm) == SND_PCM_STATE_XRUN) {
+ error(0, "underrun detected after call to snd_pcm_poll_descriptors()");
+ if((err = snd_pcm_prepare(pcm)))
+ fatal(0, "error calling snd_pcm_prepare: %d", err);
+ } else
+ break;
+ } while(retry-- > 0);
+ if(alsa_nslots >= 0)
+ fdno += alsa_nslots;
+}
+
+/** @brief Process poll() results for ALSA */
+static int alsa_ready(void) {
+ int err;
+
+ unsigned short alsa_revents;
+
+ if((err = snd_pcm_poll_descriptors_revents(pcm,
+ &fds[alsa_slots],
+ alsa_nslots,
+ &alsa_revents)) < 0)
+ fatal(0, "error calling snd_pcm_poll_descriptors_revents: %d", err);
+ if(alsa_revents & (POLLOUT | POLLERR))
+ return 1;
+ else
+ return 0;
}
#endif
+/** @brief Start the subprocess for @ref BACKEND_COMMAND */
+static void fork_cmd(void) {
+ pid_t cmdpid;
+ int pfd[2];
+ if(cmdfd != -1) close(cmdfd);
+ xpipe(pfd);
+ cmdpid = xfork();
+ if(!cmdpid) {
+ signal(SIGPIPE, SIG_DFL);
+ xdup2(pfd[0], 0);
+ close(pfd[0]);
+ close(pfd[1]);
+ execl("/bin/sh", "sh", "-c", config->speaker_command, (char *)0);
+ fatal(errno, "error execing /bin/sh");
+ }
+ close(pfd[0]);
+ cmdfd = pfd[1];
+ D(("forked cmd %d, fd = %d", cmdpid, cmdfd));
+}
+
/** @brief Command backend initialization */
static void command_init(void) {
info("selected command backend");
/** @brief Play to a subprocess */
static size_t command_play(size_t frames) {
- return frames;
+ size_t bytes = frames * bpf;
+ int written_bytes;
+
+ written_bytes = write(cmdfd, playing->buffer + playing->start, bytes);
+ D(("actually play %zu bytes, wrote %d",
+ bytes, written_bytes));
+ if(written_bytes < 0) {
+ switch(errno) {
+ case EPIPE:
+ error(0, "hmm, command died; trying another");
+ fork_cmd();
+ return 0;
+ case EAGAIN:
+ return 0;
+ default:
+ fatal(errno, "error writing to subprocess");
+ }
+ } else
+ return written_bytes / bpf;
}
-/** @brief Command/network backend activation */
-static int generic_activate(void) {
- if(!ready) {
- bufsize = 3 * FRAMES;
- bpf = bytes_per_frame(&config->sample_format);
- D(("acquired audio device"));
- ready = 1;
- }
- return 0;
+static int cmdfd_slot;
+
+/** @brief Update poll array for writing to subprocess */
+static void command_beforepoll(void) {
+ /* We send sample data to the subprocess as fast as it can accept it.
+ * This isn't ideal as pause latency can be very high as a result. */
+ if(cmdfd >= 0)
+ cmdfd_slot = addfd(cmdfd, POLLOUT);
+}
+
+/** @brief Process poll() results for subprocess play */
+static int command_ready(void) {
+ if(fds[cmdfd_slot].revents & (POLLOUT | POLLERR))
+ return 1;
+ else
+ return 0;
}
/** @brief Network backend initialization */
/** @brief Play over the network */
static size_t network_play(size_t frames) {
- return frames;
+ struct rtp_header header;
+ struct iovec vec[2];
+ size_t bytes = frames * bpf, written_frames;
+ int written_bytes;
+ /* We transmit using RTP (RFC3550) and attempt to conform to the internet
+ * AVT profile (RFC3551). */
+
+ if(idled) {
+ /* There may have been a gap. Fix up the RTP time accordingly. */
+ struct timeval now;
+ uint64_t delta;
+ uint64_t target_rtp_time;
+
+ /* Find the current time */
+ xgettimeofday(&now, 0);
+ /* Find the number of microseconds elapsed since rtp_time=0 */
+ delta = tvsub_us(now, rtp_time_0);
+ assert(delta <= UINT64_MAX / 88200);
+ target_rtp_time = (delta * playing->format.rate
+ * playing->format.channels) / 1000000;
+ /* Overflows at ~6 years uptime with 44100Hz stereo */
+
+ /* rtp_time is the number of samples we've played. NB that we play
+ * RTP_AHEAD_MS ahead of ourselves, so it may legitimately be ahead of
+ * the value we deduce from time comparison.
+ *
+ * Suppose we have 1s track started at t=0, and another track begins to
+ * play at t=2s. Suppose RTP_AHEAD_MS=1000 and 44100Hz stereo. In that
+ * case we'll send 1s of audio as fast as we can, giving rtp_time=88200.
+ * rtp_time stops at this point.
+ *
+ * At t=2s we'll have calculated target_rtp_time=176400. In this case we
+ * set rtp_time=176400 and the player can correctly conclude that it
+ * should leave 1s between the tracks.
+ *
+ * Suppose instead that the second track arrives at t=0.5s, and that
+ * we've managed to transmit the whole of the first track already. We'll
+ * have target_rtp_time=44100.
+ *
+ * The desired behaviour is to play the second track back to back with
+ * first. In this case therefore we do not modify rtp_time.
+ *
+ * Is it ever right to reduce rtp_time? No; for that would imply
+ * transmitting packets with overlapping timestamp ranges, which does not
+ * make sense.
+ */
+ target_rtp_time &= ~(uint64_t)1; /* stereo! */
+ if(target_rtp_time > rtp_time) {
+ /* More time has elapsed than we've transmitted samples. That implies
+ * we've been 'sending' silence. */
+ info("advancing rtp_time by %"PRIu64" samples",
+ target_rtp_time - rtp_time);
+ rtp_time = target_rtp_time;
+ } else if(target_rtp_time < rtp_time) {
+ const int64_t samples_ahead = ((uint64_t)RTP_AHEAD_MS
+ * config->sample_format.rate
+ * config->sample_format.channels
+ / 1000);
+
+ if(target_rtp_time + samples_ahead < rtp_time) {
+ info("reversing rtp_time by %"PRIu64" samples",
+ rtp_time - target_rtp_time);
+ }
+ }
+ }
+ header.vpxcc = 2 << 6; /* V=2, P=0, X=0, CC=0 */
+ header.seq = htons(rtp_seq++);
+ header.timestamp = htonl((uint32_t)rtp_time);
+ header.ssrc = rtp_id;
+ header.mpt = (idled ? 0x80 : 0x00) | 10;
+ /* 10 = L16 = 16-bit x 2 x 44100Hz. We ought to deduce this value from
+ * the sample rate (in a library somewhere so that configuration.c can rule
+ * out invalid rates).
+ */
+ idled = 0;
+ if(bytes > NETWORK_BYTES - sizeof header) {
+ bytes = NETWORK_BYTES - sizeof header;
+ /* Always send a whole number of frames */
+ bytes -= bytes % bpf;
+ }
+ /* "The RTP clock rate used for generating the RTP timestamp is independent
+ * of the number of channels and the encoding; it equals the number of
+ * sampling periods per second. For N-channel encodings, each sampling
+ * period (say, 1/8000 of a second) generates N samples. (This terminology
+ * is standard, but somewhat confusing, as the total number of samples
+ * generated per second is then the sampling rate times the channel
+ * count.)"
+ */
+ vec[0].iov_base = (void *)&header;
+ vec[0].iov_len = sizeof header;
+ vec[1].iov_base = playing->buffer + playing->start;
+ vec[1].iov_len = bytes;
+ do {
+ written_bytes = writev(bfd, vec, 2);
+ } while(written_bytes < 0 && errno == EINTR);
+ if(written_bytes < 0) {
+ error(errno, "error transmitting audio data");
+ ++audio_errors;
+ if(audio_errors == 10)
+ fatal(0, "too many audio errors");
+ return 0;
+ } else
+ audio_errors /= 2;
+ written_bytes -= sizeof (struct rtp_header);
+ written_frames = written_bytes / bpf;
+ /* Advance RTP's notion of the time */
+ rtp_time += written_frames * playing->format.channels;
+ return written_frames;
+}
+
+static int bfd_slot;
+
+/** @brief Set up poll array for network play */
+static void network_beforepoll(void) {
+ struct timeval now;
+ uint64_t target_us;
+ uint64_t target_rtp_time;
+ const int64_t samples_ahead = ((uint64_t)RTP_AHEAD_MS
+ * config->sample_format.rate
+ * config->sample_format.channels
+ / 1000);
+
+ /* If we're starting then initialize the base time */
+ if(!rtp_time)
+ xgettimeofday(&rtp_time_0, 0);
+ /* We send audio data whenever we get RTP_AHEAD seconds or more
+ * behind */
+ xgettimeofday(&now, 0);
+ target_us = tvsub_us(now, rtp_time_0);
+ assert(target_us <= UINT64_MAX / 88200);
+ target_rtp_time = (target_us * config->sample_format.rate
+ * config->sample_format.channels)
+ / 1000000;
+ if((int64_t)(rtp_time - target_rtp_time) < samples_ahead)
+ bfd_slot = addfd(bfd, POLLOUT);
+}
+
+/** @brief Process poll() results for network play */
+static int network_ready(void) {
+ if(fds[bfd_slot].revents & (POLLOUT | POLLERR))
+ return 1;
+ else
+ return 0;
}
/** @brief Table of speaker backends */
alsa_init,
alsa_activate,
alsa_play,
- alsa_deactivate
+ alsa_deactivate,
+ alsa_beforepoll,
+ alsa_ready
},
#endif
{
BACKEND_COMMAND,
FIXED_FORMAT,
command_init,
- generic_activate,
+ 0, /* activate */
command_play,
- 0 /* deactivate */
+ 0, /* deactivate */
+ command_beforepoll,
+ command_ready
},
{
BACKEND_NETWORK,
FIXED_FORMAT,
network_init,
- generic_activate,
+ 0, /* activate */
network_play,
- 0 /* deactivate */
+ 0, /* deactivate */
+ network_beforepoll,
+ network_ready
},
- { -1, 0, 0, 0, 0, 0 }
+ { -1, 0, 0, 0, 0, 0, 0, 0 } /* end of list */
};
-int main(int argc, char **argv) {
- int n, fd, stdin_slot, alsa_slots, cmdfd_slot, bfd_slot, poke, timeout;
+/** @brief Return nonzero if we want to play some audio
+ *
+ * We want to play audio if there is a current track; and it is not paused; and
+ * there are at least @ref FRAMES frames of audio to play, or we are in sight
+ * of the end of the current track.
+ */
+static int playable(void) {
+ return playing
+ && !paused
+ && (playing->used >= FRAMES || playing->eof);
+}
+
+/** @brief Main event loop */
+static void mainloop(void) {
struct track *t;
struct speaker_message sm;
-#if API_ALSA
- int alsa_nslots = -1, err;
-#endif
+ int n, fd, stdin_slot, timeout;
- set_progname(argv);
- if(!setlocale(LC_CTYPE, "")) fatal(errno, "error calling setlocale");
- while((n = getopt_long(argc, argv, "hVc:dD", options, 0)) >= 0) {
- switch(n) {
- case 'h': help();
- case 'V': version();
- case 'c': configfile = optarg; break;
- case 'd': debugging = 1; break;
- case 'D': debugging = 0; break;
- default: fatal(0, "invalid option");
- }
- }
- if(getenv("DISORDER_DEBUG_SPEAKER")) debugging = 1;
- /* If stderr is a TTY then log there, otherwise to syslog. */
- if(!isatty(2)) {
- openlog(progname, LOG_PID, LOG_DAEMON);
- log_default = &log_syslog;
- }
- if(config_read()) fatal(0, "cannot read configuration");
- /* ignore SIGPIPE */
- signal(SIGPIPE, SIG_IGN);
- /* reap kids */
- signal(SIGCHLD, reap);
- /* set nice value */
- xnice(config->nice_speaker);
- /* change user */
- become_mortal();
- /* make sure we're not root, whatever the config says */
- if(getuid() == 0 || geteuid() == 0) fatal(0, "do not run as root");
- /* identify the backend used to play */
- for(n = 0; backends[n].backend != -1; ++n)
- if(backends[n].backend == config->speaker_backend)
- break;
- if(backends[n].backend == -1)
- fatal(0, "unsupported backend %d", config->speaker_backend);
- backend = &backends[n];
- /* backend-specific initialization */
- backend->init();
while(getppid() != 1) {
fdno = 0;
+ /* By default we will wait up to a second before thinking about current
+ * state. */
+ timeout = 1000;
/* Always ready for commands from the main server. */
stdin_slot = addfd(0, POLLIN);
/* Try to read sample data for the currently playing track if there is
* buffer space. */
- if(playing && !playing->eof && playing->used < playing->size) {
+ if(playing && !playing->eof && playing->used < playing->size)
playing->slot = addfd(playing->fd, POLLIN);
- } else if(playing)
+ else if(playing)
playing->slot = -1;
- /* If forceplay is set then wait until it succeeds before waiting on the
- * sound device. */
- alsa_slots = -1;
- cmdfd_slot = -1;
- bfd_slot = -1;
- /* By default we will wait up to a second before thinking about current
- * state. */
- timeout = 1000;
- if(ready && !forceplay) {
- switch(config->speaker_backend) {
- case BACKEND_COMMAND:
- /* We send sample data to the subprocess as fast as it can accept it.
- * This isn't ideal as pause latency can be very high as a result. */
- if(cmdfd >= 0)
- cmdfd_slot = addfd(cmdfd, POLLOUT);
- break;
- case BACKEND_NETWORK: {
- struct timeval now;
- uint64_t target_us;
- uint64_t target_rtp_time;
- const int64_t samples_ahead = ((uint64_t)RTP_AHEAD_MS
- * config->sample_format.rate
- * config->sample_format.channels
- / 1000);
-#if 0
- static unsigned logit;
-#endif
-
- /* If we're starting then initialize the base time */
- if(!rtp_time)
- xgettimeofday(&rtp_time_0, 0);
- /* We send audio data whenever we get RTP_AHEAD seconds or more
- * behind */
- xgettimeofday(&now, 0);
- target_us = tvsub_us(now, rtp_time_0);
- assert(target_us <= UINT64_MAX / 88200);
- target_rtp_time = (target_us * config->sample_format.rate
- * config->sample_format.channels)
-
- / 1000000;
-#if 0
- /* TODO remove logging guff */
- if(!(logit++ & 1023))
- info("rtp_time %llu target %llu difference %lld [%lld]",
- rtp_time, target_rtp_time,
- rtp_time - target_rtp_time,
- samples_ahead);
-#endif
- if((int64_t)(rtp_time - target_rtp_time) < samples_ahead)
- bfd_slot = addfd(bfd, POLLOUT);
- break;
- }
-#if API_ALSA
- case BACKEND_ALSA: {
- /* We send sample data to ALSA as fast as it can accept it, relying on
- * the fact that it has a relatively small buffer to minimize pause
- * latency. */
- int retry = 3;
-
- alsa_slots = fdno;
- do {
- retry = 0;
- alsa_nslots = snd_pcm_poll_descriptors(pcm, &fds[fdno], NFDS - fdno);
- if((alsa_nslots <= 0
- || !(fds[alsa_slots].events & POLLOUT))
- && snd_pcm_state(pcm) == SND_PCM_STATE_XRUN) {
- error(0, "underrun detected after call to snd_pcm_poll_descriptors()");
- if((err = snd_pcm_prepare(pcm)))
- fatal(0, "error calling snd_pcm_prepare: %d", err);
- } else
- break;
- } while(retry-- > 0);
- if(alsa_nslots >= 0)
- fdno += alsa_nslots;
- break;
- }
-#endif
- default:
- assert(!"unknown backend");
- }
+ if(playable()) {
+ /* We want to play some audio. If the device is closed then we attempt
+ * to open it. */
+ if(device_state == device_closed)
+ activate();
+    /* If the device is (now) open then we will wait until it is ready for
+ * more. If something went wrong then we should have device_error
+ * instead, but the post-poll code will cope even if it's
+ * device_closed. */
+ if(device_state == device_open)
+ backend->beforepoll();
}
/* If any other tracks don't have a full buffer, try to read sample data
- * from them. */
+     * from them.  We do this last of all, so that if we run out of slots,
+     * nothing important goes unmonitored. */
for(t = tracks; t; t = t->next)
if(t != playing) {
if(!t->eof && t->used < t->size) {
fatal(errno, "error calling poll");
}
/* Play some sound before doing anything else */
- poke = 0;
- switch(config->speaker_backend) {
-#if API_ALSA
- case BACKEND_ALSA:
- if(alsa_slots != -1) {
- unsigned short alsa_revents;
-
- if((err = snd_pcm_poll_descriptors_revents(pcm,
- &fds[alsa_slots],
- alsa_nslots,
- &alsa_revents)) < 0)
- fatal(0, "error calling snd_pcm_poll_descriptors_revents: %d", err);
- if(alsa_revents & (POLLOUT | POLLERR))
- play(3 * FRAMES);
- } else
- poke = 1;
- break;
-#endif
- case BACKEND_COMMAND:
- if(cmdfd_slot != -1) {
- if(fds[cmdfd_slot].revents & (POLLOUT | POLLERR))
+ if(playable()) {
+ /* We want to play some audio */
+ if(device_state == device_open) {
+ if(backend->ready())
play(3 * FRAMES);
- } else
- poke = 1;
- break;
- case BACKEND_NETWORK:
- if(bfd_slot != -1) {
- if(fds[bfd_slot].revents & (POLLOUT | POLLERR))
- play(3 * FRAMES);
- } else
- poke = 1;
- break;
- }
- if(poke) {
- /* Some attempt to play must have failed */
- if(playing && !paused)
- play(forceplay);
- else
- forceplay = 0; /* just in case */
+ } else {
+ /* We must be in _closed or _error, and it should be the latter, but we
+ * cope with either.
+ *
+ * We most likely timed out, so now is a good time to retry. play()
+ * knows to re-activate the device if necessary.
+ */
+ play(3 * FRAMES);
+ }
}
/* Perhaps we have a command to process */
if(fds[stdin_slot].revents & POLLIN) {
+ /* There might (in theory) be several commands queued up, but in general
+ * this won't be the case, so we don't bother looping around to pick them
+ * all up. */
n = speaker_recv(0, &sm, &fd);
if(n > 0)
switch(sm.type) {
t = findtrack(sm.id, 1);
if(fd != -1) acquire(t, fd);
playing = t;
- play(bufsize);
+ /* We attempt to play straight away rather than going round the loop.
+ * play() is clever enough to perform any activation that is
+ * required. */
+ play(3 * FRAMES);
report();
break;
case SM_PAUSE:
D(("SM_RESUME"));
if(paused) {
paused = 0;
+ /* As for SM_PLAY we attempt to play straight away. */
if(playing)
- play(bufsize);
+ play(3 * FRAMES);
}
report();
break;
for(t = tracks; t; t = t->next)
if(t->slot != -1 && (fds[t->slot].revents & (POLLIN | POLLHUP)))
fill(t);
- /* We might be able to play now */
- if(ready && forceplay && playing && !paused)
- play(forceplay);
/* Maybe we finished playing a track somewhere in the above */
maybe_finished();
/* If we don't need the sound device for now then close it for the benefit
* of anyone else who wants it. */
- if((!playing || paused) && ready)
+ if((!playing || paused) && device_state == device_open)
idle();
/* If we've not reported out state for a second do so now. */
if(time(0) > last_report)
report();
}
+}
+
+int main(int argc, char **argv) {
+ int n;
+
+ set_progname(argv);
+ if(!setlocale(LC_CTYPE, "")) fatal(errno, "error calling setlocale");
+ while((n = getopt_long(argc, argv, "hVc:dD", options, 0)) >= 0) {
+ switch(n) {
+ case 'h': help();
+ case 'V': version();
+ case 'c': configfile = optarg; break;
+ case 'd': debugging = 1; break;
+ case 'D': debugging = 0; break;
+ default: fatal(0, "invalid option");
+ }
+ }
+ if(getenv("DISORDER_DEBUG_SPEAKER")) debugging = 1;
+ /* If stderr is a TTY then log there, otherwise to syslog. */
+ if(!isatty(2)) {
+ openlog(progname, LOG_PID, LOG_DAEMON);
+ log_default = &log_syslog;
+ }
+ if(config_read()) fatal(0, "cannot read configuration");
+ /* ignore SIGPIPE */
+ signal(SIGPIPE, SIG_IGN);
+ /* reap kids */
+ signal(SIGCHLD, reap);
+ /* set nice value */
+ xnice(config->nice_speaker);
+ /* change user */
+ become_mortal();
+ /* make sure we're not root, whatever the config says */
+ if(getuid() == 0 || geteuid() == 0) fatal(0, "do not run as root");
+ /* identify the backend used to play */
+ for(n = 0; backends[n].backend != -1; ++n)
+ if(backends[n].backend == config->speaker_backend)
+ break;
+ if(backends[n].backend == -1)
+ fatal(0, "unsupported backend %d", config->speaker_backend);
+ backend = &backends[n];
+ /* backend-specific initialization */
+ backend->init();
+ mainloop();
info("stopped (parent terminated)");
exit(0);
}