/*----- Imports -----------------------------------------------------------*/
-import java.io.{Closeable, File, FileOutputStream, FileReader, IOException};
+import scala.collection.mutable.{ArrayBuffer, HashMap};
-import scala.collection.mutable.HashMap;
+import java.io.{Closeable, File, IOException};
+import java.lang.{Long => JLong};
+import java.net.{URL, URLConnection};
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import java.util.zip.GZIPInputStream;
+
+import sys.{SystemError, hashsz, runCommand};
+import sys.Errno.EEXIST;
+import sys.FileImplicits._;
+import sys.FileInfo.{DIR, REG};
+
+import progress.{Eyecandy, SimpleModel, DataModel, DetailedModel};
+import Implicits.truish;
/*----- Useful regular expressions ----------------------------------------*/
-val RX_COMMENT = """(?x) ^ \s* (?: \# .* )? $""".r;
-val RX_KEYVAL = """(?x) ^ \s*
+private final val RX_COMMENT = """(?x) ^ \s* (?: \# .* )? $""".r;
+private final val RX_KEYVAL = """(?x) ^ \s*
([-\w]+)
(?:\s+(?!=)|\s*=\s*)
(|\S|\S.*\S)
\s* $""".r;
-val RX_DOLLARSUBST = """(?x) \$ \{ ([-\w]+) \}""".r;
+private final val RX_DOLLARSUBST = """(?x) \$ \{ ([-\w]+) \}""".r;
+
+private final val RX_PUBKEY = """(?x) ^ peer- (.*) \.pub $""".r;
+
+private final val RX_KEYINFO = """(?x) ^ ([^:]*) : \s* (\S.*) $""".r;
+private final val RX_KEYATTR = """(?x) ^ \s*
+ ([^\s=] | [^\s=][^=]*[^\s=])
+ \s* = \s*
+ (\S.*) $""".r;
/*----- Things that go wrong ----------------------------------------------*/
type Config = scala.collection.Map[String, String];
-val DEFAULTS: Seq[(String, Config => String)] =
+private val DEFAULTS: Seq[(String, Config => String)] =
Seq("repos-base" -> { _ => "tripe-keys.tar.gz" },
"sig-base" -> { _ => "tripe-keys.sig-<SEQ>" },
"repos-url" -> { conf => conf("base-url") + conf("repos-base") },
case "naclbox" => "poly1305/128"
case _ =>
val h = conf("hash");
- JNI.hashsz(h) match {
+ hashsz(h) match {
case -1 => throw new DefaultFailed("hash")
case hsz => s"${h}-hmac/${4*hsz}"
}
"sig-fresh" -> { _ => "always" },
"fingerprint-hash" -> { _("hash") });
-def readConfig(path: String): Config = {
- var m = HashMap[String, String]();
- withCleaner { clean =>
- var in = new FileReader(path); clean { in.close(); }
+private def parseConfig(file: File): HashMap[String, String] = {
+
+ /* Build the new configuration in a temporary place. */
+ val m = HashMap[String, String]();
+
+ /* Read the config file into our map. */
+ file.withReader { in =>
var lno = 1;
for (line <- lines(in)) {
line match {
- case RX_COMMENT() => ();
- case RX_KEYVAL(key, value) => m += key -> value;
+ case RX_COMMENT() => ok;
+ case RX_KEYVAL(key, value) => m(key) = value;
case _ =>
- throw new ConfigSyntaxError(path, lno, "failed to parse line");
+ throw new ConfigSyntaxError(file.getPath, lno,
+ "failed to parse line");
}
lno += 1;
}
}
+ /* Done. */
+ m
+}
+
+private def readConfig(file: File): Config = {
+  val m = parseConfig(file);
+
+ /* Fill in defaults where things have been missed out. */
for ((key, dflt) <- DEFAULTS) {
if (!(m contains key)) {
- try { m += key -> dflt(m); }
+ try { m(key) = dflt(m); }
catch {
case e: DefaultFailed =>
- throw new ConfigDefaultFailed(path, key, e.key, m(e.key));
+ throw new ConfigDefaultFailed(file.getPath, key,
+ e.key, m(e.key));
}
}
}
+
+ /* And we're done. */
m
}
/*----- Managing a key repository -----------------------------------------*/
+def downloadToFile(file: File, url: URL,
+ maxlen: Long = Long.MaxValue,
+ ic: Eyecandy) {
+ ic.job(new SimpleModel(s"connecting to `$url'", -1)) { jr =>
+ fetchURL(url, new URLFetchCallbacks {
+ val out = file.openForOutput();
+ private def toobig() {
+ throw new KeyConfigException(
+ s"remote file `$url' is suspiciously large");
+ }
+ var totlen: Long = 0;
+ override def preflight(conn: URLConnection) {
+ totlen = conn.getContentLength;
+ if (totlen > maxlen) toobig();
+ jr.change(new SimpleModel(s"downloading `$url'", totlen)
+ with DataModel,
+ 0);
+ }
+ override def done(win: Boolean) { out.close(); }
+ def write(buf: Array[Byte], n: Int, len: Long) {
+ if (len + n > maxlen) toobig();
+ out.write(buf, 0, n);
+ jr.step(len + n);
+ }
+ })
+ }
+}
+
/* Lifecycle notes
*
* -> empty
* (delete old/)
*/
+class RepositoryStateException(val state: Repository.State.Value,
+ msg: String)
+ extends Exception(msg);
+
+class KeyConfigException(msg: String) extends Exception(msg);
+
+private def launderFingerprint(fp: String): String =
+ fp filter { _.isLetterOrDigit };
+
+private def fingerprintsEqual(a: String, b: String) =
+ launderFingerprint(a) == launderFingerprint(b);
+
+private def keyFingerprint(kr: File, tag: String, hash: String): String = {
+ val (out, _) = runCommand("key", "-k", kr.getPath, "fingerprint",
+ "-a", hash, "-f", "-secret", tag);
+ nextToken(out) match {
+ case Some((fp, _)) => fp
+ case _ =>
+ throw new IOException("unexpected output from `key fingerprint'");
+ }
+}
+
+private def checkIdent(id: String) {
+ if (id exists { ch => ch == ':' || ch == '.' || ch.isWhitespace })
+ throw new IllegalArgumentException(s"bad key tag `$id'");
+}
+
object Repository {
object State extends Enumeration {
val Empty, Pending, Confirmed, Updating, Committing, Live = Value;
}
+}
+
+def checkConfigSanity(file: File, ic: Eyecandy) {
+ ic.operation("checking new configuration") { _ =>
+
+ /* Make sure we can read and understand the file. */
+ val conf = readConfig(file);
+
+ /* Make sure there are entries which we can use to update. This won't
+ * guarantee that we can reliably update, but it will help.
+ */
+ conf("repos-url"); conf("sig-url");
+ conf("fingerprint-hash"); conf("sig-fresh");
+ conf("master-sequence"); conf("hk-master");
+ }
+}
+
+private val keydatefmt = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss z");
+
+class PrivateKey private[keys](repo: Repository, dir: File) {
+ private[this] lazy val keyring = dir/"keyring";
+ private[this] lazy val meta = parseConfig(dir/"meta");
+ lazy val tag = meta("tag");
+ lazy val time = datefmt synchronized { datefmt.parse(meta("time")); };
+ lazy val fingerprint = keyFingerprint(keyring, tag,
+ repo.config("fingerprint-hash"));
+
+ def remove() { dir.rmTree(); }
+
+ private[this] lazy val (info, _attr) = {
+ val m = Map.newBuilder[String, String];
+ val a = Map.newBuilder[String, String];
+ val (out, _) = runCommand("key", "-k", keyring.getPath,
+ "list", "-vv", tag);
+ val lines = out.lines;
+ while (lines.hasNext) lines.next match {
+ case "attributes:" =>
+ while (lines.hasNext) lines.next match {
+ case RX_KEYATTR(k, v) => a += k -> v;
+ case line => throw new IOException(
+ s"unexpected output from `key list': $line");
+ }
+ case RX_KEYINFO(k, v) =>
+ m += k -> v;
+ case line => throw new IOException(
+ s"unexpected output from `key list': $line");
+ }
+ (m.result, a.result)
+ }
+ lazy val expires = info("expiry") match {
+ case "forever" => None
+ case d => Some(keydatefmt synchronized { keydatefmt.parse(d) })
+ }
+ lazy val ty = info("type");
+ lazy val comment = info("comment");
+ lazy val keyid = {
+
+ /* Ugh. Using `Int' throws an exception on words whose top bit is set
+ * because Java doesn't have proper unsigned integers. There's
+ * `parseUnsignedInt' in Java 1.8, but that limits our Android targets.
+ * And Scala has put its own `Long' object in the way of Java's so we
+ * need this circumlocution.
+ */
+ (JLong.parseLong(info("keyid"), 16)&0xffffffff).toInt;
+ }
+ lazy val attr = _attr;
}
class Repository(val root: File) extends Closeable {
import Repository.State.{Value => State, _};
- val livedir = new File(root, "live");
- val livereposdir = new File(livedir, "repos");
- val newdir = new File(root, "new");
- val olddir = new File(root, "old");
- val pendingdir = new File(root, "pending");
- val tmpdir = new File(root, "tmp");
-
- val lock = {
- if (!root.isDirectory && !root.mkdir()) ???;
- val chan = new FileOutputStream(new File(root, "lk")).getChannel;
- chan.tryLock() match {
- case null =>
- throw new IOException(s"repository `${root.getPath}' locked")
- case lk => lk
- }
- }
-
- def close() {
- lock.release();
- lock.channel.close();
- }
-
- def state: State =
- if (livedir.isDirectory) {
- if (!livereposdir.isDirectory) Confirmed
- else if (newdir.isDirectory && olddir.isDirectory) Committing
- else Live
- } else {
- if (newdir.isDirectory) Updating
- else if (pendingdir.isDirectory) Pending
- else Empty
- }
-
- def commitState(): State = state match {
- case Updating => rmTree(newdir); state
- case Committing =>
- if (!newdir.renameTo(livedir) && !olddir.renameTo(livedir))
- throw new IOException("failed to commit update");
- state
- case st => st;
-
- def clean() {
-
+ /* Important directories and files. */
+ private[this] val configdir = root/"config";
+ private[this] val livedir = configdir/"live";
+ private[this] val livereposdir = livedir/"repos";
+ private[this] val newdir = configdir/"new";
+ private[this] val olddir = configdir/"old";
+ private[this] val pendingdir = configdir/"pending";
+ private[this] val tmpdir = root/"tmp";
+ private[this] val keysdir = root/"keys";
+
+ /* Take out a lock in case of other instances. */
+ private[this] var open = false;
+ private[this] val lock = {
+ root.mkdirNew_!();
+ open = true;
+ (root/"lk").lock_!()
+ }
+ def close() { lock.close(); open = false; }
+ private[this] def checkLocked()
+ { if (!open) throw new IllegalStateException("repository is unlocked"); }
+
+ /* Maintain a cache of some repository state. */
+ private var _state: State = null;
+ private var _config: Config = null;
+ private def invalidate() {
+ _state = null;
+ _config = null;
+ }
+
+ def state: State = {
+ /* Determine the current repository state. */
+
+ if (_state == null)
+ _state = if (livedir.isdir_!) {
+ if (!livereposdir.isdir_!) Confirmed
+ else if (newdir.isdir_!) Updating
+ else Live
+ } else {
+ if (newdir.isdir_!) Committing
+ else if (pendingdir.isdir_!) Pending
+ else Empty
+ }
+
+ _state
+ }
+
+ def checkState(wanted: State*) {
+ /* Ensure we're in a particular state. */
+ checkLocked();
+ val st = state;
+ if (wanted.forall(_ != st)) {
+ throw new RepositoryStateException(st, s"Repository is $st, not " +
+ oxford("or",
+ wanted.map(_.toString)));
+ }
+ }
+
+ def cleanup(ic: Eyecandy) {
+
+ /* If we're part-way through an update then back out or press forward. */
+ state match {
+
+ case Updating =>
+ /* We have a new tree allegedly ready, but the current one is still
+ * in place. It seems safer to zap the new one here, but we could go
+ * either way.
+ */
+
+ ic.operation("rolling back failed update")
+ { _ => newdir.rmTree(); }
+ invalidate(); // should move back to `Live' or `Confirmed'
+
+ case Committing =>
+ /* We have a new tree ready, and an old one moved aside. We're going
+ * to have to move one of them. Let's try committing the new tree.
+ */
+
+ ic.operation("committing interrupted update")
+ { _ => newdir.rename_!(livedir); }
+ invalidate(); // should move on to `Live'
+
+ case _ =>
+ /* Other states are stable. */
+ ok;
+ }
+
+ /* Now work through the things in our area of the filesystem and zap the
+ * ones which don't belong. In particular, this will always erase
+ * `tmpdir'.
+ */
+ ic.operation("cleaning up configuration area") { or =>
+ val st = state;
+ root foreachFile { f => f.getName match {
+ case "lk" | "keys" => ok;
+ case "config" => configdir foreachFile { f => (f.getName, st) match {
+ case ("live", Live | Confirmed) => ok;
+ case ("pending", Pending) => ok;
+ case (_, Updating | Committing) =>
+ unreachable(s"unexpectedly still in `$st' state");
+ case _ => or.step(s"delete `$f'"); f.rmTree();
+ } }
+ case _ => or.step(s"delete `$f'"); f.rmTree();
+ } }
+ }
+ }
+
+ def destroy(ic: Eyecandy) {
+ /* Clear out the entire repository. Everything. It's all gone. */
+ ic.operation("clearing configuration")
+ { _ => root foreachFile { f => if (f.getName != "lk") f.rmTree(); } }
+ }
+
+ def clearTmp() {
+ /* Arrange to have an empty `tmpdir'. */
+ tmpdir.rmTree();
+ tmpdir.mkdir_!();
+ }
+
+ def config: Config = {
+ /* Return the repository configuration. */
+
+ checkLocked();
+ if (_config == null) {
+
+ /* Firstly, decide where to find the configuration file. */
+ checkState(Pending, Confirmed, Live);
+ val dir = state match {
+ case Live | Confirmed => livedir
+ case Pending => pendingdir
+ case _ => ???
+ }
+
+ /* And then read the configuration. */
+ _config = readConfig(dir/"tripe-keys.conf");
+ }
+
+ _config
+ }
+
+ def fetchConfig(url: URL, ic: Eyecandy) {
+ /* Fetch an initial configuration file from a given URL. */
+
+ checkState(Empty);
+ clearTmp();
+
+ val conffile = tmpdir/"tripe-keys.conf";
+ downloadToFile(conffile, url, 16*1024, ic);
+ checkConfigSanity(conffile, ic);
+ configdir.mkdirNew_!();
+ ic.operation("committing configuration")
+ { _ => tmpdir.rename_!(pendingdir); }
+ invalidate(); // should move to `Pending'
+ cleanup(ic);
+ }
+
+ def confirm(ic: Eyecandy) {
+ /* The user has approved the master key fingerprint in the `Pending'
+ * configuration. Advance to `Confirmed'.
+ */
+
+ checkState(Pending);
+ ic.operation("confirming configuration")
+ { _ => pendingdir.rename_!(livedir); }
+ invalidate(); // should move to `Confirmed'
+ }
+
+ def update(ic: Eyecandy) {
+ /* Update the repository from the master.
+ *
+ * Fetch a (possibly new) archive; unpack it; verify the master key
+ * against the known fingerprint; and check the signature on the bundle.
+ */
+
+ cleanup(ic);
+ checkState(Confirmed, Live);
+ val conf = config;
+ clearTmp();
+
+ /* First thing is to download the tarball and signature. */
+ val tarfile = tmpdir/"tripe-keys.tar.gz";
+ downloadToFile(tarfile, new URL(conf("repos-url")), 256*1024, ic);
+ val sigfile = tmpdir/"tripe-keys.sig";
+ val seq = conf("master-sequence");
+ downloadToFile(sigfile,
+ new URL(conf("sig-url").replaceAllLiterally("<SEQ>",
+ seq)),
+ 4*1024, ic);
+
+ /* Unpack the tarball. Carefully. */
+ val unpkdir = tmpdir/"unpk";
+ ic.operation("unpacking archive") { or =>
+ unpkdir.mkdir_!();
+ withCleaner { clean =>
+ val tar = new TarFile(new GZIPInputStream(tarfile.open()));
+ clean { tar.close(); }
+ for (e <- tar) {
+
+ /* Check the filename to make sure it's not evil. */
+ if (e.name(0) == '/' || e.name.split('/').exists { _ == ".." }) {
+ throw new KeyConfigException(
+ s"invalid path `${e.name}' in tarball");
+ }
+
+ /* Report on progress. */
+ or.step(s"entry `${e.name}'");
+
+ /* Find out where this file points. */
+ val f = unpkdir/e.name;
+
+ /* Unpack it. */
+ e.typ match {
+ case DIR =>
+ /* A directory. Create it if it doesn't exist already. */
+
+ f.mkdirNew_!();
+
+ case REG =>
+ /* A regular file. Write stuff to it. */
+
+ e.withStream { in =>
+ f.withOutput { out =>
+ for ((b, n) <- blocks(in)) out.write(b, 0, n);
+ }
+ }
+
+ case ty =>
+ /* Something else. Be paranoid and reject it. */
+
+ throw new KeyConfigException(
+ s"entry `${e.name}' has unexpected object type $ty");
+ }
+ }
+ }
+ }
+
+ /* There ought to be a file in here called `repos/master.pub'. */
+ val reposdir = unpkdir/"repos";
+ val masterfile = reposdir/"master.pub";
+
+ if (!reposdir.isdir_!)
+ throw new KeyConfigException("missing `repos/' directory");
+ if (!masterfile.isreg_!)
+ throw new KeyConfigException("missing `repos/master.pub' file");
+ val mastertag = s"master-$seq";
+
+ /* Fetch the master key's fingerprint. */
+ ic.operation("checking master key fingerprint") { _ =>
+ val foundfp = keyFingerprint(masterfile, mastertag,
+ conf("fingerprint-hash"));
+ val wantfp = conf("hk-master");
+ if (!fingerprintsEqual(wantfp, foundfp)) {
+ throw new KeyConfigException(
+ s"master key #$seq has wrong fingerprint: " +
+ s"expected $wantfp but found $foundfp");
+ }
+ }
+
+ /* Check the archive signature. */
+ ic.operation("verifying archive signature") { or =>
+ runCommand("catsign", "-k", masterfile.getPath, "verify", "-aqC",
+ "-k", mastertag, "-t", conf("sig-fresh"),
+ sigfile.getPath, tarfile.getPath);
+ }
+
+ /* Confirm that the configuration in the new archive is sane. */
+ checkConfigSanity(unpkdir/"tripe-keys.conf", ic);
+
+ /* Build the public keyring. */
+ ic.job(new SimpleModel("counting public keys", -1)) { jr =>
+
+ /* Delete the accumulated keyring. */
+ val pubkeys = unpkdir/"keyring.pub";
+ pubkeys.remove_!();
+
+ /* Figure out which files we need to hack. */
+ var kv = ArrayBuffer[File]();
+ reposdir.foreachFile { file => file.getName match {
+ case RX_PUBKEY(peer) if file.isreg_! => kv += file;
+ case _ => ok;
+ } }
+ kv = kv.sorted;
+ val m = new DetailedModel("collecting public keys", kv.length);
+ var i: Long = 0;
+
+ /* Work through the key files. */
+ for (k <- kv) {
+ m.detail = k.getName;
+ if (!i) jr.change(m, i);
+ else jr.step(i);
+ runCommand("key", "-k", pubkeys.getPath, "merge", k.getPath);
+ i += 1;
+ }
+
+ /* Clean up finally. */
+ (unpkdir/"keyring.pub.old").remove_!();
+ }
+
+ /* Now we just have to juggle the files about. */
+ ic.operation("committing new configuration") { _ =>
+ unpkdir.rename_!(newdir);
+ livedir.rename_!(olddir);
+ newdir.rename_!(livedir);
+ }
+
+ /* All done. */
+ invalidate(); // should move to `Live'
+ cleanup(ic);
+ }
+
+ def generateKey(tag: String, label: String, ic: Eyecandy) {
+ checkIdent(tag);
+ if (label.exists { _ == '/' })
+ throw new IllegalArgumentException(s"invalid label string `$label'");
+ if ((keysdir/label).isdir_!)
+ throw new IllegalArgumentException(s"key `$label' already exists");
+
+ cleanup(ic);
+ checkState(Live);
+ val conf = config;
+ clearTmp();
+
+ val now = datefmt synchronized { datefmt.format(new Date) };
+ val kr = tmpdir/"keyring";
+ val pub = tmpdir/s"peer-$tag.pub";
+ val param = livereposdir/"param";
+
+ keysdir.mkdirNew_!();
+
+ ic.operation("fetching key-generation parameters") { _ =>
+ runCommand("key", "-k", kr.getPath, "merge", param.getPath);
+ }
+ ic.operation("generating new key") { _ =>
+ runCommand("key", "-k", kr.getPath, "add",
+ "-a", conf("kx-genalg"), "-p", "param",
+ "-e", conf("kx-expire"), "-t", tag, "tripe");
+ }
+ ic.operation("extracting public key") { _ =>
+ runCommand("key", "-k", kr.getPath, "extract",
+ "-f", "-secret", pub.getPath, tag);
+ }
+ ic.operation("writing metadata") { _ =>
+ tmpdir/"meta" withWriter { w =>
+ w.write(s"tag = $tag\n");
+ w.write(s"time = $now\n");
+ }
+ }
+ ic.operation("installing new key") { _ =>
+ tmpdir.rename_!(keysdir/label);
+ }
+ }
+
+ def key(label: String): PrivateKey = new PrivateKey(this, keysdir/label);
+ def keyLabels: Seq[String] = (keysdir.files_! map { _.getName }).toStream;
+ def keys: Seq[PrivateKey] = keyLabels map { k => key(k) };
}
/*----- That's all, folks -------------------------------------------------*/