--- /dev/null
+#! /usr/bin/python
+###
+### Create, upgrade, and maintain (native and cross-) chroots
+###
+### (c) 2018 Mark Wooding
+###
+
+###----- Licensing notice ---------------------------------------------------
+###
+### This file is part of the distorted.org.uk chroot maintenance tools.
+###
+### distorted-chroot is free software: you can redistribute it and/or
+### modify it under the terms of the GNU General Public License as
+### published by the Free Software Foundation; either version 2 of the
+### License, or (at your option) any later version.
+###
+### distorted-chroot is distributed in the hope that it will be useful,
+### but WITHOUT ANY WARRANTY; without even the implied warranty of
+### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+### General Public License for more details.
+###
+### You should have received a copy of the GNU General Public License
+### along with distorted-chroot. If not, write to the Free Software
+### Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+### USA.
+
+## still to do:
+## tidy up
+
+import calendar as CAL
+import contextlib as CTX
+import errno as E
+import fcntl as FC
+import fnmatch as FM
+import glob as GLOB
+import itertools as I
+import optparse as OP
+import os as OS
+import random as R
+import re as RX
+import signal as SIG
+import select as SEL
+import stat as ST
+from cStringIO import StringIO
+import sys as SYS
+import time as T
+import traceback as TB
+
+import jobclient as JC
+
+QUIS = OS.path.basename(SYS.argv[0])
+TODAY = T.strftime("%Y-%m-%d")
+NOW = T.time()
+
+###--------------------------------------------------------------------------
+### Random utilities.
+
+RC = 0
+def moan(msg):
+ """Print MSG to stderr as a warning."""
+ if not OPT.silent: OS.write(2, "%s: %s\n" % (QUIS, msg))
+def error(msg):
+ """Print MSG to stderr, and remember to exit nonzero."""
+ global RC
+ moan(msg)
+ RC = 2
+
+class ExpectedError (Exception):
+ """A fatal error which shouldn't print a backtrace."""
+ pass
+
+@CTX.contextmanager
+def toplevel_handler():
+ """Catch `ExpectedError's and report Unixish error messages."""
+ try: yield None
+ except ExpectedError, err: moan(err); SYS.exit(2)
+
+def spew(msg):
+ """Print MSG to stderr as a debug trace."""
+ if OPT.debug: OS.write(2, ";; %s\n" % msg)
+
+class Tag (object):
+ """Unique objects with no internal structure."""
+ def __init__(me, label): me._label = label
+ def __str__(me): return '#<%s %s>' % (me.__class__.__name__, me._label)
+ def __repr__(me): return '#<%s %s>' % (me.__class__.__name__, me._label)
+
+class Struct (object):
+ def __init__(me, **kw): me.__dict__.update(kw)
+
+class Cleanup (object):
+ """
+ A context manager for stacking other context managers.
+
+ By itself, it does nothing. Attach other context managers with `enter' or
+ loose cleanup functions with `add'. On exit, contexts are left and
+ cleanups performed in reverse order.
+ """
+ def __init__(me):
+ me._cleanups = []
+ def __enter__(me):
+ return me
+ def __exit__(me, exty, exval, extb):
+ trap = False
+ for c in reversed(me._cleanups):
+ if c(exty, exval, extb): trap = True
+ return trap
+ def enter(me, ctx):
+ v = ctx.__enter__()
+ me._cleanups.append(ctx.__exit__)
+ return v
+ def add(me, func):
+ me._cleanups.append(lambda exty, exval, extb: func())
+
+def zulu(t = None):
+ """Return the time T (default now) as a string."""
+ return T.strftime("%Y-%m-%dT%H:%M:%SZ", T.gmtime(t))
+
+R_ZULU = RX.compile(r"^(\d+)-(\d+)-(\d+)T(\d+):(\d+):(\d+)Z$")
+def unzulu(z):
+ """Convert the time string Z back to a Unix time."""
+ m = R_ZULU.match(z)
+ if not m: raise ValueError("bad time spec `%s'" % z)
+ yr, mo, dy, hr, mi, se = map(int, m.groups())
+  return CAL.timegm((yr, mo, dy, hr, mi, se, 0, 0, 0))
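+
+## An illustrative round trip (assuming the helpers above): `zulu' formats
+## in UTC, so `unzulu' must convert back with `timegm', not `mktime'.
+##
+##     stamp = zulu(0)                      ## "1970-01-01T00:00:00Z"
+##     assert unzulu(stamp) == 0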
+
+###--------------------------------------------------------------------------
+### Simple select(2) utilities.
+
+class BaseSelector (object):
+ """
+ A base class for hooking into `select_loop'.
+
+ See `select_loop' for details of the protocol.
+ """
+ def preselect(me, rfds, wfds): pass
+ def postselect_read(me, fd): pass
+ def postselect_write(me, fd): pass
+
+class WriteLinesSelector (BaseSelector):
+ """Write whole lines to an output file descriptor."""
+
+ def __init__(me, fd, nextfn = None, *args, **kw):
+ """
+ Initialize the WriteLinesSelector to write to the file descriptor FD.
+
+ The FD is marked non-blocking.
+
+ The lines are produced by the NEXTFN, which is called without arguments.
+ It can affect the output in three ways:
+
+ * It can return a string (or almost any other kind of object, which
+ will be converted into a string by `str'), which will be written to
+ the descriptor followed by a newline. Lines are written in the order
+ in which they are produced.
+
+ * It can return `None', which indicates that there are no more items to
+ be written for the moment. The function will be called again from
+ time to time, to see if it has changed its mind. This is the right
+ thing to do in order to stall output temporarily.
+
+ * It can raise `StopIteration', which indicates that there will never
+ be any more items. The file descriptor will be closed.
+
+ Subclasses can override this behaviour by defining a method `_next' and
+ passing `None' as the NEXTFN.
+ """
+ super(WriteLinesSelector, me).__init__(*args, **kw)
+ set_nonblocking(fd)
+ me._fd = fd
+ if nextfn is not None: me._next = nextfn
+
+ ## Selector state.
+ ##
+ ## * `_buf' contains a number of output items, already formatted, and
+ ## ready for output in a single batch. It might be empty.
+ ##
+ ## * `_pos' is the current output position in `_buf'.
+ ##
+ ## * `_more' is set unless the `_next' function has raised
+ ## `StopIteration': it indicates that we should close the descriptor
+    ##   once all of the remaining data in the buffer has been sent.
+ me._buf = ""
+ me._pos = 0
+ me._more = True
+
+ def _refill(me):
+ """Refill `_buf' by calling `_next'."""
+ sio = StringIO(); n = 0
+ while n < 4096:
+ try: item = me._next()
+ except StopIteration: me._more = False; break
+ if item is None: break
+ item = str(item)
+ sio.write(item); sio.write("\n"); n += len(item) + 1
+ me._buf = sio.getvalue(); me._pos = 0
+
+ def preselect(me, rfds, wfds):
+ if me._fd == -1: return
+ if me._buf == "" and me._more: me._refill()
+ if me._buf != "" or not me._more: wfds.append(me._fd)
+
+ def postselect_write(me, fd):
+ if fd != me._fd: return
+ while True:
+      if me._pos >= len(me._buf):
+        if me._more: me._refill()
+        if me._pos >= len(me._buf):
+          if not me._more: OS.close(me._fd); me._fd = -1
+          break
+ try: n = OS.write(me._fd, me._buf[me._pos:])
+ except OSError, err:
+        if err.errno == E.EAGAIN or err.errno == E.EWOULDBLOCK: break
+ elif err.errno == E.EPIPE: OS.close(me._fd); me._fd = -1; break
+ else: raise
+ me._pos += n
+
+class ReadLinesSelector (BaseSelector):
+ """Report whole lines from an input file descriptor as they arrive."""
+
+ def __init__(me, fd, linefn = None, *args, **kw):
+ """
+ Initialize the ReadLinesSelector to read from the file descriptor FD.
+
+ The FD is marked non-blocking.
+
+ For each whole line, and the final partial line (if any), the selector
+ calls LINEFN with the line as an argument (without the terminating
+ newline, if any).
+
+ Subclasses can override this behaviour by defining a method `_line' and
+ passing `None' as the LINEFN.
+ """
+ super(ReadLinesSelector, me).__init__(*args, **kw)
+ set_nonblocking(fd)
+ me._fd = fd
+ me._buf = ""
+ if linefn is not None: me._line = linefn
+
+ def preselect(me, rfds, wfds):
+ if me._fd != -1: rfds.append(me._fd)
+
+ def postselect_read(me, fd):
+ if fd != me._fd: return
+ while True:
+ try: buf = OS.read(me._fd, 4096)
+ except OSError, err:
+        if err.errno == E.EAGAIN or err.errno == E.EWOULDBLOCK: break
+ else: raise
+ if buf == "":
+ OS.close(me._fd); me._fd = -1
+ if me._buf: me._line(me._buf)
+ break
+ buf = me._buf + buf
+ i = 0
+ while True:
+ try: j = buf.index("\n", i)
+ except ValueError: break
+ me._line(buf[i:j])
+ i = j + 1
+ me._buf = buf[i:]
+
+def select_loop(selectors):
+ """
+ Multiplex I/O between the various SELECTORS.
+
+ A `selector' SEL is an object which implements the selector protocol, which
+ consists of three methods.
+
+ * SEL.preselect(RFDS, WFDS) -- add any file descriptors which the
+ selector is interested in reading from to the list RFDS, and add file
+ descriptors it's interested in writing to to the list WFDS.
+
+ * SEL.postselect_read(FD) -- informs the selector that FD is ready for
+ reading.
+
+ * SEL.postselect_write(FD) -- informs the selector that FD is ready for
+ writing.
+
+ The `select_loop' function loops as follows.
+
+ * It calls the `preselect' method on each SELECTOR to determine what I/O
+ events it thinks are interesting.
+
+ * It waits for some interesting event to happen.
+
+ * It calls the `postselect_read' and/or `postselect_write' methods on all
+ of the selectors for each file descriptor which is ready.
+
+ The loop ends when no selector is interested in any events. This is simple
+ but rather inefficient.
+ """
+ while True:
+ rfds, wfds = [], []
+ for sel in selectors: sel.preselect(rfds, wfds)
+ if not rfds and not wfds: break
+ rfds, wfds, _ = SEL.select(rfds, wfds, [])
+ for fd in rfds:
+ for sel in selectors: sel.postselect_read(fd)
+ for fd in wfds:
+ for sel in selectors: sel.postselect_write(fd)
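+
+## A minimal sketch of wiring selectors together, echoing lines through a
+## pipe (the descriptors and callbacks are made up for illustration):
+##
+##     rfd, wfd = OS.pipe()
+##     lines = iter(["hello", "world"])
+##     select_loop([WriteLinesSelector(wfd, lambda: next(lines)),
+##                  ReadLinesSelector(rfd, lambda line: spew(line))])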
+
+###--------------------------------------------------------------------------
+### Running subprocesses.
+
+def wait_outcome(st):
+ """
+ Given a ST from `waitpid' (or similar), return a human-readable outcome.
+ """
+ if OS.WIFSIGNALED(st): return "killed by signal %d" % OS.WTERMSIG(st)
+ elif OS.WIFEXITED(st):
+ rc = OS.WEXITSTATUS(st)
+ if rc: return "failed: rc = %d" % rc
+ else: return "completed successfully"
+ else: return "died with incomprehensible status 0x%04x" % st
+
+class SubprocessFailure (Exception):
+ """An exception indicating that a subprocess failed."""
+ def __init__(me, what, st):
+ me.st = st
+ me.what = what
+ if OS.WIFEXITED(st): me.rc, me.sig = OS.WEXITSTATUS(st), None
+ elif OS.WIFSIGNALED(st): me.rc, me.sig = None, OS.WTERMSIG(st)
+ else: me.rc, me.sig = None, None
+ def __str__(me):
+ return "subprocess `%s' %s" % (me.what, wait_outcome(me.st))
+
+INHERIT = Tag('INHERIT')
+PIPE = Tag('PIPE')
+DISCARD = Tag('DISCARD')
+@CTX.contextmanager
+def subprocess(command,
+ stdin = INHERIT, stdout = INHERIT, stderr = INHERIT,
+ cwd = INHERIT, jobserver = DISCARD):
+ """
+ Hairy context manager for running subprocesses.
+
+ The COMMAND is a list of arguments; COMMAND[0] names the program to be
+ invoked. (There's currently no way to run a program with an unusual
+ `argv[0]'.)
+
+ The keyword arguments `stdin', `stdout', and `stderr' explain what to do
+ with the standard file descriptors.
+
+ * `INHERIT' means that they should be left alone: the child will use a
+ copy of the parent's descriptor. This is the default.
+
+ * `DISCARD' means that the descriptor should be re-opened onto
+ `/dev/null' (for reading or writing as appropriate).
+
+ * `PIPE' means that the descriptor should be re-opened as (the read or
+ write end, as appropriate, of) a pipe, and the other end returned to
+ the context body.
+
+  Similarly, the JOBSERVER may be `INHERIT' to pass the jobserver descriptors
+ and environment variable down to the child, or `DISCARD' to close it. The
+ default is `DISCARD'.
+
+ The CWD may be `INHERIT' to run the child with the same working directory
+ as the parent, or a pathname to change to an explicitly given working
+ directory.
+
+  The context body is passed three values: file descriptors for the other
+  ends of the stdin, stdout, and stderr pipes respectively, or -1 where
+  there is no pipe.
+
+ The context owns the pipe descriptors, and is expected to close them
+ itself. (Timing of closure is significant, particularly for `stdin'.)
+ """
+
+ ## Set up.
+ r_in, w_in = -1, -1
+ r_out, w_out = -1, -1
+ r_err, w_err = -1, -1
+ spew("running subprocess `%s'" % " ".join(command))
+
+ ## Clean up as necessary...
+ try:
+
+ ## Set up stdin.
+ if stdin is PIPE: r_in, w_in = OS.pipe()
+ elif stdin is DISCARD: r_in = OS.open("/dev/null", OS.O_RDONLY)
+ elif stdin is not INHERIT:
+ raise ValueError("bad `stdin' value `%r'" % stdin)
+
+ ## Set up stdout.
+ if stdout is PIPE: r_out, w_out = OS.pipe()
+ elif stdout is DISCARD: w_out = OS.open("/dev/null", OS.O_WRONLY)
+ elif stdout is not INHERIT:
+ raise ValueError("bad `stderr' value `%r'" % stdout)
+
+ ## Set up stderr.
+ if stderr is PIPE: r_err, w_err = OS.pipe()
+ elif stderr is DISCARD: w_err = OS.open("/dev/null", OS.O_WRONLY)
+ elif stderr is not INHERIT:
+ raise ValueError("bad `stderr' value `%r'" % stderr)
+
+ ## Start up the child.
+ kid = OS.fork()
+
+ if kid == 0:
+ ## Child process.
+
+ ## Fix up stdin.
+ if r_in != -1: OS.dup2(r_in, 0); OS.close(r_in)
+ if w_in != -1: OS.close(w_in)
+
+ ## Fix up stdout.
+ if w_out != -1: OS.dup2(w_out, 1); OS.close(w_out)
+ if r_out != -1: OS.close(r_out)
+
+ ## Fix up stderr.
+ if w_err != -1: OS.dup2(w_err, 2); OS.close(w_err)
+ if r_err != -1: OS.close(r_err)
+
+ ## Change directory.
+ if cwd is not INHERIT: OS.chdir(cwd)
+
+ ## Fix up the jobserver.
+ if jobserver is DISCARD: SCHED.close_jobserver()
+
+ ## Run the program.
+ try: OS.execvp(command[0], command)
+ except OSError, err:
+ moan("failed to run `%s': %s" % err.strerror)
+ OS._exit(127)
+
+ ## Close the other ends of the pipes.
+ if r_in != -1: OS.close(r_in); r_in = -1
+ if w_out != -1: OS.close(w_out); w_out = -1
+ if w_err != -1: OS.close(w_err); w_err = -1
+
+ ## Return control to the context body. Remember not to close its pipes.
+ yield w_in, r_out, r_err
+ w_in = r_out = r_err = -1
+
+ ## Collect the child process's exit status.
+ _, st = OS.waitpid(kid, 0)
+ spew("subprocess `%s' %s" % (" ".join(command), wait_outcome(st)))
+ if st: raise SubprocessFailure(" ".join(command), st)
+
+ ## Tidy up.
+ finally:
+
+ ## Close any left-over file descriptors.
+ for fd in [r_in, w_in, r_out, w_out, r_err, w_err]:
+ if fd != -1: OS.close(fd)
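+
+## A hedged sketch of driving `subprocess' by hand (the command is
+## illustrative; `run_program' below is usually more convenient).  The body
+## owns the pipe ends and must close them itself:
+##
+##     with subprocess(["cat"], stdin = PIPE, stdout = PIPE) \
+##             as (fd_in, fd_out, _):
+##       OS.write(fd_in, "hello\n"); OS.close(fd_in)
+##       out = OS.read(fd_out, 4096); OS.close(fd_out)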
+
+def set_nonblocking(fd):
+ """Mark the descriptor FD as non-blocking."""
+ FC.fcntl(fd, FC.F_SETFL, FC.fcntl(fd, FC.F_GETFL) | OS.O_NONBLOCK)
+
+class DribbleOut (BaseSelector):
+ """A simple selector to feed a string to a descriptor, in pieces."""
+ def __init__(me, fd, string, *args, **kw):
+ super(DribbleOut, me).__init__(*args, **kw)
+ me._fd = fd
+ me._string = string
+ me._i = 0
+ set_nonblocking(me._fd)
+ me.result = None
+ def preselect(me, rfds, wfds):
+ if me._fd != -1: wfds.append(me._fd)
+ def postselect_write(me, fd):
+ if fd != me._fd: return
+ try: n = OS.write(me._fd, me._string)
+ except OSError, err:
+ if err.errno == E.EAGAIN or err.errno == E.EWOULDBLOCK: return
+ elif err.errno == E.EPIPE: OS.close(me._fd); me._fd = -1; return
+ else: raise
+ if n == len(me._string): OS.close(me._fd); me._fd = -1
+ else: me._string = me._string[n:]
+
+class DribbleIn (BaseSelector):
+ """A simple selector to collect all the input as a big string."""
+ def __init__(me, fd, *args, **kw):
+ super(DribbleIn, me).__init__(*args, **kw)
+ me._fd = fd
+ me._buf = StringIO()
+ set_nonblocking(me._fd)
+ def preselect(me, rfds, wfds):
+ if me._fd != -1: rfds.append(me._fd)
+ def postselect_read(me, fd):
+ if fd != me._fd: return
+ while True:
+ try: buf = OS.read(me._fd, 4096)
+ except OSError, err:
+ if err.errno == E.EAGAIN or err.errno == E.EWOULDBLOCK: break
+ else: raise
+ if buf == "": OS.close(me._fd); me._fd = -1; break
+ else: me._buf.write(buf)
+ @property
+ def result(me): return me._buf.getvalue()
+
+RETURN = Tag('RETURN')
+def run_program(command,
+ stdin = INHERIT, stdout = INHERIT, stderr = INHERIT,
+ *args, **kwargs):
+ """
+ A simplifying wrapper around `subprocess'.
+
+ The COMMAND is a list of arguments; COMMAND[0] names the program to be
+ invoked, as for `subprocess'.
+
+ The keyword arguments `stdin', `stdout', and `stderr' explain what to do
+ with the standard file descriptors.
+
+ * `INHERIT' means that they should be left alone: the child will use a
+ copy of the parent's descriptor.
+
+ * `DISCARD' means that the descriptor should be re-opened onto
+ `/dev/null' (for reading or writing as appropriate).
+
+ * `RETURN', for an output descriptor, means that all of the output
+ produced on that descriptor should be collected and returned as a
+ string.
+
+ * A string, for stdin, means that the string should be provided on the
+ child's standard input.
+
+ (The value `PIPE' is not permitted here.)
+
+ Other arguments are passed on to `subprocess'.
+
+ If no descriptors are marked `RETURN', then the function returns `None'; if
+ exactly one descriptor is so marked, then the function returns that
+ descriptor's output as a string; otherwise, it returns a tuple of strings
+ for each such descriptor, in the usual order.
+ """
+ kw = dict(); kw.update(kwargs)
+ selfn = []
+
+ if isinstance(stdin, basestring):
+ kw['stdin'] = PIPE; selfn.append(lambda fds: DribbleOut(fds[0], stdin))
+ elif stdin is INHERIT or stdin is DISCARD:
+ kw['stdin'] = stdin
+ else:
+ raise ValueError("bad `stdin' value `%r'" % stdin)
+
+ if stdout is RETURN:
+ kw['stdout'] = PIPE; selfn.append(lambda fds: DribbleIn(fds[1]))
+ elif stdout is INHERIT or stdout is DISCARD:
+ kw['stdout'] = stdout
+ else:
+ raise ValueError("bad `stdout' value `%r'" % stdout)
+
+ if stderr is RETURN:
+ kw['stderr'] = PIPE; selfn.append(lambda fds: DribbleIn(fds[2]))
+ elif stderr is INHERIT or stderr is DISCARD:
+ kw['stderr'] = stderr
+ else:
+ raise ValueError("bad `stderr' value `%r'" % stderr)
+
+ with subprocess(command, *args, **kw) as fds:
+ sel = [fn(fds) for fn in selfn]
+ select_loop(sel)
+ rr = []
+ for s in sel:
+ r = s.result
+ if r is not None: rr.append(r)
+ if len(rr) == 0: return None
+ if len(rr) == 1: return rr[0]
+ else: return tuple(rr)
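+
+## For example (a hedged sketch; the commands are illustrative):
+##
+##     listing = run_program(["ls", "/"], stdout = RETURN)
+##     out, err = run_program(["sh", "-c", "echo out; echo err >&2"],
+##                            stdout = RETURN, stderr = RETURN)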
+
+###--------------------------------------------------------------------------
+### Other system-ish utilities.
+
+@CTX.contextmanager
+def safewrite(path):
+ """
+ Context manager for writing to a file.
+
+ A new file, named `PATH.new', is opened for writing, and the file object
+ provided to the context body. If the body completes normally, the file is
+ closed and renamed to PATH. If the body raises an exception, the file is
+ still closed, but not renamed into place.
+ """
+ new = path + ".new"
+ with open(new, "w") as f: yield f
+ OS.rename(new, path)
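+
+## For instance, to replace a file atomically (the path is illustrative):
+##
+##     with safewrite("/tmp/example.conf") as f:
+##       f.write("key = value\n")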
+
+@CTX.contextmanager
+def safewrite_root(path, mode = None, uid = None, gid = None):
+ """
+ Context manager for writing to a file with root privileges.
+
+ This is as for `safewrite', but the file is opened and written as root.
+ """
+ new = path + ".new"
+ with subprocess(C.ROOTLY + ["tee", new],
+ stdin = PIPE, stdout = DISCARD) as (fd_in, _, _):
+ pipe = OS.fdopen(fd_in, 'w')
+ try: yield pipe
+ finally: pipe.close()
+ if mode is not None: run_program(C.ROOTLY + ["chmod", mode, new])
+ if uid is not None:
+ run_program(C.ROOTLY + ["chown",
+ uid + (gid is not None and ":" + gid or ""),
+ new])
+ elif gid is not None:
+ run_program(C.ROOTLY + ["chgrp", gid, new])
+ run_program(C.ROOTLY + ["mv", new, path])
+
+def mountpoint_p(dir):
+ """Return true if DIR is a mountpoint."""
+
+ ## A mountpoint can be distinguished because it is a directory whose device
+ ## number differs from its parent.
+ try: st1 = OS.stat(dir)
+ except OSError, err:
+ if err.errno == E.ENOENT: return False
+ else: raise
+ if not ST.S_ISDIR(st1.st_mode): return False
+ st0 = OS.stat(OS.path.join(dir, ".."))
+ return st0.st_dev != st1.st_dev
+
+def mkdir_p(dir, mode = 0777):
+ """
+ Make a directory DIR, and any parents, as necessary.
+
+ Unlike `OS.makedirs', this doesn't fail if DIR already exists.
+ """
+ d = ""
+ for p in dir.split("/"):
+ d = OS.path.join(d, p)
+ if d == "": continue
+ try: OS.mkdir(d, mode)
+ except OSError, err:
+ if err.errno == E.EEXIST: pass
+ else: raise
+
+def umount(fs):
+ """
+ Unmount the filesystem FS.
+
+ The FS may be the block device holding the filesystem, or (more usually)
+ the mount point.
+ """
+
+ ## Sometimes random things can prevent unmounting. Be persistent.
+ for i in xrange(5):
+ try: run_program(C.ROOTLY + ["umount", fs], stderr = DISCARD)
+ except SubprocessFailure, err:
+ if err.rc == 32: pass
+ else: raise
+ else: return
+ T.sleep(0.2)
+ run_program(C.ROOTLY + ["umount", fs], stderr = DISCARD)
+
+@CTX.contextmanager
+def lockfile(lock, exclp = True, waitp = True):
+ """
+ Acquire an exclusive lock on a named file LOCK while executing the body.
+
+ If WAITP is true, wait until the lock is available; if false, then fail
+ immediately if the lock can't be acquired.
+ """
+ fd = -1
+ flag = 0
+ if exclp: flag |= FC.LOCK_EX
+ else: flag |= FC.LOCK_SH
+ if not waitp: flag |= FC.LOCK_NB
+ spew("acquiring %s lock on `%s'" %
+ (exclp and "exclusive" or "shared", lock))
+ try:
+ while True:
+
+ ## Open the file and take note of which file it is.
+ fd = OS.open(lock, OS.O_RDWR | OS.O_CREAT, 0666)
+ st0 = OS.fstat(fd)
+
+ ## Acquire the lock, waiting if necessary.
+ FC.lockf(fd, flag)
+
+ ## Check that the lock file is still the same one. It's permissible
+ ## for the lock holder to release the lock by unlinking or renaming the
+ ## lock file, in which case there might be a different lockfile there
+ ## now which we need to acquire instead.
+ ##
+ ## It's tempting to `optimize' this code by opening a new file
+ ## descriptor here so as to elide the additional call to fstat(2)
+ ## above. But this doesn't work: if we successfully acquire the lock,
+ ## we then have two file descriptors open on the lock file, so we have
+      ## to close one -- but, under the daft fcntl(2) rules, even closing
+      ## the new descriptor will release the lock immediately.
+      try:
+        st1 = OS.stat(lock)
+      except OSError, err:
+        if err.errno == E.ENOENT: st1 = None
+        else: raise
+      if st1 is not None and \
+         st0.st_dev == st1.st_dev and st0.st_ino == st1.st_ino:
+        break
+      OS.close(fd); fd = -1
+
+ ## We have the lock, so away we go.
+ spew("lock `%s' acquired" % lock)
+ yield None
+ spew("lock `%s' released" % lock)
+
+ finally:
+ if fd != -1: OS.close(fd)
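+
+## A sketch of typical use (the lock path is illustrative):
+##
+##     with lockfile("/tmp/example.lock", exclp = True, waitp = True):
+##       pass  ## critical section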
+
+def block_device_p(dev):
+ """Return true if DEV names a block device."""
+ try: st = OS.stat(dev)
+ except OSError, err:
+ if err.errno == E.ENOENT: return False
+ else: raise
+ else: return ST.S_ISBLK(st.st_mode)
+
+###--------------------------------------------------------------------------
+### Running parallel jobs.
+
+## Return codes from `check'
+SLEEP = Tag('SLEEP')
+READY = Tag('READY')
+FAILED = Tag('FAILED')
+DONE = Tag('DONE')
+
+class BaseJob (object):
+ """
+ Base class for jobs.
+
+ Subclasses must implement `run' and `_mkname', and probably ought to extend
+ `prepare' and `check'.
+ """
+
+ ## A magic token to prevent sneaky uninterned jobs.
+ _MAGIC = Tag('MAGIC')
+
+ ## A map from job names to objects.
+ _MAP = {}
+
+ ## Number of tail lines of the log to print on failure.
+ LOGLINES = 20
+
+ def __init__(me, _token, *args, **kw):
+ """
+ Initialize a job.
+
+ Jobs are interned! Don't construct instances (of subclasses) directly:
+ use the `ensure' class method.
+ """
+ assert _token is me._MAGIC
+ super(BaseJob, me).__init__(*args, **kw)
+
+ ## Dependencies on other jobs.
+ me._deps = None
+ me._waiting = set()
+
+ ## Attributes maintained by the JobServer.
+ me.done = False
+ me.started = False
+ me.win = None
+ me._token = None
+ me._known = False
+ me._st = None
+ me._logkid = -1
+ me._logfile = None
+
+ def prepare(me):
+ """
+ Establish any prerequisite jobs.
+
+ Delaying this allows command-line settings to override those chosen by
+ dependent jobs.
+ """
+ pass
+
+ @classmethod
+ def ensure(cls, *args, **kw):
+ """
+ Return the unique job with the given parameters.
+
+ If a matching job already exists, then return it. Otherwise, create the
+ new job, register it in the table, and notify the scheduler about it.
+ """
+ me = cls(_token = cls._MAGIC, *args, **kw)
+ try:
+ job = cls._MAP[me.name]
+ except KeyError:
+ cls._MAP[me.name] = me
+ SCHED.add(me)
+ return me
+ else:
+ return job
+
+ ## Naming.
+ @property
+ def name(me):
+ """Return the job's name, as calculated by `_mkname'."""
+ try: name = me._name
+ except AttributeError: name = me._name = me._mkname()
+ return name
+
+ ## Subclass responsibilities.
+ def _mkname(me):
+ """
+ Return the job's name.
+
+ By default, this is an unhelpful string which is distinct for every job.
+ Subclasses should normally override this method to return a name as an
+ injective function of the job parameters.
+ """
+ return "%s.%x" % (me.__class__.__name__, id(me))
+
+ def check(me):
+ """
+ Return whether the job is ready to run.
+
+ Returns a pair STATE, REASON. The REASON is a human-readable string
+ explaining what's going on, or `None' if it's not worth explaining. The
+ STATE is one of the following.
+
+ * `READY' -- the job can be run at any time.
+
+ * `FAILED' -- the job can't be started. Usually, this means that some
+ prerequisite job failed, there was some error in the job's
+ parameters, or the environment is unsuitable for the job to run.
+
+ * `DONE' -- the job has nothing to do. Usually, this means that the
+ thing the job acts on is already up-to-date. It's bad form to do
+ even minor work in `check'.
+
+ * `SLEEP' -- the job can't be run right now. It has arranged to be
+ retried if conditions change. (Spurious wakeups are permitted and
+ must be handled correctly.)
+
+ The default behaviour checks the set of dependencies, as built by the
+ `await' method, and returns `SLEEP' or `FAILED' as appropriate, or
+ `READY' if all the prerequisite jobs have completed successfully.
+ """
+ for job in me._deps:
+ if not job.done:
+ job._waiting.add(me)
+ return SLEEP, "waiting for job `%s'" % job.name
+ elif not job.win and not OPT.ignerr:
+ return FAILED, "dependent on failed job `%s'" % job.name
+ return READY, None
+
+ ## Subclass utilities.
+ def await(me, job):
+ """Make sure that JOB completes before allowing this job to start."""
+ me._deps.add(job)
+
+ def _logtail(me):
+ """
+ Dump the last `LOGLINES' lines of the logfile.
+
+ This is called if the job fails and was being run quietly, to provide the
+ user with some context for the failure.
+ """
+
+ ## Gather blocks from the end of the log until we have enough lines.
+ with open(me._logfile, 'r') as f:
+ nlines = 0
+ bufs = []
+ bufsz = 4096
+ f.seek(0, 2); off = f.tell()
+ spew("start: off = %d" % off)
+ while nlines <= me.LOGLINES and off > 0:
+ off = max(0, off - bufsz)
+ f.seek(off, 0)
+ spew("try at off = %d" % off)
+ buf = f.read(bufsz)
+ nlines += buf.count("\n")
+ spew("now lines = %d" % nlines)
+ bufs.append(buf)
+ buf = ''.join(reversed(bufs))
+
+ ## We probably overshot. Skip the extra lines from the start.
+ i = 0
+ while nlines > me.LOGLINES: i = buf.index("\n", i) + 1; nlines -= 1
+
+ ## If we ended up trimming the log, print an ellipsis.
+ if off > 0 or i > 0: print "%-*s * [...]" % (TAGWD, me.name)
+
+ ## Print the log tail.
+ lines = buf[i:].split("\n")
+ if lines and lines[-1] == '': lines.pop()
+ for line in lines: print "%-*s %s" % (TAGWD, me.name, line)
+
+class BaseJobToken (object):
+ """
+ A job token is the authorization for a job to be run.
+
+ Subclasses must implement `recycle' to allow some other job to use the
+ token.
+ """
+ pass
+
+class TrivialJobToken (BaseJobToken):
+ """
+ A trivial reusable token, for when issuing jobs in parallel without limit.
+
+ There only needs to be one of these.
+ """
+ def recycle(me):
+ spew("no token needed; nothing to recycle")
+TRIVIAL_TOKEN = TrivialJobToken()
+
+class JobServerToken (BaseJobToken):
+ """A job token storing a byte from the jobserver pipe."""
+ def __init__(me, char, pipefd, *args, **kw):
+ super(JobServerToken, me).__init__(*args, **kw)
+ me._char = char
+ me._fd = pipefd
+ def recycle(me):
+ spew("returning token to jobserver pipe")
+ OS.write(me._fd, me._char)
+
+class PrivateJobToken (BaseJobToken):
+ """
+ The private job token belonging to a scheduler.
+
+ When running under a GNU Make jobserver, there is a token for each byte in
+ the pipe, and an additional one which represents the slot we're actually
+ running in. This class represents that additional token.
+ """
+ def __init__(me, sched, *args, **kw):
+ super(PrivateJobToken, me).__init__(*args, **kw)
+ me._sched = sched
+ def recycle(me):
+ assert me._sched._privtoken is None
+ spew("recycling private token")
+ me._sched._privtoken = me
+
+TAGWD = 29
+LOGKEEP = 20
+
+class JobScheduler (object):
+ """
+ The main machinery for running and ordering jobs.
+
+ This handles all of the details of job scheduling.
+ """
+
+ def __init__(me, rfd = -1, wfd = -1, npar = 1):
+ """
+ Initialize a scheduler.
+
+ * RFD and WFD are the read and write ends of the jobserver pipe, as
+ determined from the `MAKEFLAGS' environment variable, or -1.
+
+ * NPAR is the maximum number of jobs to run in parallel, or `True' if
+ there is no maximum (i.e., we're in `forkbomb' mode).
+ """
+
+ ## Set the parallelism state. The `_rfd' and `_wfd' are the read and
+ ## write ends of the jobserver pipe, or -1 if there is no jobserver.
+    ## `_par' is true if we're meant to run jobs in parallel.  The case
+    ## where `_par' is set but `_rfd' is -1 means unconstrained parallelism.
+ ##
+ ## The jobserver pipe contains a byte for each shared job slot. A
+ ## scheduler reads a byte from the pipe for each job it wants to run
+ ## (nearly -- see `_privtoken' below), and puts the byte back when the
+ ## job finishes. The GNU Make jobserver protocol specification insists
+ ## that we preserve the value of the byte in the pipe (though doesn't
+ ## currently make any use of this flexibility), so we record it in a
+ ## `JobToken' object's `_char' attribute.
+ me._par = rfd != -1 or npar is True or npar != 1
+ spew("par is %r" % me._par)
+ if rfd == -1 and npar > 1:
+ rfd, wfd = OS.pipe()
+ OS.write(wfd, (npar - 1)*'+')
+ OS.environ["MAKEFLAGS"] = \
+ (" -j --jobserver-auth=%(rfd)d,%(wfd)d " +
+ "--jobserver-fds=%(rfd)d,%(wfd)d") % dict(rfd = rfd, wfd = wfd)
+ me._rfd = rfd; me._wfd = wfd
+
+ ## The scheduler state. A job starts in the `_check' list. Each
+ ## iteration of the scheduler loop will inspect the jobs here and see
+ ## whether it's ready to run: if not, it gets put in the `_sleep' list,
+ ## where it will languish until something moves it back; if it is ready,
+ ## it gets moved to the `_ready' list to wait for a token from the
+ ## jobserver. At that point the job can be started, and it moves to the
+ ## `_kidmap', which associates a process-id with each running job.
+ ## Finally, jobs which have completed are simply forgotten. The `_njobs'
+ ## counter keeps track of how many jobs are outstanding, so that we can
+ ## stop when there are none left.
+ me._check = set()
+ me._sleep = set()
+ me._ready = set()
+ me._kidmap = {}
+ me._logkidmap = {}
+ me._njobs = 0
+
+ ## As well as the jobserver pipe, we implicitly have one extra job slot,
+ ## which is the one we took when we were started by our parent. The
+    ## right to do processing in this slot is represented by the `private
+ ## token' here, distinguished from tokens from the jobserver pipe by
+ ## having `None' as its `_char' value.
+ me._privtoken = PrivateJobToken(me)
+
+ def add(me, job):
+ """Notice a new job and arrange for it to (try to) run."""
+ if job._known: return
+ spew("adding new job `%s'" % job.name)
+ job._known = True
+ me._check.add(job)
+ me._njobs += 1
+
+ def close_jobserver(me):
+ """
+ Close the jobserver file descriptors.
+
+ This should be called within child processes to prevent them from messing
+ with the jobserver.
+ """
+ if me._rfd != -1: OS.close(me._rfd); me._rfd = -1
+ if me._wfd != -1: OS.close(me._wfd); me._wfd = -1
+ try: del OS.environ["MAKEFLAGS"]
+ except KeyError: pass
+
+ def _killall(me):
+ """Zap all jobs which aren't yet running."""
+ for jobset in [me._sleep, me._check, me._ready]:
+ while jobset:
+ job = jobset.pop()
+ job.done = True
+ job.win = False
+ me._njobs -= 1
+
+ def _retire(me, job, win, outcome):
+ """
+ Declare that a job has stopped, and deal with the consequences.
+
+ JOB is the completed job, which should not be on any of the job queues.
+ WIN is true if the job succeeded, and false otherwise. OUTCOME is a
+ human-readable string explaining how the job came to its end, or `None'
+ if no message should be reported.
+ """
+
+ global RC
+
+ ## Return the job's token to the pool.
+ if job._token is not None: job._token.recycle()
+ job._token = None
+ me._njobs -= 1
+
+ ## Update and maybe report the job's status.
+ job.done = True
+ job.win = win
+ if outcome is not None and not OPT.silent:
+ if OPT.quiet and not job.win and job._logfile: job._logtail()
+ if not job.win or not OPT.quiet:
+ print "%-*s %c (%s)" % \
+ (TAGWD, job.name, job.win and '|' or '*', outcome)
+
+ ## If the job failed, and we care, arrange to exit nonzero.
+ if not win and not OPT.ignerr: RC = 2
+
+ ## If the job failed, and we're supposed to give up after the first
+ ## error, then zap all of the waiting jobs.
+ if not job.win and not OPT.keepon and not OPT.ignerr: me._killall()
+
+ ## If this job has dependents then wake them up and see whether they're
+ ## ready to run.
+ for j in job._waiting:
+ try: me._sleep.remove(j)
+ except KeyError: pass
+ else:
+ spew("waking dependent job `%s'" % j.name)
+ me._check.add(j)
+
+ def _reap(me, kid, st):
+ """
+ Deal with the child with process-id KID having exited with status ST.
+ """
+
+ ## Figure out what kind of child this is. Note that it has finished.
+ try: job = me._kidmap[kid]
+ except KeyError:
+ try: job = me._logkidmap[kid]
+ except KeyError:
+ spew("unknown child %d exits with status 0x%04x" % (kid, st))
+ return
+ else:
+ ## It's a logging child.
+ del me._logkidmap[kid]
+ job._logkid = DONE
+ spew("logging process for job `%s' exits with status 0x%04x" %
+ (job.name, st))
+ else:
+ job._st = st
+ del me._kidmap[kid]
+ spew("main process for job `%s' exits with status 0x%04x" %
+ (job.name, st))
+
+ ## If either of the job's associated processes is still running then we
+ ## should stop now and give the other one a chance.
+ if job._st is None or job._logkid is not DONE:
+ spew("deferring retirement for job `%s'" % job.name)
+ return
+ spew("completing deferred retirement for job `%s'" % job.name)
+
+ ## Update and (maybe) report the job status.
+ if job._st == 0: win = True; outcome = None
+ else: win = False; outcome = wait_outcome(job._st)
+
+ ## Retire the job.
+ me._retire(job, win, outcome)
+
+ def _reapkids(me):
+ """Reap all finished child processes."""
+ while True:
+ try: kid, st = OS.waitpid(-1, OS.WNOHANG)
+ except OSError, err:
+ if err.errno == E.ECHILD: break
+ else: raise
+ if kid == 0: break
+ me._reap(kid, st)
+
+ def run_job(me, job):
+ """Start running the JOB."""
+
+ job.started = True
+ if OPT.dryrun: return None, None
+
+ ## Make pipes to collect the job's output and error reports.
+ r_out, w_out = OS.pipe()
+ r_err, w_err = OS.pipe()
+
+ ## Find a log file to write. Avoid races over the log names; but this
+ ## means that the log descriptor needs to be handled somewhat carefully.
+ logdir = OS.path.join(C.STATE, "log"); mkdir_p(logdir)
+ logseq = 1
+ while True:
+ logfile = OS.path.join(logdir, "%s-%s#%d" % (job.name, TODAY, logseq))
+ try:
+ logfd = OS.open(logfile, OS.O_WRONLY | OS.O_CREAT | OS.O_EXCL, 0666)
+ except OSError, err:
+ if err.errno == E.EEXIST: logseq += 1; continue
+ else: raise
+ else:
+ break
+ job._logfile = logfile
+
+ ## Make sure there's no pending output, or we might get two copies. (I
+ ## don't know how to flush all output streams in Python, but this is good
+ ## enough for our purposes.)
+ SYS.stdout.flush()
+
+ ## Set up the logging child first. If we can't, take down the whole job.
+ try: job._logkid = OS.fork()
+ except OSError, err: OS.close(logfd); return None, err
+ if not job._logkid:
+ ## The main logging loop.
+
+ ## Close the jobserver descriptors, and the write ends of the pipes.
+ me.close_jobserver()
+ OS.close(w_out); OS.close(w_err)
+
+ ## Capture the job's stdout and stderr and wait for everything to
+ ## happen.
+ def log_lines(fd, marker):
+ def fn(line):
+ if not OPT.quiet:
+ OS.write(1, "%-*s %s %s\n" % (TAGWD, job.name, marker, line))
+ OS.write(logfd, "%s %s\n" % (marker, line))
+ return ReadLinesSelector(fd, fn)
+ select_loop([log_lines(r_out, "|"), log_lines(r_err, "*")])
+
+ ## We're done. (Closing the descriptors here would be like polishing
+ ## the floors before the building is demolished.)
+ OS._exit(0)
+
+ ## Back in the main process: record the logging child. At this point we
+ ## no longer need the logfile descriptor.
+ me._logkidmap[job._logkid] = job
+ OS.close(logfd)
+
+ ## Start the main job process.
+ try: kid = OS.fork()
+ except OSError, err: return None, err
+ if not kid:
+ ## The main job.
+
+ ## Close the read ends of the pipes, and move the write ends to the
+ ## right places. (This will go wrong if we were started without enough
+ ## descriptors. Fingers crossed.)
+ OS.dup2(w_out, 1); OS.dup2(w_err, 2)
+ OS.close(r_out); OS.close(w_out)
+ OS.close(r_err); OS.close(w_err)
+ spew("running job `%s' as pid %d" % (job.name, OS.getpid()))
+
+ ## Run the job, catching nonlocal flow.
+ try:
+ job.run()
+ except ExpectedError, err:
+ moan(str(err))
+ OS._exit(2)
+ except Exception, err:
+        TB.print_exc(file = SYS.stderr)
+ OS._exit(3)
+ except BaseException, err:
+ moan("caught unexpected exception: %r" % err)
+ OS._exit(112)
+ else:
+ spew("job `%s' ran to completion" % job.name)
+
+ ## Clean up old logs.
+ match = []
+ pat = RX.compile(r"^%s-(\d{4})-(\d{2})-(\d{2})\#(\d+)$" %
+ RX.escape(job.name))
+ for f in OS.listdir(logdir):
+ m = pat.match(f)
+ if m: match.append((f, int(m.group(1)), int(m.group(2)),
+ int(m.group(3)), int(m.group(4))))
+ match.sort(key = lambda (_, y, m, d, q): (y, m, d, q))
+ if len(match) > LOGKEEP:
+ for (f, _, _, _, _) in match[:-LOGKEEP]:
+ try: OS.unlink(OS.path.join(logdir, f))
+ except OSError, err:
+ if err.errno == E.ENOENT: pass
+ else: raise
+
+ ## All done.
+ OS._exit(0)
+
+ ## Back in the main process: close both the pipes and return the child
+ ## process.
+ OS.close(r_out); OS.close(w_out)
+ OS.close(r_err); OS.close(w_err)
+ if OPT.quiet: print "%-*s | (started)" % (TAGWD, job.name)
+ return kid, None
+
+ def run(me):
+ """Run the scheduler."""
+
+ spew("JobScheduler starts")
+
+ while True:
+ ## The main scheduler loop. We go through three main phases:
+ ##
+ ## * Inspect the jobs in the `check' list to see whether they can
+ ## run. After this, the `check' list will be empty.
+ ##
+ ## * If there are running jobs, check to see whether any of them have
+ ## stopped, and deal with the results. Also, if there are jobs
+ ## ready to start and a job token has become available, then
+ ## retrieve the token. (Doing these at the same time is the tricky
+ ## part.)
+ ##
+ ## * If there is a job ready to run, and we retrieved a token, then
+ ## start running the job.
+
+ ## Check the pending jobs to see if they can make progress: run each
+ ## job's `check' method and move it to the appropriate queue. (It's OK
+ ## if `check' methods add more jobs to the list, as long as things
+ ## settle down eventually.)
+ while True:
+ try: job = me._check.pop()
+ except KeyError: break
+ if job._deps is None:
+ job._deps = set()
+ job.prepare()
+ state, reason = job.check()
+ tail = reason is not None and ": %s" % reason or ""
+        if state is READY:
+ spew("job `%s' ready to run%s" % (job.name, tail))
+ me._ready.add(job)
+ elif state is FAILED:
+ spew("job `%s' refused to run%s" % (job.name, tail))
+ me._retire(job, False, "refused to run%s" % tail)
+ elif state is DONE:
+ spew("job `%s' has nothing to do%s" % (job.name, tail))
+ me._retire(job, True, reason)
+ elif state is SLEEP:
+ spew("job `%s' can't run yet%s" % (job.name, tail))
+ me._sleep.add(job)
+ else:
+ raise ValueError("unexpected job check from `%s': %r, %r" %
+ (job.name, state, reason))
+
+ ## If there are no jobs left, then we're done.
+ if not me._njobs:
+ spew("all jobs completed")
+ break
+
+ ## Make sure we can make progress. There are no jobs on the check list
+ ## any more, because we just cleared it. We assume that jobs which are
+ ## ready to run will eventually receive a token. So we only end up in
+ ## trouble if there are jobs asleep, but none running or ready to run.
+ ##spew("#jobs = %d" % me._njobs)
+ ##spew("sleeping: %s" % ", ".join([j.name for j in me._sleep]))
+ ##spew("ready: %s" % ", ".join([j.name for j in me._ready]))
+ ##spew("running: %s" % ", ".join([j.name for j in me._kidmap.itervalues()]))
+ assert not me._sleep or me._kidmap or me._logkidmap or me._ready
+
+ ## Wait for something to happen.
+ if not me._ready or (not me._par and me._privtoken is None):
+ ## If we have no jobs ready to run, then we must wait for an existing
+ ## child to exit. Hopefully, a sleeping job will be able to make
+ ## progress after this.
+ ##
+ ## Alternatively, if we're not supposed to be running jobs in
+ ## parallel and we don't have the private token, then we have no
+ ## choice but to wait for the running job to complete.
+ ##
+ ## There's no check here for `ECHILD'. We really shouldn't be here
+ ## if there are no children to wait for. (The check list must be
+ ## empty because we just drained it. If the ready list is empty,
+ ## then all of the jobs must be running or sleeping; but the
+ ## assertion above means that either there are no jobs at all, in
+ ## which case we should have stopped, or at least one is running, in
+ ## which case it's safe to wait for it. The other case is that we're
+ ## running jobs sequentially, and one is currently running, so
+ ## there's nothing for it but to wait for it -- and hope that it will
+ ## wake up one of the sleeping jobs. The remaining possibility is
+ ## that we've miscounted somewhere, which will cause a crash.)
+ if not me._ready:
+ spew("no new jobs ready: waiting for outstanding jobs to complete")
+ else:
+ spew("job running without parallelism: waiting for it to finish")
+ kid, st = OS.waitpid(-1, 0)
+ me._reap(kid, st)
+ me._reapkids()
+ continue
+
+ ## We have jobs ready to run, so try to acquire a token.
+ if me._rfd == -1 and me._par:
+ ## We're running with unlimited parallelism, so we don't need a token
+ ## to run a job.
+ spew("running new job without token")
+ token = TRIVIAL_TOKEN
+ elif me._privtoken:
+ ## Our private token is available, so we can use that to start
+ ## a new job.
+ spew("private token available: assigning to new job")
+ token = me._privtoken
+ me._privtoken = None
+ else:
+ ## We have to read from the jobserver pipe. Unfortunately, we're not
+ ## allowed to set the pipe nonblocking, because make is also using it
+ ## and will get into a serious mess. And we must deal with `SIGCHLD'
+ ## arriving at any moment. We use the same approach as GNU Make. We
+ ## start by making a copy of the jobserver descriptor: it's this
+ ## descriptor we actually try to read from. We set a signal handler
+ ## to close this descriptor if a child exits. And we try one last
+ ## time to reap any children which have exited just before we try
+ ## reading the jobserver pipe. This way we're covered:
+ ##
+ ## * If a child exits during the main loop, before we establish the
+ ## descriptor copy then we'll notice when we try reaping
+ ## children.
+ ##
+ ## * If a child exits between the last-chance reap and the read,
+ ## the signal handler will close the descriptor and the `read'
+ ## call will fail with `EBADF'.
+ ##
+ ## * If a child exits while we're inside the `read' system call,
+ ## then the syscall will fail with `EINTR'.
+ ##
+ ## The only problem is that we can't do this from Python, because
+ ## Python signal handlers are delayed. This is what the `jobclient'
+ ## module is for.
+ ##
+ ## The `jobclient' function is called as
+ ##
+ ## jobclient(FD)
+ ##
+ ## It returns a tuple of three values: TOKEN, PID, STATUS. If TOKEN
+ ## is not `None', then reading the pipe succeeded; if TOKEN is empty,
+ ## then the pipe returned EOF, so we should abort; otherwise, TOKEN
+ ## is a singleton string holding the token character. If PID is not
+ ## `None', then PID is the process id of a child which exited, and
+ ## STATUS is its exit status.
+ spew("waiting for token from jobserver")
+ tokch, kid, st = JC.jobclient(me._rfd)
+
+ if kid is not None:
+ me._reap(kid, st)
+ me._reapkids()
+ if tokch is None:
+ spew("no token; trying again")
+ continue
+        elif tokch == '':
+ error("jobserver pipe closed; giving up")
+ me._killall()
+ continue
+ spew("received token from jobserver")
+ token = JobServerToken(tokch, me._wfd)
+
+ ## We have a token, so we should start up the job.
+ job = me._ready.pop()
+ job._token = token
+ spew("start new job `%s'" % job.name)
+ kid, err = me.run_job(job)
+ if err is not None:
+ me._retire(job, False, "failed to fork: %s" % err)
+ continue
+ if kid is None: me._retire(job, True, "dry run")
+ else: me._kidmap[kid] = job
+
+ ## We ran out of work to do.
+ spew("JobScheduler done")
+
+###--------------------------------------------------------------------------
+### Configuration.
+
+R_CONFIG = RX.compile(r"^([a-zA-Z0-9_]+)='(.*)'$")
+
+class Config (object):
+
+ def _conv_str(s): return s
+ def _conv_list(s): return s.split()
+ def _conv_set(s): return set(s.split())
+
+ _CONVERT = {
+ "ROOTLY": _conv_list,
+ "DISTS": _conv_set,
+ "MYARCH": _conv_set,
+ "NATIVE_ARCHS": _conv_set,
+ "FOREIGN_ARCHS": _conv_set,
+ "FOREIGN_GNUARCHS": _conv_list,
+ "ALL_ARCHS": _conv_set,
+ "NATIVE_CHROOTS": _conv_set,
+ "FOREIGN_CHROOTS": _conv_set,
+ "ALL_CHROOTS": _conv_set,
+ "BASE_PACKAGES": _conv_list,
+ "EXTRA_PACKAGES": _conv_list,
+ "CROSS_PACKAGES": _conv_list,
+ "CROSS_PATHS": _conv_list,
+ "APTCONF": _conv_list,
+ "LOCALPKGS": _conv_list,
+ "SCHROOT_COPYFILES": _conv_list,
+ "SCHROOT_NSSDATABASES": _conv_list
+ }
+
+ _CONV_MAP = {
+ "*_APTCONFSRC": ("APTCONFSRC", _conv_str),
+ "*_DEPS": ("PKGDEPS", _conv_list),
+ "*_QEMUHOST": ("QEMUHOST", _conv_str),
+ "*_QEMUARCH": ("QEMUARCH", _conv_str),
+ "*_ALIASES": ("DISTALIAS", _conv_str)
+ }
+
+ _conv_str = staticmethod(_conv_str)
+ _conv_list = staticmethod(_conv_list)
+ _conv_set = staticmethod(_conv_set)
+
+ def __init__(me):
+ raw = r"""
+ """; raw = open('state/config.sh').read(); _ignore = """ @@@config@@@
+ """
+ me._conf = {}
+ for line in raw.split("\n"):
+ line = line.strip()
+ if not line or line.startswith('#'): continue
+ m = R_CONFIG.match(line)
+ if not m: raise ExpectedError("bad config line `%s'" % line)
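+      ## Unwrap the single-quoted value: the config writer escapes an
+      ## embedded quote as `'\''.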
+ k, v = m.group(1), m.group(2).replace("'\\''", "'")
+ d = me._conf
+ try: conv = me._CONVERT[k]
+ except KeyError:
+ i = 0
+ while True:
+ try: i = k.index("_", i + 1)
+ except ValueError: conv = me._conv_str; break
+ try: map, conv = me._CONV_MAP["*" + k[i:]]
+ except KeyError: pass
+ else:
+ d = me._conf.setdefault(map, dict())
+ k = k[:i]
+ if k.startswith("_"): k = k[1:]
+ break
+ d[k] = conv(v)
+
+ def __getattr__(me, attr):
+ try: return me._conf[attr]
+ except KeyError, err: raise AttributeError(err.args[0])
+
+with toplevel_handler(): C = Config()
+
+###--------------------------------------------------------------------------
+### Chroot maintenance utilities.
+
+CREATE = Tag("CREATE")
+FORCE = Tag("FORCE")
+
+def check_fresh(fresh, update):
+ """
+ Compare a refresh mode FRESH against an UPDATE time.
+
+ Return a (STATUS, REASON) pair, suitable for returning from a job `check'
+ method.
+
+ The FRESH argument may be one of the following:
+
+ * `CREATE' is satisfied if the thing exists at all: it returns `READY' if
+ the thing doesn't yet exist (UPDATE is `None'), or `DONE' otherwise.
+
+ * `FORCE' is never satisfied: it always returns `READY'.
+
+  * an integer N is satisfied if the UPDATE time is at most N seconds
+    earlier than the present: it returns `READY' if the UPDATE is too old,
+    or `DONE' otherwise.
+ """
+ if update is None: return READY, "must create"
+ elif fresh is FORCE: return READY, "update forced"
+ elif fresh is CREATE: return DONE, "already created"
+ elif NOW - unzulu(update) > fresh: return READY, "too stale: updating"
+ else: return DONE, "already sufficiently up-to-date"
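+
+## For example (illustrative values):
+##
+##     check_fresh(CREATE, None)           ## -> (READY, "must create")
+##     check_fresh(86400, zulu(NOW - 60))
+##     ## -> (DONE, "already sufficiently up-to-date")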
+
+def lockfile_path(file):
+ """
+ Return the full path for a lockfile named FILE.
+
+ Create the lock directory if necessary.
+ """
+ lockdir = OS.path.join(C.STATE, "lock"); mkdir_p(lockdir)
+ return OS.path.join(lockdir, file)
+
+def chroot_src_lockfile(dist, arch):
+ """
+ Return the lockfile for the source-chroot for DIST on ARCH.
+
+ It is not allowed to acquire a source-chroot lock while holding any other
+ locks.
+ """
+ return lockfile_path("source.%s-%s" % (dist, arch))
+
+def chroot_src_lv(dist, arch):
+ """
+ Return the logical volume name for the source-chroot for DIST on ARCH.
+ """
+ return "%s%s-%s" % (C.LVPREFIX, dist, arch)
+
+def chroot_src_blkdev(dist, arch):
+ """
+ Return the block-device name for the source-chroot for DIST on ARCH.
+ """
+ return OS.path.join("/dev", C.VG, chroot_src_lv(dist, arch))
+
+def chroot_src_mntpt(dist, arch):
+ """
+ Return mountpoint path for setting up the source-chroot for DIST on ARCH.
+
+ Note that this is not the mountpoint that schroot(1) uses.
+ """
+ mnt = OS.path.join(C.STATE, "mnt", "%s-%s" % (dist, arch))
+ mkdir_p(mnt)
+ return mnt
+
+def chroot_session_mntpt(session):
+ """Return the mountpoint for an schroot session."""
+ return OS.path.join("/schroot", session)
+
+def crosstools_lockfile(dist, arch):
+ """
+ Return the lockfile for the cross-build tools for DIST, hosted by ARCH.
+
+ When locking multiple cross-build tools, you must acquire the locks in
+ lexicographically ascending order.
+ """
+ return lockfile_path("cross-tools.%s-%s" % (dist, arch))
+
+def switch_prefix(string, map):
+ """
+ Replace the prefix of a STRING, according to the given MAP.
+
+ MAP is a sequence of (OLD, NEW) pairs. For each such pair in turn, test
+ whether STRING starts with OLD: if so, return STRING, but with the prefix
+ OLD replaced by NEW. If no OLD prefix matches, then raise a `ValueError'.
+ """
+ for old, new in map:
+ if string.startswith(old): return new + string[len(old):]
+ raise ValueError("expected `%s' to start with one of %s" %
+ ", ".join(["`%s'" % old for old, new in map]))
+
+def host_to_chroot(path):
+ """
+ Convert a host path under `C.LOCAL' to the corresponding chroot path under
+ `/usr/local.schroot'.
+ """
+ return switch_prefix(path, [(C.LOCAL + "/", "/usr/local.schroot/")])
+
+def chroot_to_host(path):
+ """
+ Convert a chroot path under `/usr/local.schroot' to the corresponding
+ host path under `C.LOCAL'.
+ """
+ return switch_prefix(path, [("/usr/local.schroot/", C.LOCAL + "/")])
+
+def split_dist_arch(spec):
+ """Split a SPEC of the form `DIST-ARCH' into the pair (DIST, ARCH)."""
+ dash = spec.index("-")
+ return spec[:dash], spec[dash + 1:]
+
+def elf_binary_p(arch, path):
+ """Return whether PATH is an ELF binary for ARCH."""
+ if not OS.path.isfile(path): return False
+ with open(path, 'rb') as f: magic = f.read(20)
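+  ## The fields we check: bytes 0--3 are the ELF magic; bytes 4--6 are
+  ## EI_CLASS, EI_DATA, and EI_VERSION; bytes 8--15 (ABI version and
+  ## padding) should be zero; and bytes 18--19 hold the little-endian
+  ## `e_machine' code (3 for i386, 0x3e for amd64).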
+ if magic[0:4] != "\x7fELF": return False
+ if magic[8:16] != 8*"\0": return False
+ if arch == "i386":
+ if magic[4:7] != "\x01\x01\x01": return False
+ if magic[18:20] != "\x03\x00": return False
+ elif arch == "amd64":
+ if magic[4:7] != "\x02\x01\x01": return False
+ if magic[18:20] != "\x3e\x00": return False
+ else:
+ raise ValueError("unsupported donor architecture `%s'" % arch)
+ return True
+
+def progress(msg):
+ """
+ Print a progress message MSG.
+
+ This is intended to be called within a job's `run' method, so it doesn't
+ check `OPT.quiet' or `OPT.silent'.
+ """
+ OS.write(1, ";; %s\n" % msg)
+
+class NoSuchChroot (Exception):
+ """
+ Exception indicating that a chroot does not exist.
+
+ Specifically, it means that it doesn't even have a logical volume.
+ """
+ def __init__(me, dist, arch):
+ me.dist = dist
+ me.arch = arch
+ def __str__(me):
+ return "chroot for `%s' on `%s' not found" % (me.dist, me.arch)
+
+@CTX.contextmanager
+def mount_chroot_src(dist, arch):
+ """
+ Context manager for mounting the source-chroot for DIST on ARCH.
+
+ The context manager automatically unmounts the filesystem again when the
+ body exits. You must hold the appropriate source-chroot lock before
+ calling this routine.
+ """
+ dev = chroot_src_blkdev(dist, arch)
+ if not block_device_p(dev): raise NoSuchChroot(dist, arch)
+ mnt = chroot_src_mntpt(dist, arch)
+ try:
+ run_program(C.ROOTLY + ["mount", dev, mnt])
+ yield mnt
+ finally:
+ umount(mnt)
+
+@CTX.contextmanager
+def chroot_session(dist, arch, sourcep = False):
+ """
+ Context manager for running an schroot(1) session.
+
+ Returns the (ugly, automatically generated) session name to the context
+ body. By default, a snapshot session is started: set SOURCEP true to start
+ a source-chroot session. You must hold the appropriate source-chroot lock
+ before starting a source-chroot session.
+
+ The context manager automatically closes the session again when the body
+ exits.
+ """
+ chroot = chroot_src_lv(dist, arch)
+ if sourcep: chroot = "source:" + chroot
+ session = run_program(["schroot", "-uroot", "-b", "-c", chroot],
+ stdout = RETURN).rstrip("\n")
+ try:
+ root = OS.path.join(chroot_session_mntpt(session), "fs")
+ yield session, root
+ finally:
+ run_program(["schroot", "-e", "-c", session])
+
+def run_root(command, **kw):
+ """Run a COMMAND as root. Arguments are as for `run_program'."""
+ return run_program(C.ROOTLY + command, **kw)
+
+def run_schroot_session(session, command, rootp = False, **kw):
+ """
+ Run a COMMAND within an schroot(1) session.
+
+ Arguments are as for `run_program'.
+ """
+ if rootp:
+ return run_program(["schroot", "-uroot", "-r",
+ "-c", session, "--"] + command, **kw)
+ else:
+ return run_program(["schroot", "-r",
+ "-c", session, "--"] + command, **kw)
+
+def run_schroot_source(dist, arch, command, **kw):
+ """
+ Run a COMMAND through schroot(1), in the source-chroot for DIST on ARCH.
+
+ Arguments are as for `run_program'. You must hold the appropriate source-
+ chroot lock before calling this routine.
+ """
+ return run_program(["schroot", "-uroot",
+ "-c", "source:%s" % chroot_src_lv(dist, arch),
+ "--"] + command, **kw)
+
+###--------------------------------------------------------------------------
+### Metadata files.
+
+class MetadataClass (type):
+ """
+ Metaclass for metadata classes.
+
+ Notice a `VARS' attribute in the class dictionary, and augment it with a
+ `_VARSET' attribute, constructed as a set containing the same items. (We
+ need them both: the set satisfies fast lookups, while the original sequence
+ remembers the ordering.)
+ """
+ def __new__(me, name, supers, dict):
+ try: vars = dict['VARS']
+ except KeyError: pass
+ else: dict['_VARSET'] = set(vars)
+ return super(MetadataClass, me).__new__(me, name, supers, dict)
+
+class BaseMetadata (object):
+ """
+ Base class for metadate objects.
+
+ Metadata bundles are simple collections of key/value pairs. Keys should
+ usually be Python identifiers because they're used to name attributes.
+ Values are strings, but shouldn't have leading or trailing whitespace, and
+ can't contain newlines.
+
+ Metadata bundles are written to files. The format is simple enough: empty
+ lines and lines starting with `#' are ignored; otherwise, the line must
+ have the form
+
+ KEY = VALUE
+
+ where KEY does not contain `='; spaces around the `=' are optional, and
+ spaces around the KEY and VALUE are stripped. The order of keys is
+ unimportant; keys are always written in a standard order on output.
+ """
+ __metaclass__ = MetadataClass
+
+ def __init__(me, **kw):
+ """Initialize a metadata bundle from keyword arguments."""
+ for k, v in kw.iteritems():
+ setattr(me, k, v)
+ for v in me.VARS:
+ try: getattr(me, v)
+ except AttributeError: setattr(me, v, None)
+
+ def __setattr__(me, attr, value):
+ """
+ Try to set an attribute.
+
+ Only attribute names listed in the `VARS' class attribute are permitted.
+ """
+ if attr not in me._VARSET: raise AttributeError, attr
+ super(BaseMetadata, me).__setattr__(attr, value)
+
+ @classmethod
+ def read(cls, path):
+ """Return a new metadata bundle read from a named PATH."""
+ map = {}
+ with open(path) as f:
+ for line in f:
+ line = line.strip()
+ if line == "" or line.startswith("#"): continue
+ k, v = line.split("=", 1)
+ map[k.strip()] = v.strip()
+ return cls(**map)
+
+ def _write(me, file):
+ """
+ Write the metadata bundle to the FILE (a file-like object).
+
+ This is intended for use by subclasses which want to override the default
+ I/O behaviour of the main `write' method.
+ """
+ file.write("### -*-conf-*-\n")
+ for k in me.VARS:
+ try: v = getattr(me, k)
+ except AttributeError: pass
+ else:
+ if v is not None: file.write("%s = %s\n" % (k, v))
+
+ def write(me, path):
+ """
+ Write the metadata bundle to a given PATH.
+
+ The file is replaced atomically.
+ """
+ with safewrite(path) as f: me._write(f)
+
+ def __repr__(me):
+ return "#<%s: %s>" % (me.__class__.__name__,
+ ", ".join("%s=%r" % (k, getattr(me, k, None))
+ for k in me.VARS))
+
+class ChrootMetadata (BaseMetadata):
+ VARS = ['dist', 'arch', 'update']
+
+ @classmethod
+ def read(cls, dist, arch):
+ try:
+ with lockfile(chroot_src_lockfile(dist, arch), exclp = False):
+ with mount_chroot_src(dist, arch) as mnt:
+ return super(ChrootMetadata, cls).read(OS.path.join(mnt, "META"))
+ except IOError, err:
+ if err.errno == E.ENOENT: pass
+ else: raise
+ except NoSuchChroot: pass
+ return cls(dist = dist, arch = arch)
+
+ def write(me):
+ with mount_chroot_src(me.dist, me.arch) as mnt:
+ with safewrite_root(OS.path.join(mnt, "META")) as f:
+ me._write(f)
+
+class CrossToolsMetadata (BaseMetadata):
+ VARS = ['dist', 'arch', 'update']
+
+ @classmethod
+ def read(cls, dist, arch):
+ try:
+ return super(CrossToolsMetadata, cls)\
+ .read(OS.path.join(C.LOCAL, "cross", "%s-%s" % (dist, arch), "META"))
+ except IOError, err:
+ if err.errno == E.ENOENT: pass
+ else: raise
+ return cls(dist = dist, arch = arch)
+
+ def write(me, dir = None):
+ if dir is None:
+ dir = OS.path.join(C.LOCAL, "cross", "%s-%s" % (me.dist, me.arch))
+ with safewrite_root(OS.path.join(dir, "META")) as f:
+ me._write(f)
+
+###--------------------------------------------------------------------------
+### Constructing a chroot.
+
+R_DIVERT = RX.compile(r"^diversion of (.*) to .* by install-cross-tools$")
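+## This matches lines printed by `dpkg-divert --list', e.g. (hypothetical
+## path) `diversion of /usr/bin/gcc to /usr/bin/gcc.i386 by
+## install-cross-tools'.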
+
+class ChrootJob (BaseJob):
+ """
+ Create or update a chroot.
+ """
+
+ SPECS = C.ALL_CHROOTS
+
+ def __init__(me, spec, fresh = CREATE, *args, **kw):
+ super(ChrootJob, me).__init__(*args, **kw)
+ me._dist, me._arch = split_dist_arch(spec)
+ me._fresh = fresh
+ me._meta = ChrootMetadata.read(me._dist, me._arch)
+ me._tools_chroot = me._qemu_chroot = None
+
+ def _mkname(me): return "chroot.%s-%s" % (me._dist, me._arch)
+
+ def prepare(me):
+ if me._arch in C.FOREIGN_ARCHS:
+ me._tools_chroot = CrossToolsJob.ensure\
+ ("%s-%s" % (me._dist, C.TOOLSARCH), FRESH)
+ me._qemu_chroot = CrossToolsJob.ensure\
+ ("%s-%s" % (me._dist, C.QEMUHOST[me._arch]), FRESH)
+ me.await(me._tools_chroot)
+ me.await(me._qemu_chroot)
+
+ def check(me):
+ status, reason = super(ChrootJob, me).check()
+ if status is not READY: return status, reason
+ if (me._tools_chroot is not None and me._tools_chroot.started) or \
+ (me._qemu_chroot is not None and me._qemu_chroot.started):
+ return READY, "prerequisites run"
+ return check_fresh(me._fresh, me._meta.update)
+
+ def _install_cross_tools(me):
+ """
+ Install or refresh cross-tools in the source-chroot.
+
+ This function assumes that the source-chroot lock is already held.
+
+ Note that there isn't a job class corresponding to this function. It's
+ done automatically as part of source-chroot setup and update for foreign
+ architectures.
+ """
+ with Cleanup() as clean:
+
+ dist, arch = me._dist, me._arch
+
+ mymulti = run_program(["dpkg-architecture", "-a", C.TOOLSARCH,
+ "-qDEB_HOST_MULTIARCH"],
+ stdout = RETURN).rstrip("\n")
+ gnuarch = run_program(["dpkg-architecture", "-A", arch,
+ "-qDEB_TARGET_GNU_TYPE"],
+ stdout = RETURN).rstrip("\n")
+
+ crossdir = OS.path.join(C.LOCAL, "cross",
+ "%s-%s" % (dist, C.TOOLSARCH))
+
+ qarch, qhost = C.QEMUARCH[arch], C.QEMUHOST[arch]
+ qemudir = OS.path.join(C.LOCAL, "cross",
+ "%s-%s" % (dist, qhost), "QEMU")
+
+ ## Acquire lockfiles in a canonical order to prevent deadlocks.
+ donors = [C.TOOLSARCH]
+ if qarch != C.TOOLSARCH: donors.append(qarch)
+ donors.sort()
+ for a in donors:
+ clean.enter(lockfile(crosstools_lockfile(dist, a), exclp = False))
+
+ ## Open a session.
+ session, root = clean.enter(chroot_session(dist, arch, sourcep = True))
+
+ ## Search the cross-tools tree for tools, to decide what to do with
+ ## each file. Make lists:
+ ##
+ ## * `want_div' is simply a set of all files in the chroot which need
+ ## dpkg diversions to prevent foreign versions of the tools from
+ ## clobbering our native versions.
+ ##
+ ## * `want_link' is a dictionary mapping paths which need symbolic
+ ## links into the cross-tools trees to their link destinations.
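+ ##
+ ## For example (hypothetical paths), a tool CROSSDIR/usr/bin/dpkg would
+ ## set `want_link["/usr/bin/dpkg"]' to the tool's home in the cross-
+ ## tools tree, and add `/usr/bin/dpkg' to `want_div'.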
+ progress("scan cross-tools tree")
+ want_div = set()
+ want_link = dict()
+ cross_prefix = crossdir + "/"
+ qemu_prefix = qemudir + "/"
+ toolchain_prefix = OS.path.join(crossdir, "TOOLCHAIN", gnuarch) + "/"
+ def examine(path):
+ dest = switch_prefix(path, [(qemu_prefix, "/usr/bin/"),
+ (toolchain_prefix, "/usr/bin/"),
+ (cross_prefix, "/")])
+ if OS.path.islink(path): src = OS.readlink(path)
+ else: src = host_to_chroot(path)
+ want_link[dest] = src
+ if not OS.path.isdir(path): want_div.add(dest)
+ examine(OS.path.join(qemudir, "qemu-%s-static" % qarch))
+ examine(OS.path.join(crossdir, "lib", mymulti))
+ examine(OS.path.join(crossdir, "usr/lib", mymulti))
+ examine(OS.path.join(crossdir, "usr/lib/gcc-cross"))
+ def visit(_, dir, files):
+ ff = []
+ for f in files:
+ if f == "META" or f == "QEMU" or f == "TOOLCHAIN" or \
+ (dir.endswith("/lib") and (f == mymulti or f == "gcc-cross")):
+ continue
+ ff.append(f)
+ path = OS.path.join(dir, f)
+ if not OS.path.isdir(path): examine(path)
+ files[:] = ff
+ OS.path.walk(crossdir, visit, None)
+ OS.path.walk(OS.path.join(crossdir, "TOOLCHAIN", gnuarch),
+ visit, None)
+
+ ## Build the set `have_div' of paths which already have diversions.
+ progress("scan chroot")
+ have_div = set()
+ with subprocess(["schroot", "-uroot", "-r", "-c", session, "--",
+ "dpkg-divert", "--list"],
+ stdout = PIPE) as (_, fd_out, _):
+ try:
+ f = OS.fdopen(fd_out)
+ for line in f:
+ m = R_DIVERT.match(line.rstrip("\n"))
+ if m: have_div.add(m.group(1))
+ finally:
+ f.close()
+
+ ## Build a dictionary `have_link' of symbolic links into the cross-
+ ## tools trees. Also, be sure to collect all of the relative symbolic
+ ## links which are in the cross-tools tree.
+ have_link = dict()
+ with subprocess(["schroot", "-uroot", "-r", "-c", session, "--",
+ "sh", "-e", "-c", """
+ find / -xdev -lname "/usr/local.schroot/cross/*" -printf "%p %l\n"
+ """], stdout = PIPE) as (_, fd_out, _):
+ try:
+ f = OS.fdopen(fd_out)
+ for line in f:
+ dest, src = line.split()
+ have_link[dest] = src
+ finally:
+ f.close()
+ for path in want_link.iterkeys():
+ real = root + path
+ if not OS.path.islink(real): continue
+ have_link[path] = OS.readlink(real)
+
+ ## Add diversions for the paths which need one, but don't have one.
+ ## There's a hack here because the `--no-rename' option was required in
+ ## the same version in which it was introduced, so there's no single
+ ## incantation that will work across the boundary.
+ progress("add missing diversions")
+ with subprocess(["schroot", "-uroot", "-r", "-c", session, "--",
+ "sh", "-e", "-c", """
+ a="%(arch)s"
+
+ if dpkg-divert >/dev/null 2>&1 --no-rename --help
+ then no_rename=--no-rename
+ else no_rename=
+ fi
+
+ while read path; do
+ dpkg-divert --package "install-cross-tools" $no_rename \
+ --divert "$path.$a" --add "$path"
+ done
+ """ % dict(arch = arch)], stdin = PIPE) as (fd_in, _, _):
+ try:
+ f = OS.fdopen(fd_in, 'w')
+ for path in want_div:
+ if path not in have_div: f.write(path + "\n")
+ finally:
+ f.close()
+
+ ## Go through each diverted tool, and, if it hasn't been moved aside,
+ ## then /link/ it across now. If we rename it, then the chroot will
+ ## stop working -- which is why we didn't allow `dpkg-divert' to do the
+ ## rename. We can tell a tool that hasn't been moved, because it's a
+ ## symlink into one of the cross trees.
+ progress("preserve existing foreign files")
+ chroot_cross_prefix = host_to_chroot(crossdir) + "/"
+ chroot_qemu_prefix = host_to_chroot(qemudir) + "/"
+ for path in want_div:
+ real = root + path; div = real + "." + arch; cross = crossdir + path
+ if OS.path.exists(div): continue
+ if not OS.path.exists(real): continue
+ if OS.path.islink(real):
+ realdest = OS.readlink(real)
+ if realdest.startswith(chroot_cross_prefix) or \
+ realdest.startswith(chroot_qemu_prefix):
+ continue
+ if OS.path.islink(cross) and realdest == OS.readlink(cross):
+ continue
+ progress("preserve existing foreign file `%s'" % path)
+ run_root(["ln", real, div])
+
+ ## Update all of the symbolic links which are currently wrong: add
+ ## links which are missing, delete ones which are obsolete, and update
+ ## ones which have the wrong target.
+ progress("update symlinks")
+ for path, src in want_link.iteritems():
+ real = root + path
+ try: old_src = have_link[path]
+ except KeyError: pass
+ else:
+ if src == old_src: continue
+ new = real + ".new"
+ progress("link `%s' -> `%s'" % (path, src))
+ dir = OS.path.dirname(real)
+ if not OS.path.isdir(dir): run_root(["mkdir", "-p", dir])
+ if OS.path.exists(new): run_root(["rm", "-f", new])
+ run_root(["ln", "-s", src, new])
+ run_root(["mv", new, real])
+ for path, src in have_link.iteritems():
+ if path in want_link: continue
+ progress("remove obsolete link `%s' -> `%s'" % (path, src))
+ real = root + path
+ run_root(["rm", "-f", real])
+
+ ## Remove diversions from paths which don't need them any more. Here
+ ## it's safe to rename, because either the tool isn't there, in which
+ ## case it obviously wasn't important, or it is, and `dpkg-divert' will
+ ## atomically replace our link with the foreign version.
+ progress("remove obsolete diversions")
+ with subprocess(["schroot", "-uroot", "-r", "-c", session, "--",
+ "sh", "-e", "-c", """
+ a="%(arch)s"
+
+ while read path; do
+ dpkg-divert --package "install-cross-tools" --rename \
+ --divert "$path.$a" --remove "$path"
+ done
+ """ % dict(arch = arch)], stdin = PIPE) as (fd_in, _, _):
+ try:
+ f = OS.fdopen(fd_in, 'w')
+ for path in have_div:
+ if path not in want_div: f.write(path + "\n")
+ finally:
+ f.close()
+
+ def _make_chroot(me):
+ """
+ Create the source-chroot described by the job's metadata.
+
+ This will recreate a source-chroot from scratch, destroying the existing
+ logical volume if necessary.
+ """
+ with Cleanup() as clean:
+
+ dist, arch = me._dist, me._arch
+ clean.enter(lockfile(chroot_src_lockfile(dist, arch)))
+
+ mnt = chroot_src_mntpt(dist, arch)
+ dev = chroot_src_blkdev(dist, arch)
+ lv = chroot_src_lv(dist, arch)
+ newlv = lv + ".new"
+
+ ## Clean up any leftover debris.
+ if mountpoint_p(mnt): umount(mnt)
+ if block_device_p(dev):
+ run_root(["lvremove", "-f", "%s/%s" % (C.VG, lv)])
+
+ ## Create the logical volume and filesystem. It's important that the
+ ## logical volume not have its official name until after it contains a
+ ## mountable filesystem.
+ progress("create filesystem")
+ run_root(["lvcreate", "--yes", C.LVSZ, "-n", newlv, C.VG])
+ run_root(["mkfs", "-j", "-L%s-%s" % (dist, arch),
+ OS.path.join("/dev", C.VG, newlv)])
+ run_root(["lvrename", C.VG, newlv, lv])
+
+ ## Start installing the chroot.
+ with mount_chroot_src(dist, arch) as mnt:
+
+ ## Set the basic structure.
+ run_root(["mkdir", "-m755", OS.path.join(mnt, "fs")])
+ run_root(["chmod", "750", mnt])
+
+ ## Install the base system.
+ progress("install base system")
+ run_root(["eatmydata", "debootstrap"] +
+ (arch in C.FOREIGN_ARCHS and ["--foreign"] or []) +
+ ["--arch=" + arch, "--variant=minbase",
+ "--include=" + ",".join(C.BASE_PACKAGES),
+ dist, OS.path.join(mnt, "fs"), C.DEBMIRROR])
+
+ ## If this is a cross-installation, then install the necessary `qemu'
+ ## and complete the installation.
+ if arch in C.FOREIGN_ARCHS:
+ qemu = OS.path.join("cross", "%s-%s" % (dist, C.QEMUHOST[arch]),
+ "QEMU", "qemu-%s-static" % C.QEMUARCH[arch])
+ run_root(["install", OS.path.join(C.LOCAL, qemu),
+ OS.path.join(mnt, "fs/usr/bin")])
+ run_root(["chroot", OS.path.join(mnt, "fs"),
+ "/debootstrap/debootstrap", "--second-stage"])
+ run_root(["ln", "-sf",
+ OS.path.join("/usr/local.schroot", qemu),
+ OS.path.join(mnt, "fs/usr/bin")])
+
+ ## Set up `/usr/local'.
+ progress("install `/usr/local' symlink")
+ run_root(["rm", "-rf", OS.path.join(mnt, "fs/usr/local")])
+ run_root(["ln", "-s",
+ OS.path.join("local.schroot", arch),
+ OS.path.join(mnt, "fs/usr/local")])
+
+ ## Install the `apt' configuration.
+ progress("configure package manager")
+ run_root(["rm", "-f", OS.path.join(mnt, "fs/etc/apt/sources.list")])
+ for c in C.APTCONF:
+ run_root(["ln", "-s",
+ OS.path.join("/usr/local.schroot/etc/apt/apt.conf.d", c),
+ OS.path.join(mnt, "fs/etc/apt/apt.conf.d")])
+ run_root(["ln", "-s",
+ "/usr/local.schroot/etc/apt/sources.%s" % dist,
+ OS.path.join(mnt, "fs/etc/apt/sources.list")])
+
+ with safewrite_root\
+ (OS.path.join(mnt, "fs/etc/apt/apt.conf.d/20arch")) as f:
+ f.write("""\
+ ### -*-conf-*-
+
+ APT {
+ Architecture "%s";
+ };
+ """ % arch)
+
+ ## Set up the locale and time zone from the host system.
+ progress("configure locales and timezone")
+ run_root(["cp", "/etc/locale.gen", "/etc/timezone",
+ OS.path.join(mnt, "fs/etc")])
+ with open("/etc/timezone") as f: tz = f.readline().strip()
+ run_root(["ln", "-sf",
+ OS.path.join("/usr/share/timezone", tz),
+ OS.path.join(mnt, "fs/etc/localtime")])
+ run_root(["cp", "/etc/default/locale",
+ OS.path.join(mnt, "fs/etc/default")])
+
+ ## Fix `/etc/mtab'.
+ progress("set `/etc/mtab'")
+ run_root(["ln", "-sf", "/proc/mounts",
+ OS.path.join(mnt, "fs/etc/mtab")])
+
+ ## Prevent daemons from starting within the chroot.
+ progress("inhibit daemon startup")
+ with safewrite_root(OS.path.join(mnt, "fs/usr/sbin/policy-rc.d"),
+ mode = "755") as f:
+ f.write("""\
+ #! /bin/sh
+ echo >&2 "policy-rc.d: Services disabled by policy."
+ exit 101
+ """)
+
+ ## Hack the dynamic linker to prefer libraries in `/usr' over
+ ## `/usr/local'. This prevents `dpkg-shlibdeps' from becoming
+ ## confused.
+ progress("configure dynamic linker")
+ with safewrite_root\
+ (OS.path.join(mnt, "fs/etc/ld.so.conf.d/libc.conf")) as f:
+ f.write("# libc default configuration")
+ with safewrite_root\
+ (OS.path.join(mnt, "fs/etc/ld.so.conf.d/zzz-local.conf")) as f:
+ f.write("""\
+ ### -*-conf-*-
+ ### Local hack to make /usr/local/ late.
+ /usr/local/lib
+ """)
+
+ ## If this is a foreign architecture then we need to set it up.
+ if arch in C.FOREIGN_ARCHS:
+
+ ## Keep the chroot's native Qemu out of our way: otherwise we'll stop
+ ## being able to run programs in the chroot. There's a hack here
+ ## because the `--no-rename' option was required in the same version
+ ## in which is was introduced, so there's no single incantation that
+ ## will work across the boundary.
+ progress("divert emulator")
+ run_schroot_source(dist, arch, ["eatmydata", "sh", "-e", "-c", """
+ if dpkg-divert >/dev/null 2>&1 --no-rename --help
+ then no_rename=--no-rename
+ else no_rename=
+ fi
+
+ dpkg-divert --package install-cross-tools $no_rename \
+ --divert /usr/bin/%(qemu)s.%(arch)s --add /usr/bin/%(qemu)s
+ """ % dict(arch = arch, qemu = "qemu-%s-static" % C.QEMUARCH[arch])])
+
+ ## Install faster native tools.
+ me._install_cross_tools()
+
+ ## Finishing touches.
+ progress("finishing touches")
+ run_schroot_source(dist, arch, ["eatmydata", "sh", "-e", "-c", """
+ apt-get update
+ apt-get -y upgrade
+ apt-get -y install "$@"
+ ldconfig
+ apt-get -y autoremove
+ apt-get clean
+ """, "."] + C.EXTRA_PACKAGES, stdin = DISCARD)
+
+ ## Mark the chroot as done.
+ me._meta.update = zulu()
+ me._meta.write()
+
+ def _update_chroot(me):
+ """Refresh the source-chroot with chroot metadata META."""
+ with Cleanup() as clean:
+ dist, arch = me._dist, me._arch
+ clean.enter(lockfile(chroot_src_lockfile(dist, arch)))
+ run_schroot_source(dist, arch, ["eatmydata", "sh", "-e", "-c", """
+ apt-get update
+ apt-get -y dist-upgrade
+ apt-get -y autoremove
+ apt-get -y clean
+ """], stdin = DISCARD)
+ if arch in C.FOREIGN_ARCHS: me._install_cross_tools()
+ me._meta.update = zulu(); me._meta.write()
+
+ def run(me):
+ if me._meta.update is not None: me._update_chroot()
+ else: me._make_chroot()
+
+###--------------------------------------------------------------------------
+### Extracting the cross tools.
+
+class CrossToolsJob (BaseJob):
+ """Extract cross-tools from a donor chroot."""
+
+ SPECS = C.NATIVE_CHROOTS
+
+ def __init__(me, spec, fresh = CREATE, *args, **kw):
+ super(CrossToolsJob, me).__init__(*args, **kw)
+ me._dist, me._arch = split_dist_arch(spec)
+ me._meta = CrossToolsMetadata.read(me._dist, me._arch)
+ me._fresh = fresh
+ me._chroot = None
+
+ def _mkname(me): return "cross-tools.%s-%s" % (me._dist, me._arch)
+
+ def prepare(me):
+ st, r = check_fresh(me._fresh, me._meta.update)
+ if st is DONE: return
+ me._chroot = ChrootJob.ensure("%s-%s" % (me._dist, me._arch), FRESH)
+ me.await(me._chroot)
+
+ def check(me):
+ status, reason = super(CrossToolsJob, me).check()
+ if status is not READY: return status, reason
+ if me._chroot is not None and me._chroot.started:
+ return READY, "prerequisites run"
+ return check_fresh(me._fresh, me._meta.update)
+
+ def run(me):
+ with Cleanup() as clean:
+
+ dist, arch = me._dist, me._arch
+
+ mymulti = run_program(["dpkg-architecture", "-a" + arch,
+ "-qDEB_HOST_MULTIARCH"],
+ stdout = RETURN).rstrip("\n")
+ crossarchs = [run_program(["dpkg-architecture", "-A" + a,
+ "-qDEB_TARGET_GNU_TYPE"],
+ stdout = RETURN).rstrip("\n")
+ for a in C.FOREIGN_ARCHS]
+
+ crossdir = OS.path.join(C.LOCAL, "cross", "%s-%s" % (dist, arch))
+ crossold = crossdir + ".old"; crossnew = crossdir + ".new"
+ usrbin = OS.path.join(crossnew, "usr/bin")
+
+ clean.enter(lockfile(crosstools_lockfile(dist, arch)))
+ run_program(["rm", "-rf", crossnew])
+ mkdir_p(crossnew)
+
+ ## Open a session to the donor chroot.
+ progress("establish snapshot")
+ session, root = clean.enter(chroot_session(dist, arch))
+
+ ## Make sure the donor tree is up-to-date, and install the extra
+ ## packages we need.
+ progress("install tools packages")
+ run_schroot_session(session, ["eatmydata", "sh", "-e", "-c", """
+ apt-get update
+ apt-get -y upgrade
+ apt-get -y install "$@"
+ """, "."] + C.CROSS_PACKAGES, rootp = True, stdin = DISCARD)
+
+ def chase(path):
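+ """
+ Ensure that PATH, named relative to the chroot's root, also resolves
+ within the new cross-tools tree, copying files and directories from
+ the donor tree and following symbolic links as necessary.
+ """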
+ dest = ""
+
+ ## Work through the remaining components of the PATH.
+ while path != "":
+ ## Split off the first component of the PATH.
+ try: sl = path.index("/")
+ except ValueError: step = path; path = ""
+ else: step, path = path[:sl], path[sl + 1:]
+
+ ## Analyse the component.
+ if step == "" or step == ".":
+ ## A redundant `/' or `./'. Skip it.
+ pass
+ elif step == "..":
+ ## A `../'. Strip off the trailing component of DEST.
+ dest = dest[:dest.rindex("/")]
+ else:
+ ## Something else. Transfer the component name to DEST.
+ dest += "/" + step
+
+ ## If DEST refers to something in the cross-tools tree then we're
+ ## good.
+ crossdest = crossnew + dest
+ try: st = OS.lstat(crossdest)
+ except OSError, err:
+ if err.errno == E.ENOENT:
+ ## No. We need to copy something from the donor tree so that
+ ## the name works.
+
+ st = OS.lstat(root + dest)
+ if ST.S_ISDIR(st.st_mode):
+ OS.mkdir(crossdest)
+ else:
+ progress("copy `%s'" % dest)
+ run_program(["rsync", "-aHR",
+ "%s/.%s" % (root, dest),
+ crossnew])
+ else:
+ raise
+
+ ## If DEST refers to a symbolic link, then prepend the link target
+ ## to PATH so that we can be sure the link will work.
+ if ST.S_ISLNK(st.st_mode):
+ link = OS.readlink(crossdest)
+ if link.startswith("/"): dest = ""; link = link[1:]
+ else:
+ try: dest = dest[:dest.rindex("/")]
+ except ValueError: dest = ""
+ if path == "": path = link
+ else: path = "%s/%s" % (path, link)
+
+ ## Work through the shopping list, copying the things it names into the
+ ## cross-tools tree.
+ scan = []
+ for pat in C.CROSS_PATHS:
+ pat = pat.replace("MULTI", mymulti)
+ any = False
+ for rootpath in GLOB.iglob(root + pat):
+ any = True
+ path = rootpath[len(root):]
+ progress("copy `%s'" % path)
+ run_program(["rsync", "-aHR", "%s/.%s" % (root, path), crossnew])
+ if not any:
+ raise RuntimeError("no matches for cross-tool pattern `%s'" % pat)
+
+ ## Scan the new tree: chase down symbolic links, copying extra stuff
+ ## that we'll need; and examine ELF binaries to make sure we get the
+ ## necessary shared libraries.
+ def visit(_, dir, files):
+ for f in files:
+ path = OS.path.join(dir, f)
+ inside = switch_prefix(path, [(crossnew + "/", "/")])
+ if OS.path.islink(path): chase(inside)
+ if elf_binary_p(arch, path): scan.append(inside)
+ OS.path.walk(crossnew, visit, None)
+
+ ## Work through the ELF binaries in `scan', determining which shared
+ ## libraries they'll need.
+ ##
+ ## The rune running in the chroot session reads ELF binary names on
+ ## stdin, one per line, and runs `ldd' on them to discover the binary's
+ ## needed libraries and resolve them into pathnames. Each pathname is
+ ## printed to stderr as a line `+PATHNAME', followed by a final line
+ ## consisting only of `-' as a terminator. This is necessary so that
+ ## we can tell when we've finished, because newly discovered libraries
+ ## need to be fed back to discover their recursive dependencies. (This
+ ## is why the `WriteLinesSelector' interface is quite so hairy.)
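+ ##
+ ## For example (hypothetical output), submitting `/usr/bin/make' might
+ ## elicit
+ ##
+ ## +/lib/i386-linux-gnu/libc.so.6
+ ## +/lib/ld-linux.so.2
+ ## -
+ ##
+ ## and each library so named becomes a candidate for further scanning.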
+ with subprocess(["schroot", "-r", "-c", session, "--",
+ "sh", "-e", "-c", """
+ while read path; do
+ ldd "$path" | while read a b c d; do
+ case $a:$b:$c:$d in
+ not:a:dynamic:executable) ;;
+ statically:linked::) ;;
+ /*) echo "+$a" ;;
+ *:=\\>:/*) echo "+$c" ;;
+ linux-*) ;;
+ *) echo >&2 "failed to find shared library \\`$a'"; exit 2 ;;
+ esac
+ done
+ echo -
+ done
+ """], stdin = PIPE, stdout = PIPE) as (fd_in, fd_out, _):
+
+ ## Keep track of the number of binaries we've reported to the `ldd'
+ ## process for which we haven't yet seen all of their dependencies.
+ ## (This is wrapped in a `Struct' because of Python's daft scoping
+ ## rules.)
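+ ## Python 2 has no `nonlocal': a nested function can't rebind a
+ ## variable in an enclosing scope, though it can mutate an object
+ ## which such a variable names.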
+ v = Struct(n = 0)
+
+ def line_in():
+ ## Provide a line of input for the `ldd' process: return a path to
+ ## scan, `None' to stall, or raise `StopIteration' when we're done.
+
+ try:
+ ## See if there's something to scan.
+ path = scan.pop()
+
+ except IndexError:
+ ## There's nothing currently waiting to be scanned.
+ if v.n:
+ ## There are still outstanding replies, so stall.
+ return None
+ else:
+ ## There are no outstanding replies left, and we have nothing
+ ## more to scan, so we must be finished.
+ raise StopIteration
+
+ else:
+ ## The `scan' list isn't empty, so return an item from that, and
+ ## remember that there's one more thing we expect to see answers
+ ## from.
+ v.n += 1; return path
+
+ def line_out(line):
+ ## We've received a line from the `ldd' process.
+
+ if line == "-":
+ ## It's finished processing one of our binaries. Note this.
+ ## Maybe it's time to stop.
+ v.n -= 1
+ return
+
+ ## Strip the leading marker (which is just there so that the
+ ## terminating `-' is unambiguous).
+ assert line.startswith("+")
+ lib = line[1:]
+
+ ## If we already have this binary then we'll already have submitted
+ ## it.
+ path = crossnew + lib
+ try: OS.lstat(path)
+ except OSError, err:
+ if err.errno == E.ENOENT: pass
+ else: raise
+ else: return
+
+ ## Copy it into the tools tree, together with any symbolic links
+ ## along the path.
+ chase(lib)
+
+ ## If this is an ELF binary (and it ought to be!) then submit it
+ ## for further scanning.
+ if elf_binary_p(arch, path):
+ scan.append(switch_prefix(path, [(crossnew + "/", "/")]))
+
+ ## And run this entire contraption. When this is done, we should
+ ## have all of the library dependencies for all of our binaries.
+ select_loop([WriteLinesSelector(fd_in, line_in),
+ ReadLinesSelector(fd_out, line_out)])
+
+ ## Set up the cross-compiler and emulator. Start by moving the cross
+ ## compilers and emulator into their specific places, so they don't end
+ ## up cluttering chroots for non-matching architectures.
+ progress("establish TOOLCHAIN and QEMU")
+ OS.mkdir(OS.path.join(crossnew, "TOOLCHAIN"))
+ qemudir = OS.path.join(crossnew, "QEMU")
+ OS.mkdir(qemudir)
+ for gnu in C.FOREIGN_GNUARCHS:
+ OS.mkdir(OS.path.join(crossnew, "TOOLCHAIN", gnu))
+ for f in OS.listdir(usrbin):
+ for gnu in C.FOREIGN_GNUARCHS:
+ gnuprefix = gnu + "-"
+ if f.startswith(gnuprefix):
+ tooldir = OS.path.join(crossnew, "TOOLCHAIN", gnu)
+ OS.rename(OS.path.join(usrbin, f), OS.path.join(tooldir, f))
+ OS.symlink(f, OS.path.join(tooldir, f[len(gnuprefix):]))
+ break
+ else:
+ if f.startswith("qemu-") and f.endswith("-static"):
+ OS.rename(OS.path.join(usrbin, f), OS.path.join(qemudir, f))
+
+ ## The GNU cross compilers try to find their additional pieces via a
+ ## relative path, which isn't going to end well. Add a symbolic link
+ ## at the right place to where the things are actually going to live.
+ toollib = OS.path.join(crossnew, "TOOLCHAIN", "lib")
+ OS.mkdir(toollib)
+ OS.symlink("../../usr/lib/gcc-cross",
+ OS.path.join(toollib, "gcc-cross"))
+
+ ## We're done. Replace the old cross-tools with our new one.
+ me._meta.update = zulu()
+ me._meta.write(crossnew)
+ if OS.path.exists(crossdir):
+ run_program(["rm", "-rf", crossold])
+ run_program(["mv", crossdir, crossold])
+ OS.rename(crossnew, crossdir)
+ run_program(["rm", "-rf", crossold])
+
+###--------------------------------------------------------------------------
+### Building and installing local packages.
+
+def pkg_metadata_lockfile(pkg):
+ return lockfile_path("pkg-meta.%s" % pkg)
+
+def pkg_srcdir_lockfile(pkg, ver):
+ return lockfile_path("pkg-source.%s-%s" % (pkg, ver))
+
+def pkg_srcdir(pkg, ver):
+ return OS.path.join(C.LOCAL, "src", "%s-%s" % (pkg, ver))
+
+def pkg_builddir(pkg, ver, arch):
+ return OS.path.join(pkg_srcdir(pkg, ver), "build.%s" % arch)
+
+class PackageMetadata (BaseMetadata):
+ VARS = ["pkg"] + list(C.ALL_ARCHS)
+
+ @classmethod
+ def read(cls, pkg):
+ try:
+ return super(PackageMetadata, cls)\
+ .read(OS.path.join(C.LOCAL, "src", "META.%s" % pkg))
+ except IOError, err:
+ if err.errno == E.ENOENT: pass
+ else: raise
+ return cls(pkg = pkg)
+
+ def write(me):
+ super(PackageMetadata, me)\
+ .write(OS.path.join(C.LOCAL, "src", "META.%s" % me.pkg))
+
+class PackageSourceJob (BaseJob):
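+ """Find and unpack the source tarball for one of our local packages."""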
+
+ SPECS = C.LOCALPKGS
+
+ def __init__(me, pkg, fresh = CREATE, *args, **kw):
+ super(PackageSourceJob, me).__init__(*args, **kw)
+ me._pkg = pkg
+ tar = None; ver = None
+ r = RX.compile("^%s-(\d.*)\.tar\.(?:Z|z|gz|bz2|xz|lzma)$" %
+ RX.escape(pkg))
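+ ## e.g. `foo-1.2.tar.gz' (a hypothetical name) gives version `1.2'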
+ for f in OS.listdir("pkg"):
+ m = r.match(f)
+ if not m: pass
+ elif tar is not None:
+ raise ExpectedError("multiple source tarballs of package `%s'" % pkg)
+ else: tar, ver = f, m.group(1)
+ if tar is None:
+ raise ExpectedError("no source tarball found for package `%s'" % pkg)
+ me.version = ver
+ me.tarball = OS.path.join("pkg", tar)
+
+ def _mkname(me): return "pkg-source.%s" % me._pkg
+
+ def check(me):
+ status, reason = super(PackageSourceJob, me).check()
+ if status is not READY: return status, reason
+ if OS.path.isdir(pkg_srcdir(me._pkg, me.version)):
+ return DONE, "already unpacked"
+ else:
+ return READY, "no source tree"
+
+ def run(me):
+ with Cleanup() as clean:
+ pkg, ver, tar = me._pkg, me.version, me.tarball
+ srcdir = pkg_srcdir(pkg, ver)
+ newdir = srcdir + ".new"
+
+ progress("unpack `%s'" % me.tarball)
+ clean.enter(lockfile(pkg_srcdir_lockfile(pkg, ver)))
+ run_program(["rm", "-rf", newdir])
+ mkdir_p(newdir)
+ run_program(["tar", "xf", OS.path.join(OS.getcwd(), me.tarball)],
+ cwd = newdir)
+ things = OS.listdir(newdir)
+ if len(things) == 1:
+ OS.rename(OS.path.join(newdir, things[0]), srcdir)
+ OS.rmdir(newdir)
+ else:
+ OS.rename(newdir, srcdir)
+
+class PackageBuildJob (BaseJob):
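+ """Build and install a local package for a particular architecture."""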
+
+ SPECS = ["%s:%s" % (pkg, arch)
+ for pkg in C.LOCALPKGS
+ for arch in C.ALL_ARCHS]
+
+ def __init__(me, spec, fresh = CREATE, *args, **kw):
+ super(PackageBuildJob, me).__init__(*args, **kw)
+ colon = spec.index(":")
+ me._pkg, me._arch = spec[:colon], spec[colon + 1:]
+
+ def _mkname(me): return "pkg-build.%s:%s" % (me._pkg, me._arch)
+
+ def prepare(me):
+ me.await(ChrootJob.ensure("%s-%s" % (C.PRIMARY_DIST, me._arch), CREATE))
+ me._meta = PackageMetadata.read(me._pkg)
+ me._src = PackageSourceJob.ensure(me._pkg, FRESH); me.await(me._src)
+ me._prereq = [PackageBuildJob.ensure("%s:%s" % (prereq, me._arch), FRESH)
+ for prereq in C.PKGDEPS[me._pkg]]
+ for j in me._prereq: me.await(j)
+
+ def check(me):
+ status, reason = super(PackageBuildJob, me).check()
+ if status is not READY: return status, reason
+ if me._src.started: return READY, "fresh source directory"
+ for j in me._prereq:
+ if j.started:
+ return READY, "dependency `%s' freshly installed" % j._pkg
+ if getattr(me._meta, me._arch) == me._src.version:
+ return DONE, "already installed"
+ return READY, "not yet installed"
+
+ def run(me):
+ with Cleanup() as clean:
+ pkg, ver, arch = me._pkg, me._src.version, me._arch
+
+ session, _ = clean.enter(chroot_session(C.PRIMARY_DIST, arch))
+ builddir = OS.path.join(pkg_srcdir(pkg, ver), "build.%s" % arch)
+ chroot_builddir = host_to_chroot(builddir)
+ run_program(["rm", "-rf", builddir])
+ OS.mkdir(builddir)
+
+ progress("prepare %s chroot" % (arch))
+ run_schroot_session(session,
+ ["eatmydata", "apt-get", "update"],
+ rootp = True, stdin = DISCARD)
+ run_schroot_session(session,
+ ["eatmydata", "apt-get", "-y", "upgrade"],
+ rootp = True, stdin = DISCARD)
+ run_schroot_session(session,
+ ["eatmydata", "apt-get", "-y",
+ "install", "pkg-config"],
+ rootp = True, stdin = DISCARD)
+ run_schroot_session(session,
+ ["mount", "-oremount,rw", "/usr/local.schroot"],
+ rootp = True, stdin = DISCARD)
+
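+ ## Note that `.pc' files for locally built packages are kept out of the
+ ## way in `/usr/local/lib/pkgconfig.hidden' (see the install rune
+ ## below), so that only builds which set PKG_CONFIG_PATH explicitly will
+ ## find them.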
+ progress("configure `%s' %s for %s" % (pkg, ver, arch))
+ run_schroot_session(session, ["sh", "-e", "-c", """
+ cd "$1" &&
+ ../configure PKG_CONFIG_PATH=/usr/local/lib/pkgconfig.hidden
+ """, ".", chroot_builddir])
+
+ progress("compile `%s' %s for %s" % (pkg, ver, arch))
+ run_schroot_session(session, ["sh", "-e", "-c", """
+ cd "$1" && make -j4 && make -j4 check
+ """, ".", chroot_builddir])
+
+ existing = getattr(me._meta, arch, None)
+ if existing is not None and existing != ver:
+ progress("uninstall existing `%s' %s for %s" % (pkg, existing, arch))
+ run_schroot_session(session, ["sh", "-e", "-c", """
+ cd "$1" && make uninstall
+ """, ".", OS.path.join(pkg_srcdir(pkg, existing),
+ "build.%s" % arch)],
+ rootp = True)
+
+ progress("install `%s' %s for %s" % (pkg, existing, arch))
+ run_schroot_session(session, ["sh", "-e", "-c", """
+ cd "$1" && make install
+ mkdir -p /usr/local/lib/pkgconfig.hidden
+ mv /usr/local/lib/pkgconfig/*.pc /usr/local/lib/pkgconfig.hidden || :
+ """, ".", chroot_builddir], rootp = True)
+
+ clean.enter(lockfile(pkg_metadata_lockfile(pkg)))
+ me._meta = PackageMetadata.read(pkg)
+ setattr(me._meta, arch, ver); me._meta.write()
+
+ with lockfile(chroot_src_lockfile(C.PRIMARY_DIST, arch)):
+ run_schroot_source(C.PRIMARY_DIST, arch, ["ldconfig"])
+
+###--------------------------------------------------------------------------
+### Process the configuration and options.
+
+OPTIONS = OP.OptionParser\
+ (usage = "chroot-maint [-diknqs] [-fFRESH] [-jN] JOB[.SPEC,...] ...")
+for short, long, props in [
+ ("-d", "--debug", {
+ 'dest': 'debug', 'default': False, 'action': 'store_true',
+ 'help': "print lots of debugging drivel" }),
+ ("-f", "--fresh", {
+ 'dest': 'fresh', 'metavar': 'FRESH', 'default': "create",
+ 'help': "how fresh (`create', `force', or `N[s|m|h|d|w]')" }),
+ ("-i", "--ignore-errors", {
+ 'dest': 'ignerr', 'default': False, 'action': 'store_true',
+ 'help': "ignore all errors encountered while processing" }),
+ ("-j", "--jobs", {
+ 'dest': 'njobs', 'metavar': 'N', 'default': 1, 'type': 'int',
+ 'help': 'run up to N jobs in parallel' }),
+ ("-J", "--forkbomb", {
+ 'dest': 'njobs', 'action': 'store_true',
+ 'help': 'run as many jobs in parallel as possible' }),
+ ("-k", "--keep-going", {
+ 'dest': 'keepon', 'default': False, 'action': 'store_true',
+ 'help': "keep going even if independent jobs fail" }),
+ ("-n", "--dry-run", {
+ 'dest': 'dryrun', 'default': False, 'action': 'store_true',
+ 'help': "don't actually do anything" }),
+ ("-q", "--quiet", {
+ 'dest': 'quiet', 'default': False, 'action': 'store_true',
+ 'help': "don't print the output from successful jobs" }),
+ ("-s", "--silent", {
+ 'dest': 'silent', 'default': False, 'action': 'store_true',
+ 'help': "don't print progress messages" })]:
+ OPTIONS.add_option(short, long, **props)
+
+###--------------------------------------------------------------------------
+### Main program.
+
+R_JOBSERV = RX.compile(r'^--jobserver-(?:fds|auth)=(\d+),(\d+)$')
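+## GNU make advertises its jobserver pipe in MAKEFLAGS, e.g. as
+## `--jobserver-auth=5,6' (`--jobserver-fds=5,6' from older versions).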
+
+JOBMAP = { "chroot": ChrootJob,
+ "cross-tools": CrossToolsJob,
+ "pkg-source": PackageSourceJob,
+ "pkg-build": PackageBuildJob }
+
+R_FRESH = RX.compile(r"^(?:create|force|(\d+)(|[smhdw]))$")
+
+def parse_fresh(spec):
+ m = R_FRESH.match(spec)
+ if not m: raise ExpectedError("bad freshness `%s'" % spec)
+ if spec == "create": fresh = CREATE
+ elif spec == "force": fresh = FORCE
+ else:
+ n, u = int(m.group(1)), m.group(2)
+ if u == "" or u == "s": fresh = n
+ elif u == "m": fresh = 60*n
+ elif u == "h": fresh = 3600*n
+ elif u == "d": fresh = 86400*n
+ elif u == "w": fresh = 604800*n
+ else: assert False
+ return fresh
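+
+## For example, `parse_fresh("30m")' returns 1800, while
+## `parse_fresh("force")' returns the `FORCE' token.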
+
+with toplevel_handler():
+ OPT, args = OPTIONS.parse_args()
+ rfd, wfd = -1, -1
+ ## A `True' from `-J' means no limit at all.
+ njobs = OPT.njobs
+ if njobs is True: njobs = None
+ try: mkflags = OS.environ['MAKEFLAGS']
+ except KeyError: pass
+ else:
+ ff = mkflags.split()
+ for f in ff:
+ if f == "--": break
+ m = R_JOBSERV.match(f)
+ if m: rfd, wfd = int(m.group(1)), int(m.group(2))
+ elif f == '-j': njobs = None
+ elif not f.startswith('-'):
+ for ch in f:
+ if ch == 'i': OPT.ignerr = True
+ elif ch == 'k': OPT.keepon = True
+ elif ch == 'n': OPT.dryrun = True
+ elif ch == 's': OPT.silent = True
+ if OPT.njobs < 1:
+ raise ExpectedError("running no more than %d jobs is silly" % OPT.njobs)
+
+ FRESH = parse_fresh(OPT.fresh)
+
+ SCHED = JobScheduler(rfd, wfd, njobs)
+ OS.environ["http_proxy"] = C.PROXY
+
+ jobs = []
+ if not args: OPTIONS.print_usage(SYS.stderr); SYS.exit(2)
+ for arg in args:
+ try: sl = arg.index("/")
+ except ValueError: fresh = FRESH
+ else: arg, fresh = arg[:sl], parse_fresh(arg[sl + 1:])
+ try: dot = arg.index(".")
+ except ValueError: jty, pats = arg, "*"
+ else: jty, pats = arg[:dot], arg[dot + 1:]
+ try: jcls = JOBMAP[jty]
+ except KeyError: raise ExpectedError("unknown job type `%s'" % jty)
+ specs = []
+ for pat in pats.split(","):
+ any = False
+ for s in jcls.SPECS:
+ if FM.fnmatch(s, pat): specs.append(s); any = True
+ if not any: raise ExpectedError("no match for `%s'" % pat)
+ for s in specs:
+ jobs.append(jcls.ensure(s, fresh))
+
+ SCHED.run()
+
+SYS.exit(RC)
+
+###----- That's all, folks --------------------------------------------------