Commit | Line | Data |
---|---|---|
a98c9dba MW |
1 | #! /usr/bin/python |
2 | ### | |
3 | ### Create, upgrade, and maintain (native and cross-) chroots | |
4 | ### | |
5 | ### (c) 2018 Mark Wooding | |
6 | ### | |
7 | ||
8 | ###----- Licensing notice --------------------------------------------------- | |
9 | ### | |
10 | ### This file is part of the distorted.org.uk chroot maintenance tools. | |
11 | ### | |
12 | ### distorted-chroot is free software: you can redistribute it and/or | |
13 | ### modify it under the terms of the GNU General Public License as | |
14 | ### published by the Free Software Foundation; either version 2 of the | |
15 | ### License, or (at your option) any later version. | |
16 | ### | |
17 | ### distorted-chroot is distributed in the hope that it will be useful, | |
18 | ### but WITHOUT ANY WARRANTY; without even the implied warranty of | |
19 | ### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
20 | ### General Public License for more details. | |
21 | ### | |
22 | ### You should have received a copy of the GNU General Public License | |
23 | ### along with distorted-chroot. If not, write to the Free Software | |
24 | ### Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, | |
25 | ### USA. | |
26 | ||
27 | ## still to do: | |
28 | ## tidy up | |
29 | ||
30 | import contextlib as CTX | |
31 | import errno as E | |
32 | import fcntl as FC | |
33 | import fnmatch as FM | |
34 | import glob as GLOB | |
35 | import itertools as I | |
36 | import optparse as OP | |
37 | import os as OS | |
38 | import random as R | |
39 | import re as RX | |
40 | import signal as SIG | |
41 | import select as SEL | |
42 | import stat as ST | |
43 | from cStringIO import StringIO | |
44 | import sys as SYS | |
45 | import time as T | |
46 | import traceback as TB | |
47 | ||
48 | import jobclient as JC | |
49 | ||
## Program name, taken from argv[0], for use in diagnostics.
QUIS = OS.path.basename(SYS.argv[0])
## Startup timestamp, as a date string and as a raw Unix time, captured once
## so that a whole run uses a consistent notion of `now'.
TODAY = T.strftime("%Y-%m-%d")
NOW = T.time()
53 | ||
54 | ###-------------------------------------------------------------------------- | |
55 | ### Random utilities. | |
56 | ||
## Exit status for the whole run: `error' bumps this to 2.
RC = 0
def moan(msg):
  """Print MSG to stderr as a warning."""
  ## Raw write to fd 2 avoids stdio buffering; suppressed in silent mode.
  if not OPT.silent: OS.write(2, "%s: %s\n" % (QUIS, msg))
def error(msg):
  """Print MSG to stderr, and remember to exit nonzero."""
  global RC
  moan(msg)
  RC = 2
66 | ||
class ExpectedError (Exception):
  """An anticipated fatal error: reported tersely, without a backtrace."""
  pass
70 | ||
@CTX.contextmanager
def toplevel_handler():
  """Context manager: report `ExpectedError's Unixishly and exit nonzero."""
  try:
    yield None
  except ExpectedError as err:
    moan(err)
    SYS.exit(2)
76 | ||
def spew(msg):
  """Print MSG to stderr as a debug trace."""
  ## Only when debugging is enabled; raw write keeps traces unbuffered.
  if OPT.debug: OS.write(2, ";; %s\n" % msg)
80 | ||
class Tag (object):
  """Unique sentinel objects, distinguished only by their label."""
  def __init__(me, label):
    me._label = label
  def __str__(me):
    return '#<%s %s>' % (me.__class__.__name__, me._label)
  ## The printed form and the repr are identical.
  __repr__ = __str__
86 | ||
class Struct (object):
  """A trivial record: attributes are set from the keyword arguments."""
  def __init__(me, **kw):
    for k, v in kw.items(): setattr(me, k, v)
89 | ||
class Cleanup (object):
  """
  A context manager which stacks other context managers.

  By itself it does nothing.  Register context managers with `enter', or
  loose cleanup functions with `add'.  On exit, the registered contexts are
  left and the cleanups run in reverse order of registration.
  """
  def __init__(me):
    me._undo = []
  def __enter__(me):
    return me
  def __exit__(me, exty, exval, extb):
    ## Run the undo stack from the top; if any handler suppresses the
    ## exception, report that upwards.
    suppress = False
    i = len(me._undo)
    while i > 0:
      i -= 1
      if me._undo[i](exty, exval, extb): suppress = True
    return suppress
  def enter(me, ctx):
    """Enter CTX now; arrange to leave it when this manager exits."""
    result = ctx.__enter__()
    me._undo.append(ctx.__exit__)
    return result
  def add(me, func):
    """Arrange to call FUNC (no arguments) when this manager exits."""
    me._undo.append(lambda exty, exval, extb: func())
113 | ||
def zulu(t = None):
  """Render the Unix time T (default: now) as an ISO8601 UTC string."""
  tm = T.gmtime(t)
  return T.strftime("%Y-%m-%dT%H:%M:%SZ", tm)
117 | ||
R_ZULU = RX.compile(r"^(\d+)-(\d+)-(\d+)T(\d+):(\d+):(\d+)Z$")
def unzulu(z):
  """
  Convert the time string Z back to a Unix time.

  This is the inverse of `zulu'.  Raise `ValueError' if Z doesn't have the
  right shape.
  """
  import calendar
  m = R_ZULU.match(z)
  if not m: raise ValueError("bad time spec `%s'" % z)
  yr, mo, dy, hr, mi, se = map(int, m.groups())
  ## `zulu' formats with `gmtime', i.e., in UTC, so convert back with
  ## `calendar.timegm'.  (`T.mktime' interprets its argument in the local
  ## timezone and would be wrong by the UTC offset.)
  return calendar.timegm((yr, mo, dy, hr, mi, se, 0, 0, 0))
125 | ||
126 | ###-------------------------------------------------------------------------- | |
127 | ### Simple select(2) utilities. | |
128 | ||
class BaseSelector (object):
  """
  A do-nothing base class for participants in `select_loop'.

  See `select_loop' for the details of the protocol.
  """
  def preselect(me, rfds, wfds):
    pass
  def postselect_read(me, fd):
    pass
  def postselect_write(me, fd):
    pass
138 | ||
class WriteLinesSelector (BaseSelector):
  """Write whole lines to an output file descriptor."""

  def __init__(me, fd, nextfn = None, *args, **kw):
    """
    Initialize the WriteLinesSelector to write to the file descriptor FD.

    The FD is marked non-blocking.

    The lines are produced by the NEXTFN, which is called without arguments.
    It can affect the output in three ways:

      * It can return a string (or almost any other kind of object, which
        will be converted into a string by `str'), which will be written to
        the descriptor followed by a newline.  Lines are written in the
        order in which they are produced.

      * It can return `None', which indicates that there are no more items
        to be written for the moment.  The function will be called again
        from time to time, to see if it has changed its mind.  This is the
        right thing to do in order to stall output temporarily.

      * It can raise `StopIteration', which indicates that there will never
        be any more items.  The file descriptor will be closed.

    Subclasses can override this behaviour by defining a method `_next' and
    passing `None' as the NEXTFN.
    """
    super(WriteLinesSelector, me).__init__(*args, **kw)
    set_nonblocking(fd)
    me._fd = fd
    if nextfn is not None: me._next = nextfn

    ## Selector state.
    ##
    ##   * `_buf' contains a number of output items, already formatted, and
    ##     ready for output in a single batch.  It might be empty.
    ##
    ##   * `_pos' is the current output position in `_buf'.
    ##
    ##   * `_more' is set unless the `_next' function has raised
    ##     `StopIteration': it indicates that we should close the
    ##     descriptor once all of the remaining data in the buffer has been
    ##     sent.
    me._buf = ""
    me._pos = 0
    me._more = True

  def _refill(me):
    """Refill `_buf' by calling `_next'."""
    sio = StringIO(); n = 0
    ## Batch up roughly a page's worth of output at a time.
    while n < 4096:
      try: item = me._next()
      except StopIteration: me._more = False; break
      if item is None: break
      item = str(item)
      sio.write(item); sio.write("\n"); n += len(item) + 1
    me._buf = sio.getvalue(); me._pos = 0

  def preselect(me, rfds, wfds):
    if me._fd == -1: return
    if me._buf == "" and me._more: me._refill()
    if me._buf != "" or not me._more: wfds.append(me._fd)

  def postselect_write(me, fd):
    if fd != me._fd: return
    while True:
      if me._pos >= len(me._buf):
        if me._more: me._refill()
        if not me._more: OS.close(me._fd); me._fd = -1; break
      if not me._buf: break
      try: n = OS.write(me._fd, me._buf[me._pos:])
      except OSError as err:
        ## NB: the `errno' module spells this `EWOULDBLOCK' -- there is no
        ## `E.WOULDBLOCK' attribute, which would raise `AttributeError'
        ## here just as the pipe filled up.
        if err.errno == E.EAGAIN or err.errno == E.EWOULDBLOCK: break
        elif err.errno == E.EPIPE: OS.close(me._fd); me._fd = -1; break
        else: raise
      me._pos += n
215 | ||
class ReadLinesSelector (BaseSelector):
  """Report whole lines from an input file descriptor as they arrive."""

  def __init__(me, fd, linefn = None, *args, **kw):
    """
    Initialize the ReadLinesSelector to read from the file descriptor FD.

    The FD is marked non-blocking.

    For each whole line, and the final partial line (if any), the selector
    calls LINEFN with the line as an argument (without the terminating
    newline, if any).

    Subclasses can override this behaviour by defining a method `_line' and
    passing `None' as the LINEFN.
    """
    super(ReadLinesSelector, me).__init__(*args, **kw)
    set_nonblocking(fd)
    me._fd = fd
    me._buf = ""
    if linefn is not None: me._line = linefn

  def preselect(me, rfds, wfds):
    if me._fd != -1: rfds.append(me._fd)

  def postselect_read(me, fd):
    if fd != me._fd: return
    while True:
      try: buf = OS.read(me._fd, 4096)
      except OSError as err:
        ## NB: the `errno' module spells this `EWOULDBLOCK' -- there is no
        ## `E.WOULDBLOCK' attribute, which would raise `AttributeError'
        ## once the pipe drained.
        if err.errno == E.EAGAIN or err.errno == E.EWOULDBLOCK: break
        else: raise
      if buf == "":
        ## End-of-file: flush any trailing partial line.
        OS.close(me._fd); me._fd = -1
        if me._buf: me._line(me._buf)
        break
      ## Carve the input into lines, remembering the trailing fragment.
      buf = me._buf + buf
      i = 0
      while True:
        try: j = buf.index("\n", i)
        except ValueError: break
        me._line(buf[i:j])
        i = j + 1
      me._buf = buf[i:]
260 | ||
def select_loop(selectors):
  """
  Multiplex I/O between the various SELECTORS.

  A `selector' SEL is an object implementing the selector protocol, which
  consists of three methods.

    * SEL.preselect(RFDS, WFDS) -- add any file descriptors which the
      selector is interested in reading from to the list RFDS, and add
      file descriptors it's interested in writing to to the list WFDS.

    * SEL.postselect_read(FD) -- informs the selector that FD is ready for
      reading.

    * SEL.postselect_write(FD) -- informs the selector that FD is ready
      for writing.

  On each pass, every selector is polled for interesting descriptors; once
  some event arrives, every selector is told about every ready descriptor.
  The loop ends when no selector is interested in any events.  This is
  simple but rather inefficient.
  """
  while True:
    rfds, wfds = [], []
    for sel in selectors: sel.preselect(rfds, wfds)
    if not (rfds or wfds): return
    ready_r, ready_w, _ = SEL.select(rfds, wfds, [])
    for fd in ready_r:
      for sel in selectors: sel.postselect_read(fd)
    for fd in ready_w:
      for sel in selectors: sel.postselect_write(fd)
300 | ||
301 | ###-------------------------------------------------------------------------- | |
302 | ### Running subprocesses. | |
303 | ||
def wait_outcome(st):
  """
  Given a status ST from `waitpid' (or similar), return a human-readable
  description of the outcome.
  """
  if OS.WIFEXITED(st):
    rc = OS.WEXITSTATUS(st)
    if rc: return "failed: rc = %d" % rc
    return "completed successfully"
  if OS.WIFSIGNALED(st):
    return "killed by signal %d" % OS.WTERMSIG(st)
  return "died with incomprehensible status 0x%04x" % st
314 | ||
class SubprocessFailure (Exception):
  """
  An exception indicating that a subprocess failed.

  Attributes: `what' is a description of the command; `st' the raw wait
  status; `rc' the exit code (or `None'); `sig' the killing signal (or
  `None').
  """
  def __init__(me, what, st):
    me.st = st
    me.what = what
    me.rc, me.sig = None, None
    if OS.WIFEXITED(st): me.rc = OS.WEXITSTATUS(st)
    elif OS.WIFSIGNALED(st): me.sig = OS.WTERMSIG(st)
  def __str__(me):
    return "subprocess `%s' %s" % (me.what, wait_outcome(me.st))
325 | ||
## Distinguished values for the `subprocess' descriptor dispositions.
INHERIT = Tag('INHERIT')
PIPE = Tag('PIPE')
DISCARD = Tag('DISCARD')
@CTX.contextmanager
def subprocess(command,
               stdin = INHERIT, stdout = INHERIT, stderr = INHERIT,
               cwd = INHERIT, jobserver = DISCARD):
  """
  Hairy context manager for running subprocesses.

  The COMMAND is a list of arguments; COMMAND[0] names the program to be
  invoked.  (There's currently no way to run a program with an unusual
  `argv[0]'.)

  The keyword arguments `stdin', `stdout', and `stderr' explain what to do
  with the standard file descriptors.

    * `INHERIT' means that they should be left alone: the child will use a
      copy of the parent's descriptor.  This is the default.

    * `DISCARD' means that the descriptor should be re-opened onto
      `/dev/null' (for reading or writing as appropriate).

    * `PIPE' means that the descriptor should be re-opened as (the read or
      write end, as appropriate, of) a pipe, and the other end returned to
      the context body.

  Similarly, the JOBSERVER may be `INHERIT' to pass the jobserver
  descriptors and environment variable down to the child, or `DISCARD' to
  close it.  The default is `DISCARD'.

  The CWD may be `INHERIT' to run the child with the same working directory
  as the parent, or a pathname to change to an explicitly given working
  directory.

  The context body is passed three values, which are file descriptors for
  the other pipe ends for stdin, stdout, and stderr respectively, or -1 if
  there is no pipe.

  The context owns the pipe descriptors, and is expected to close them
  itself.  (Timing of closure is significant, particularly for `stdin'.)
  """

  ## Set up.
  r_in, w_in = -1, -1
  r_out, w_out = -1, -1
  r_err, w_err = -1, -1
  spew("running subprocess `%s'" % " ".join(command))

  ## Clean up as necessary...
  try:

    ## Set up stdin.
    if stdin is PIPE: r_in, w_in = OS.pipe()
    elif stdin is DISCARD: r_in = OS.open("/dev/null", OS.O_RDONLY)
    elif stdin is not INHERIT:
      raise ValueError("bad `stdin' value `%r'" % stdin)

    ## Set up stdout.
    if stdout is PIPE: r_out, w_out = OS.pipe()
    elif stdout is DISCARD: w_out = OS.open("/dev/null", OS.O_WRONLY)
    elif stdout is not INHERIT:
      raise ValueError("bad `stdout' value `%r'" % stdout)

    ## Set up stderr.
    if stderr is PIPE: r_err, w_err = OS.pipe()
    elif stderr is DISCARD: w_err = OS.open("/dev/null", OS.O_WRONLY)
    elif stderr is not INHERIT:
      raise ValueError("bad `stderr' value `%r'" % stderr)

    ## Start up the child.
    kid = OS.fork()

    if kid == 0:
      ## Child process.

      ## Fix up stdin.
      if r_in != -1: OS.dup2(r_in, 0); OS.close(r_in)
      if w_in != -1: OS.close(w_in)

      ## Fix up stdout.
      if w_out != -1: OS.dup2(w_out, 1); OS.close(w_out)
      if r_out != -1: OS.close(r_out)

      ## Fix up stderr.
      if w_err != -1: OS.dup2(w_err, 2); OS.close(w_err)
      if r_err != -1: OS.close(r_err)

      ## Change directory.
      if cwd is not INHERIT: OS.chdir(cwd)

      ## Fix up the jobserver.
      if jobserver is DISCARD: SCHED.close_jobserver()

      ## Run the program.  (Note the two format arguments: the original
      ## code passed only `err.strerror' and died with `TypeError' instead
      ## of printing the diagnostic.)
      try: OS.execvp(command[0], command)
      except OSError as err:
        moan("failed to run `%s': %s" % (command[0], err.strerror))
        OS._exit(127)

    ## Close the other ends of the pipes.
    if r_in != -1: OS.close(r_in); r_in = -1
    if w_out != -1: OS.close(w_out); w_out = -1
    if w_err != -1: OS.close(w_err); w_err = -1

    ## Return control to the context body.  Remember not to close its
    ## pipes.
    yield w_in, r_out, r_err
    w_in = r_out = r_err = -1

    ## Collect the child process's exit status.
    _, st = OS.waitpid(kid, 0)
    spew("subprocess `%s' %s" % (" ".join(command), wait_outcome(st)))
    if st: raise SubprocessFailure(" ".join(command), st)

  ## Tidy up.
  finally:

    ## Close any left-over file descriptors.
    for fd in [r_in, w_in, r_out, w_out, r_err, w_err]:
      if fd != -1: OS.close(fd)
446 | ||
def set_nonblocking(fd):
  """Set the `O_NONBLOCK' flag on the descriptor FD."""
  flags = FC.fcntl(fd, FC.F_GETFL)
  FC.fcntl(fd, FC.F_SETFL, flags | OS.O_NONBLOCK)
450 | ||
class DribbleOut (BaseSelector):
  """A simple selector to feed a string to a descriptor, in pieces."""
  def __init__(me, fd, string, *args, **kw):
    super(DribbleOut, me).__init__(*args, **kw)
    me._fd = fd
    me._string = string
    me._i = 0
    set_nonblocking(me._fd)
    ## No return value to report: `run_program' skips `None' results.
    me.result = None
  def preselect(me, rfds, wfds):
    if me._fd != -1: wfds.append(me._fd)
  def postselect_write(me, fd):
    if fd != me._fd: return
    try:
      n = OS.write(me._fd, me._string)
    except OSError as err:
      if err.errno in (E.EAGAIN, E.EWOULDBLOCK): return
      if err.errno == E.EPIPE:
        OS.close(me._fd); me._fd = -1
        return
      raise
    if n == len(me._string):
      ## Everything written: close to deliver EOF to the reader.
      OS.close(me._fd); me._fd = -1
    else:
      me._string = me._string[n:]
471 | ||
class DribbleIn (BaseSelector):
  """A simple selector to collect all the input as a big string."""
  def __init__(me, fd, *args, **kw):
    super(DribbleIn, me).__init__(*args, **kw)
    me._fd = fd
    me._buf = StringIO()
    set_nonblocking(me._fd)
  def preselect(me, rfds, wfds):
    if me._fd != -1: rfds.append(me._fd)
  def postselect_read(me, fd):
    if fd != me._fd: return
    while True:
      try:
        chunk = OS.read(me._fd, 4096)
      except OSError as err:
        if err.errno in (E.EAGAIN, E.EWOULDBLOCK): break
        raise
      if chunk == "":
        OS.close(me._fd); me._fd = -1
        break
      me._buf.write(chunk)
  @property
  def result(me):
    """The input collected so far, as a single string."""
    return me._buf.getvalue()
492 | ||
RETURN = Tag('RETURN')
def run_program(command,
                stdin = INHERIT, stdout = INHERIT, stderr = INHERIT,
                *args, **kwargs):
  """
  A simplifying wrapper around `subprocess'.

  The COMMAND is a list of arguments; COMMAND[0] names the program to be
  invoked, as for `subprocess'.

  The keyword arguments `stdin', `stdout', and `stderr' explain what to do
  with the standard file descriptors.

    * `INHERIT' means that they should be left alone: the child will use a
      copy of the parent's descriptor.

    * `DISCARD' means that the descriptor should be re-opened onto
      `/dev/null' (for reading or writing as appropriate).

    * `RETURN', for an output descriptor, means that all of the output
      produced on that descriptor should be collected and returned as a
      string.

    * A string, for stdin, means that the string should be provided on the
      child's standard input.

  (The value `PIPE' is not permitted here.)

  Other arguments are passed on to `subprocess'.

  If no descriptors are marked `RETURN', then the function returns `None';
  if exactly one descriptor is so marked, then the function returns that
  descriptor's output as a string; otherwise, it returns a tuple of
  strings for each such descriptor, in the usual order.
  """
  kw = dict(kwargs)
  makesel = []

  ## Work out what to do with each descriptor; collect a factory for each
  ## selector we shall need.
  if isinstance(stdin, basestring):
    kw['stdin'] = PIPE
    makesel.append(lambda fds: DribbleOut(fds[0], stdin))
  elif stdin is INHERIT or stdin is DISCARD:
    kw['stdin'] = stdin
  else:
    raise ValueError("bad `stdin' value `%r'" % stdin)

  if stdout is RETURN:
    kw['stdout'] = PIPE
    makesel.append(lambda fds: DribbleIn(fds[1]))
  elif stdout is INHERIT or stdout is DISCARD:
    kw['stdout'] = stdout
  else:
    raise ValueError("bad `stdout' value `%r'" % stdout)

  if stderr is RETURN:
    kw['stderr'] = PIPE
    makesel.append(lambda fds: DribbleIn(fds[2]))
  elif stderr is INHERIT or stderr is DISCARD:
    kw['stderr'] = stderr
  else:
    raise ValueError("bad `stderr' value `%r'" % stderr)

  ## Run the child, shovelling data through the pipes until it finishes.
  with subprocess(command, *args, **kw) as fds:
    sels = [make(fds) for make in makesel]
    select_loop(sels)
    results = []
    for sel in sels:
      r = sel.result
      if r is not None: results.append(r)
    if len(results) == 0: return None
    elif len(results) == 1: return results[0]
    else: return tuple(results)
562 | ||
563 | ###-------------------------------------------------------------------------- | |
564 | ### Other system-ish utilities. | |
565 | ||
@CTX.contextmanager
def safewrite(path):
  """
  Context manager for writing to a file.

  A new file, named `PATH.new', is opened for writing, and the file object
  provided to the context body.  If the body completes normally, the file
  is closed and renamed to PATH.  If the body raises an exception, the
  file is still closed, but not renamed into place.
  """
  tmp = path + ".new"
  f = open(tmp, "w")
  try:
    yield f
  finally:
    f.close()
  ## Only reached on normal completion: commit the new file.
  OS.rename(tmp, path)
579 | ||
@CTX.contextmanager
def safewrite_root(path, mode = None, uid = None, gid = None):
  """
  Context manager for writing to a file with root privileges.

  This is as for `safewrite', but the file is opened and written as root.
  MODE, UID, and GID, if given, are applied to the new file (as for
  chmod(1) and chown(1)) before it's moved into place.
  """
  tmp = path + ".new"
  with subprocess(C.ROOTLY + ["tee", tmp],
                  stdin = PIPE, stdout = DISCARD) as (fd_in, _, _):
    pipe = OS.fdopen(fd_in, 'w')
    try: yield pipe
    finally: pipe.close()
  if mode is not None:
    run_program(C.ROOTLY + ["chmod", mode, tmp])
  if uid is not None:
    owner = uid
    if gid is not None: owner += ":" + gid
    run_program(C.ROOTLY + ["chown", owner, tmp])
  elif gid is not None:
    run_program(C.ROOTLY + ["chgrp", gid, tmp])
  run_program(C.ROOTLY + ["mv", tmp, path])
601 | ||
def mountpoint_p(dir):
  """Return true if DIR is a mountpoint."""

  ## A mountpoint is a directory whose device number differs from that of
  ## its parent directory.
  try:
    st_dir = OS.stat(dir)
  except OSError as err:
    if err.errno == E.ENOENT: return False
    raise
  if not ST.S_ISDIR(st_dir.st_mode): return False
  st_parent = OS.stat(OS.path.join(dir, ".."))
  return st_parent.st_dev != st_dir.st_dev
614 | ||
def mkdir_p(dir, mode = 0o777):
  """
  Make a directory DIR, and any parents, as necessary.

  Unlike `OS.makedirs', this doesn't fail if DIR already exists.
  """
  ## Build the path up one component at a time, creating each directory as
  ## needed.  Note that a leading `/' must be preserved explicitly:
  ## `OS.path.join("", "tmp")' is the *relative* path "tmp", so the naive
  ## loop created absolute targets relative to the working directory.
  d = dir.startswith("/") and "/" or ""
  for p in dir.split("/"):
    if p == "": continue
    d = OS.path.join(d, p)
    try: OS.mkdir(d, mode)
    except OSError as err:
      if err.errno == E.EEXIST: pass
      else: raise
629 | ||
def umount(fs):
  """
  Unmount the filesystem FS.

  The FS may be the block device holding the filesystem, or (more usually)
  the mount point.
  """

  ## Sometimes random things can prevent unmounting (umount(8) reports
  ## rc = 32), so be persistent: retry a few times with a short delay, and
  ## let the final attempt report its error.
  for attempt in range(5):
    try:
      run_program(C.ROOTLY + ["umount", fs], stderr = DISCARD)
      return
    except SubprocessFailure as err:
      if err.rc != 32: raise
    T.sleep(0.2)
  run_program(C.ROOTLY + ["umount", fs], stderr = DISCARD)
647 | ||
@CTX.contextmanager
def lockfile(lock, exclp = True, waitp = True):
  """
  Acquire an exclusive lock on a named file LOCK while executing the body.

  If EXCLP is true, take an exclusive lock; otherwise take a shared lock.
  If WAITP is true, wait until the lock is available; if false, then fail
  immediately if the lock can't be acquired.
  """
  fd = -1
  flag = 0
  if exclp: flag |= FC.LOCK_EX
  else: flag |= FC.LOCK_SH
  if not waitp: flag |= FC.LOCK_NB
  spew("acquiring %s lock on `%s'" %
       (exclp and "exclusive" or "shared", lock))
  try:
    while True:

      ## Open the file and take note of which file it is.
      fd = OS.open(lock, OS.O_RDWR | OS.O_CREAT, 0o666)
      st0 = OS.fstat(fd)

      ## Acquire the lock, waiting if necessary.
      FC.lockf(fd, flag)

      ## Check that the lock file is still the same one.  It's permissible
      ## for the lock holder to release the lock by unlinking or renaming
      ## the lock file, in which case there might be a different lockfile
      ## there now which we need to acquire instead.
      ##
      ## It's tempting to `optimize' this code by opening a new file
      ## descriptor here so as to elide the additional call to fstat(2)
      ## above.  But this doesn't work: if we successfully acquire the
      ## lock, we then have two file descriptors open on the lock file, so
      ## we have to close one -- but, under the daft fcntl(2) rules, even
      ## closing `nfd' will release the lock immediately.
      try:
        st1 = OS.stat(lock)
      except OSError as err:
        ## The file vanished while we were waiting: leave `st1' unset so
        ## that we go round again.  (Falling through with `st1' unbound,
        ## as the code used to, raises `NameError' below.)
        if err.errno == E.ENOENT: st1 = None
        else: raise
      if st1 is not None and \
         st0.st_dev == st1.st_dev and st0.st_ino == st1.st_ino:
        break
      ## Reset `fd' so a failure in the next iteration's open(2) can't
      ## make the `finally' clause close this descriptor a second time.
      OS.close(fd); fd = -1

    ## We have the lock, so away we go.
    spew("lock `%s' acquired" % lock)
    yield None
    spew("lock `%s' released" % lock)

  finally:
    if fd != -1: OS.close(fd)
699 | ||
def block_device_p(dev):
  """Return true if DEV names a block device."""
  try:
    st = OS.stat(dev)
  except OSError as err:
    if err.errno == E.ENOENT: return False
    raise
  return ST.S_ISBLK(st.st_mode)
707 | ||
708 | ###-------------------------------------------------------------------------- | |
709 | ### Running parallel jobs. | |
710 | ||
## Return codes from `check': see `BaseJob.check' for their meanings.
SLEEP = Tag('SLEEP')
READY = Tag('READY')
FAILED = Tag('FAILED')
DONE = Tag('DONE')
716 | ||
class BaseJob (object):
  """
  Base class for jobs.

  Subclasses must implement `run' and `_mkname', and probably ought to extend
  `prepare' and `check'.
  """

  ## A magic token to prevent sneaky uninterned jobs.
  _MAGIC = Tag('MAGIC')

  ## A map from job names to objects.  Class-level, so it is shared by all
  ## job classes: names must be unique across the whole program.
  _MAP = {}

  ## Number of tail lines of the log to print on failure.
  LOGLINES = 20

  def __init__(me, _token, *args, **kw):
    """
    Initialize a job.

    Jobs are interned!  Don't construct instances (of subclasses) directly:
    use the `ensure' class method.
    """
    assert _token is me._MAGIC
    super(BaseJob, me).__init__(*args, **kw)

    ## Dependencies on other jobs.  `_deps' stays `None' until the job is
    ## prepared (presumably `prepare' populates it via `await' -- confirm
    ## against the scheduler); `_waiting' collects jobs blocked on us.
    me._deps = None
    me._waiting = set()

    ## Attributes maintained by the JobServer.
    me.done = False
    me.started = False
    me.win = None
    me._token = None
    me._known = False
    me._st = None
    me._logkid = -1
    me._logfile = None

  def prepare(me):
    """
    Establish any prerequisite jobs.

    Delaying this allows command-line settings to override those chosen by
    dependent jobs.
    """
    pass

  @classmethod
  def ensure(cls, *args, **kw):
    """
    Return the unique job with the given parameters.

    If a matching job already exists, then return it.  Otherwise, create the
    new job, register it in the table, and notify the scheduler about it.
    """
    ## We must construct a candidate first, because the interning key is
    ## its `name', which only the instance can compute; a duplicate
    ## candidate is simply discarded.
    me = cls(_token = cls._MAGIC, *args, **kw)
    try:
      job = cls._MAP[me.name]
    except KeyError:
      cls._MAP[me.name] = me
      SCHED.add(me)
      return me
    else:
      return job

  ## Naming.
  @property
  def name(me):
    """Return the job's name, as calculated by `_mkname' (cached)."""
    try: name = me._name
    except AttributeError: name = me._name = me._mkname()
    return name

  ## Subclass responsibilities.
  def _mkname(me):
    """
    Return the job's name.

    By default, this is an unhelpful string which is distinct for every job.
    Subclasses should normally override this method to return a name as an
    injective function of the job parameters.
    """
    return "%s.%x" % (me.__class__.__name__, id(me))

  def check(me):
    """
    Return whether the job is ready to run.

    Returns a pair STATE, REASON.  The REASON is a human-readable string
    explaining what's going on, or `None' if it's not worth explaining.  The
    STATE is one of the following.

      * `READY' -- the job can be run at any time.

      * `FAILED' -- the job can't be started.  Usually, this means that some
        prerequisite job failed, there was some error in the job's
        parameters, or the environment is unsuitable for the job to run.

      * `DONE' -- the job has nothing to do.  Usually, this means that the
        thing the job acts on is already up-to-date.  It's bad form to do
        even minor work in `check'.

      * `SLEEP' -- the job can't be run right now.  It has arranged to be
        retried if conditions change.  (Spurious wakeups are permitted and
        must be handled correctly.)

    The default behaviour checks the set of dependencies, as built by the
    `await' method, and returns `SLEEP' or `FAILED' as appropriate, or
    `READY' if all the prerequisite jobs have completed successfully.
    """
    for job in me._deps:
      if not job.done:
        ## Subscribe to the incomplete job so we get woken when it ends.
        job._waiting.add(me)
        return SLEEP, "waiting for job `%s'" % job.name
      elif not job.win and not OPT.ignerr:
        return FAILED, "dependent on failed job `%s'" % job.name
    return READY, None

  ## Subclass utilities.
  ## NOTE(review): `await' became a reserved word in Python 3.7; this name
  ## must change if the program is ever ported from Python 2.
  def await(me, job):
    """Make sure that JOB completes before allowing this job to start."""
    me._deps.add(job)

  def _logtail(me):
    """
    Dump the last `LOGLINES' lines of the logfile.

    This is called if the job fails and was being run quietly, to provide the
    user with some context for the failure.
    """

    ## Gather blocks from the end of the log until we have enough lines.
    with open(me._logfile, 'r') as f:
      nlines = 0
      bufs = []
      bufsz = 4096
      ## Seek to the end to learn the file size, then read backwards in
      ## `bufsz' chunks, counting newlines as we go.
      f.seek(0, 2); off = f.tell()
      spew("start: off = %d" % off)
      while nlines <= me.LOGLINES and off > 0:
        off = max(0, off - bufsz)
        f.seek(off, 0)
        spew("try at off = %d" % off)
        buf = f.read(bufsz)
        nlines += buf.count("\n")
        spew("now lines = %d" % nlines)
        bufs.append(buf)
      ## The chunks were collected back-to-front: reassemble in file order.
      buf = ''.join(reversed(bufs))

    ## We probably overshot.  Skip the extra lines from the start.
    i = 0
    while nlines > me.LOGLINES: i = buf.index("\n", i) + 1; nlines -= 1

    ## If we ended up trimming the log, print an ellipsis.
    if off > 0 or i > 0: print "%-*s * [...]" % (TAGWD, me.name)

    ## Print the log tail.
    lines = buf[i:].split("\n")
    if lines and lines[-1] == '': lines.pop()
    for line in lines: print "%-*s %s" % (TAGWD, me.name, line)
879 | ||
class BaseJobToken (object):
  """
  A job token is the authorization for a job to be run.

  Subclasses must implement `recycle', which hands the token on so that
  some other job may use it.
  """
  pass
888 | ||
class TrivialJobToken (BaseJobToken):
  """
  A trivial token, endlessly reusable, for issuing jobs in parallel without
  any limit.

  A single shared instance (below) suffices.
  """
  def recycle(me):
    spew("no token needed; nothing to recycle")

## The shared do-nothing token.
TRIVIAL_TOKEN = TrivialJobToken()
898 | ||
class JobServerToken (BaseJobToken):
  """A job token holding a byte read from the jobserver pipe."""
  def __init__(me, char, pipefd, *args, **kw):
    super(JobServerToken, me).__init__(*args, **kw)
    ## Keep the byte we read, and the descriptor to put it back into.
    me._char, me._fd = char, pipefd
  def recycle(me):
    spew("returning token to jobserver pipe")
    OS.write(me._fd, me._char)
908 | ||
class PrivateJobToken (BaseJobToken):
  """
  The private job token belonging to a scheduler.

  When running under a GNU Make jobserver, there is a token for each byte
  in the pipe, plus one implicit extra token standing for the slot we
  ourselves were started in.  This class represents that extra token: it is
  returned to the owning scheduler rather than to the pipe.
  """
  def __init__(me, sched, *args, **kw):
    super(PrivateJobToken, me).__init__(*args, **kw)
    me._sched = sched
  def recycle(me):
    ## The scheduler can only ever have one private token outstanding.
    assert me._sched._privtoken is None
    spew("recycling private token")
    me._sched._privtoken = me
924 | ||
## Width of the job-name tag column in progress output (used as the `*'
## width in the `%-*s' formats below).
TAGWD = 29
## How many old per-job log files to keep when `run_job' cleans up.
LOGKEEP = 20
927 | ||
928 | class JobScheduler (object): | |
929 | """ | |
930 | The main machinery for running and ordering jobs. | |
931 | ||
932 | This handles all of the details of job scheduling. | |
933 | """ | |
934 | ||
935 | def __init__(me, rfd = -1, wfd = -1, npar = 1): | |
936 | """ | |
937 | Initialize a scheduler. | |
938 | ||
939 | * RFD and WFD are the read and write ends of the jobserver pipe, as | |
940 | determined from the `MAKEFLAGS' environment variable, or -1. | |
941 | ||
942 | * NPAR is the maximum number of jobs to run in parallel, or `True' if | |
943 | there is no maximum (i.e., we're in `forkbomb' mode). | |
944 | """ | |
945 | ||
946 | ## Set the parallelism state. The `_rfd' and `_wfd' are the read and | |
947 | ## write ends of the jobserver pipe, or -1 if there is no jobserver. | |
948 | ## `_par' is true if we're meant to run jobs in parallel. The case _par | |
949 | ## and _rfd = -1 means unconstrained parallelism. | |
950 | ## | |
951 | ## The jobserver pipe contains a byte for each shared job slot. A | |
952 | ## scheduler reads a byte from the pipe for each job it wants to run | |
953 | ## (nearly -- see `_privtoken' below), and puts the byte back when the | |
954 | ## job finishes. The GNU Make jobserver protocol specification insists | |
955 | ## that we preserve the value of the byte in the pipe (though doesn't | |
956 | ## currently make any use of this flexibility), so we record it in a | |
957 | ## `JobToken' object's `_char' attribute. | |
958 | me._par = rfd != -1 or npar is True or npar != 1 | |
959 | spew("par is %r" % me._par) | |
960 | if rfd == -1 and npar > 1: | |
961 | rfd, wfd = OS.pipe() | |
962 | OS.write(wfd, (npar - 1)*'+') | |
963 | OS.environ["MAKEFLAGS"] = \ | |
964 | (" -j --jobserver-auth=%(rfd)d,%(wfd)d " + | |
965 | "--jobserver-fds=%(rfd)d,%(wfd)d") % dict(rfd = rfd, wfd = wfd) | |
966 | me._rfd = rfd; me._wfd = wfd | |
967 | ||
968 | ## The scheduler state. A job starts in the `_check' list. Each | |
969 | ## iteration of the scheduler loop will inspect the jobs here and see | |
970 | ## whether it's ready to run: if not, it gets put in the `_sleep' list, | |
971 | ## where it will languish until something moves it back; if it is ready, | |
972 | ## it gets moved to the `_ready' list to wait for a token from the | |
973 | ## jobserver. At that point the job can be started, and it moves to the | |
974 | ## `_kidmap', which associates a process-id with each running job. | |
975 | ## Finally, jobs which have completed are simply forgotten. The `_njobs' | |
976 | ## counter keeps track of how many jobs are outstanding, so that we can | |
977 | ## stop when there are none left. | |
978 | me._check = set() | |
979 | me._sleep = set() | |
980 | me._ready = set() | |
981 | me._kidmap = {} | |
982 | me._logkidmap = {} | |
983 | me._njobs = 0 | |
984 | ||
985 | ## As well as the jobserver pipe, we implicitly have one extra job slot, | |
986 | ## which is the one we took when we were started by our parent. The | |
987 | ## right to do processing in this slot is represnted by the `private | |
988 | ## token' here, distinguished from tokens from the jobserver pipe by | |
989 | ## having `None' as its `_char' value. | |
990 | me._privtoken = PrivateJobToken(me) | |
991 | ||
992 | def add(me, job): | |
993 | """Notice a new job and arrange for it to (try to) run.""" | |
994 | if job._known: return | |
995 | spew("adding new job `%s'" % job.name) | |
996 | job._known = True | |
997 | me._check.add(job) | |
998 | me._njobs += 1 | |
999 | ||
1000 | def close_jobserver(me): | |
1001 | """ | |
1002 | Close the jobserver file descriptors. | |
1003 | ||
1004 | This should be called within child processes to prevent them from messing | |
1005 | with the jobserver. | |
1006 | """ | |
1007 | if me._rfd != -1: OS.close(me._rfd); me._rfd = -1 | |
1008 | if me._wfd != -1: OS.close(me._wfd); me._wfd = -1 | |
1009 | try: del OS.environ["MAKEFLAGS"] | |
1010 | except KeyError: pass | |
1011 | ||
1012 | def _killall(me): | |
1013 | """Zap all jobs which aren't yet running.""" | |
1014 | for jobset in [me._sleep, me._check, me._ready]: | |
1015 | while jobset: | |
1016 | job = jobset.pop() | |
1017 | job.done = True | |
1018 | job.win = False | |
1019 | me._njobs -= 1 | |
1020 | ||
1021 | def _retire(me, job, win, outcome): | |
1022 | """ | |
1023 | Declare that a job has stopped, and deal with the consequences. | |
1024 | ||
1025 | JOB is the completed job, which should not be on any of the job queues. | |
1026 | WIN is true if the job succeeded, and false otherwise. OUTCOME is a | |
1027 | human-readable string explaining how the job came to its end, or `None' | |
1028 | if no message should be reported. | |
1029 | """ | |
1030 | ||
1031 | global RC | |
1032 | ||
1033 | ## Return the job's token to the pool. | |
1034 | if job._token is not None: job._token.recycle() | |
1035 | job._token = None | |
1036 | me._njobs -= 1 | |
1037 | ||
1038 | ## Update and maybe report the job's status. | |
1039 | job.done = True | |
1040 | job.win = win | |
1041 | if outcome is not None and not OPT.silent: | |
1042 | if OPT.quiet and not job.win and job._logfile: job._logtail() | |
1043 | if not job.win or not OPT.quiet: | |
1044 | print "%-*s %c (%s)" % \ | |
1045 | (TAGWD, job.name, job.win and '|' or '*', outcome) | |
1046 | ||
1047 | ## If the job failed, and we care, arrange to exit nonzero. | |
1048 | if not win and not OPT.ignerr: RC = 2 | |
1049 | ||
1050 | ## If the job failed, and we're supposed to give up after the first | |
1051 | ## error, then zap all of the waiting jobs. | |
1052 | if not job.win and not OPT.keepon and not OPT.ignerr: me._killall() | |
1053 | ||
1054 | ## If this job has dependents then wake them up and see whether they're | |
1055 | ## ready to run. | |
1056 | for j in job._waiting: | |
1057 | try: me._sleep.remove(j) | |
1058 | except KeyError: pass | |
1059 | else: | |
1060 | spew("waking dependent job `%s'" % j.name) | |
1061 | me._check.add(j) | |
1062 | ||
1063 | def _reap(me, kid, st): | |
1064 | """ | |
1065 | Deal with the child with process-id KID having exited with status ST. | |
1066 | """ | |
1067 | ||
1068 | ## Figure out what kind of child this is. Note that it has finished. | |
1069 | try: job = me._kidmap[kid] | |
1070 | except KeyError: | |
1071 | try: job = me._logkidmap[kid] | |
1072 | except KeyError: | |
1073 | spew("unknown child %d exits with status 0x%04x" % (kid, st)) | |
1074 | return | |
1075 | else: | |
1076 | ## It's a logging child. | |
1077 | del me._logkidmap[kid] | |
1078 | job._logkid = DONE | |
1079 | spew("logging process for job `%s' exits with status 0x%04x" % | |
1080 | (job.name, st)) | |
1081 | else: | |
1082 | job._st = st | |
1083 | del me._kidmap[kid] | |
1084 | spew("main process for job `%s' exits with status 0x%04x" % | |
1085 | (job.name, st)) | |
1086 | ||
1087 | ## If either of the job's associated processes is still running then we | |
1088 | ## should stop now and give the other one a chance. | |
1089 | if job._st is None or job._logkid is not DONE: | |
1090 | spew("deferring retirement for job `%s'" % job.name) | |
1091 | return | |
1092 | spew("completing deferred retirement for job `%s'" % job.name) | |
1093 | ||
1094 | ## Update and (maybe) report the job status. | |
1095 | if job._st == 0: win = True; outcome = None | |
1096 | else: win = False; outcome = wait_outcome(job._st) | |
1097 | ||
1098 | ## Retire the job. | |
1099 | me._retire(job, win, outcome) | |
1100 | ||
1101 | def _reapkids(me): | |
1102 | """Reap all finished child processes.""" | |
1103 | while True: | |
1104 | try: kid, st = OS.waitpid(-1, OS.WNOHANG) | |
1105 | except OSError, err: | |
1106 | if err.errno == E.ECHILD: break | |
1107 | else: raise | |
1108 | if kid == 0: break | |
1109 | me._reap(kid, st) | |
1110 | ||
1111 | def run_job(me, job): | |
1112 | """Start running the JOB.""" | |
1113 | ||
1114 | job.started = True | |
1115 | if OPT.dryrun: return None, None | |
1116 | ||
1117 | ## Make pipes to collect the job's output and error reports. | |
1118 | r_out, w_out = OS.pipe() | |
1119 | r_err, w_err = OS.pipe() | |
1120 | ||
1121 | ## Find a log file to write. Avoid races over the log names; but this | |
1122 | ## means that the log descriptor needs to be handled somewhat carefully. | |
1123 | logdir = OS.path.join(C.STATE, "log"); mkdir_p(logdir) | |
1124 | logseq = 1 | |
1125 | while True: | |
1126 | logfile = OS.path.join(logdir, "%s-%s#%d" % (job.name, TODAY, logseq)) | |
1127 | try: | |
1128 | logfd = OS.open(logfile, OS.O_WRONLY | OS.O_CREAT | OS.O_EXCL, 0666) | |
1129 | except OSError, err: | |
1130 | if err.errno == E.EEXIST: logseq += 1; continue | |
1131 | else: raise | |
1132 | else: | |
1133 | break | |
1134 | job._logfile = logfile | |
1135 | ||
1136 | ## Make sure there's no pending output, or we might get two copies. (I | |
1137 | ## don't know how to flush all output streams in Python, but this is good | |
1138 | ## enough for our purposes.) | |
1139 | SYS.stdout.flush() | |
1140 | ||
1141 | ## Set up the logging child first. If we can't, take down the whole job. | |
1142 | try: job._logkid = OS.fork() | |
1143 | except OSError, err: OS.close(logfd); return None, err | |
1144 | if not job._logkid: | |
1145 | ## The main logging loop. | |
1146 | ||
1147 | ## Close the jobserver descriptors, and the write ends of the pipes. | |
1148 | me.close_jobserver() | |
1149 | OS.close(w_out); OS.close(w_err) | |
1150 | ||
1151 | ## Capture the job's stdout and stderr and wait for everything to | |
1152 | ## happen. | |
1153 | def log_lines(fd, marker): | |
1154 | def fn(line): | |
1155 | if not OPT.quiet: | |
1156 | OS.write(1, "%-*s %s %s\n" % (TAGWD, job.name, marker, line)) | |
1157 | OS.write(logfd, "%s %s\n" % (marker, line)) | |
1158 | return ReadLinesSelector(fd, fn) | |
1159 | select_loop([log_lines(r_out, "|"), log_lines(r_err, "*")]) | |
1160 | ||
1161 | ## We're done. (Closing the descriptors here would be like polishing | |
1162 | ## the floors before the building is demolished.) | |
1163 | OS._exit(0) | |
1164 | ||
1165 | ## Back in the main process: record the logging child. At this point we | |
1166 | ## no longer need the logfile descriptor. | |
1167 | me._logkidmap[job._logkid] = job | |
1168 | OS.close(logfd) | |
1169 | ||
1170 | ## Start the main job process. | |
1171 | try: kid = OS.fork() | |
1172 | except OSError, err: return None, err | |
1173 | if not kid: | |
1174 | ## The main job. | |
1175 | ||
1176 | ## Close the read ends of the pipes, and move the write ends to the | |
1177 | ## right places. (This will go wrong if we were started without enough | |
1178 | ## descriptors. Fingers crossed.) | |
1179 | OS.dup2(w_out, 1); OS.dup2(w_err, 2) | |
1180 | OS.close(r_out); OS.close(w_out) | |
1181 | OS.close(r_err); OS.close(w_err) | |
1182 | spew("running job `%s' as pid %d" % (job.name, OS.getpid())) | |
1183 | ||
1184 | ## Run the job, catching nonlocal flow. | |
1185 | try: | |
1186 | job.run() | |
1187 | except ExpectedError, err: | |
1188 | moan(str(err)) | |
1189 | OS._exit(2) | |
1190 | except Exception, err: | |
1191 | TB.print_exc(SYS.stderr) | |
1192 | OS._exit(3) | |
1193 | except BaseException, err: | |
1194 | moan("caught unexpected exception: %r" % err) | |
1195 | OS._exit(112) | |
1196 | else: | |
1197 | spew("job `%s' ran to completion" % job.name) | |
1198 | ||
1199 | ## Clean up old logs. | |
1200 | match = [] | |
1201 | pat = RX.compile(r"^%s-(\d{4})-(\d{2})-(\d{2})\#(\d+)$" % | |
1202 | RX.escape(job.name)) | |
1203 | for f in OS.listdir(logdir): | |
1204 | m = pat.match(f) | |
1205 | if m: match.append((f, int(m.group(1)), int(m.group(2)), | |
1206 | int(m.group(3)), int(m.group(4)))) | |
1207 | match.sort(key = lambda (_, y, m, d, q): (y, m, d, q)) | |
1208 | if len(match) > LOGKEEP: | |
1209 | for (f, _, _, _, _) in match[:-LOGKEEP]: | |
1210 | try: OS.unlink(OS.path.join(logdir, f)) | |
1211 | except OSError, err: | |
1212 | if err.errno == E.ENOENT: pass | |
1213 | else: raise | |
1214 | ||
1215 | ## All done. | |
1216 | OS._exit(0) | |
1217 | ||
1218 | ## Back in the main process: close both the pipes and return the child | |
1219 | ## process. | |
1220 | OS.close(r_out); OS.close(w_out) | |
1221 | OS.close(r_err); OS.close(w_err) | |
1222 | if OPT.quiet: print "%-*s | (started)" % (TAGWD, job.name) | |
1223 | return kid, None | |
1224 | ||
1225 | def run(me): | |
1226 | """Run the scheduler.""" | |
1227 | ||
1228 | spew("JobScheduler starts") | |
1229 | ||
1230 | while True: | |
1231 | ## The main scheduler loop. We go through three main phases: | |
1232 | ## | |
1233 | ## * Inspect the jobs in the `check' list to see whether they can | |
1234 | ## run. After this, the `check' list will be empty. | |
1235 | ## | |
1236 | ## * If there are running jobs, check to see whether any of them have | |
1237 | ## stopped, and deal with the results. Also, if there are jobs | |
1238 | ## ready to start and a job token has become available, then | |
1239 | ## retrieve the token. (Doing these at the same time is the tricky | |
1240 | ## part.) | |
1241 | ## | |
1242 | ## * If there is a job ready to run, and we retrieved a token, then | |
1243 | ## start running the job. | |
1244 | ||
1245 | ## Check the pending jobs to see if they can make progress: run each | |
1246 | ## job's `check' method and move it to the appropriate queue. (It's OK | |
1247 | ## if `check' methods add more jobs to the list, as long as things | |
1248 | ## settle down eventually.) | |
1249 | while True: | |
1250 | try: job = me._check.pop() | |
1251 | except KeyError: break | |
1252 | if job._deps is None: | |
1253 | job._deps = set() | |
1254 | job.prepare() | |
1255 | state, reason = job.check() | |
1256 | tail = reason is not None and ": %s" % reason or "" | |
1257 | if state == READY: | |
1258 | spew("job `%s' ready to run%s" % (job.name, tail)) | |
1259 | me._ready.add(job) | |
1260 | elif state is FAILED: | |
1261 | spew("job `%s' refused to run%s" % (job.name, tail)) | |
1262 | me._retire(job, False, "refused to run%s" % tail) | |
1263 | elif state is DONE: | |
1264 | spew("job `%s' has nothing to do%s" % (job.name, tail)) | |
1265 | me._retire(job, True, reason) | |
1266 | elif state is SLEEP: | |
1267 | spew("job `%s' can't run yet%s" % (job.name, tail)) | |
1268 | me._sleep.add(job) | |
1269 | else: | |
1270 | raise ValueError("unexpected job check from `%s': %r, %r" % | |
1271 | (job.name, state, reason)) | |
1272 | ||
1273 | ## If there are no jobs left, then we're done. | |
1274 | if not me._njobs: | |
1275 | spew("all jobs completed") | |
1276 | break | |
1277 | ||
1278 | ## Make sure we can make progress. There are no jobs on the check list | |
1279 | ## any more, because we just cleared it. We assume that jobs which are | |
1280 | ## ready to run will eventually receive a token. So we only end up in | |
1281 | ## trouble if there are jobs asleep, but none running or ready to run. | |
1282 | ##spew("#jobs = %d" % me._njobs) | |
1283 | ##spew("sleeping: %s" % ", ".join([j.name for j in me._sleep])) | |
1284 | ##spew("ready: %s" % ", ".join([j.name for j in me._ready])) | |
1285 | ##spew("running: %s" % ", ".join([j.name for j in me._kidmap.itervalues()])) | |
1286 | assert not me._sleep or me._kidmap or me._logkidmap or me._ready | |
1287 | ||
1288 | ## Wait for something to happen. | |
1289 | if not me._ready or (not me._par and me._privtoken is None): | |
1290 | ## If we have no jobs ready to run, then we must wait for an existing | |
1291 | ## child to exit. Hopefully, a sleeping job will be able to make | |
1292 | ## progress after this. | |
1293 | ## | |
1294 | ## Alternatively, if we're not supposed to be running jobs in | |
1295 | ## parallel and we don't have the private token, then we have no | |
1296 | ## choice but to wait for the running job to complete. | |
1297 | ## | |
1298 | ## There's no check here for `ECHILD'. We really shouldn't be here | |
1299 | ## if there are no children to wait for. (The check list must be | |
1300 | ## empty because we just drained it. If the ready list is empty, | |
1301 | ## then all of the jobs must be running or sleeping; but the | |
1302 | ## assertion above means that either there are no jobs at all, in | |
1303 | ## which case we should have stopped, or at least one is running, in | |
1304 | ## which case it's safe to wait for it. The other case is that we're | |
1305 | ## running jobs sequentially, and one is currently running, so | |
1306 | ## there's nothing for it but to wait for it -- and hope that it will | |
1307 | ## wake up one of the sleeping jobs. The remaining possibility is | |
1308 | ## that we've miscounted somewhere, which will cause a crash.) | |
1309 | if not me._ready: | |
1310 | spew("no new jobs ready: waiting for outstanding jobs to complete") | |
1311 | else: | |
1312 | spew("job running without parallelism: waiting for it to finish") | |
1313 | kid, st = OS.waitpid(-1, 0) | |
1314 | me._reap(kid, st) | |
1315 | me._reapkids() | |
1316 | continue | |
1317 | ||
1318 | ## We have jobs ready to run, so try to acquire a token. | |
1319 | if me._rfd == -1 and me._par: | |
1320 | ## We're running with unlimited parallelism, so we don't need a token | |
1321 | ## to run a job. | |
1322 | spew("running new job without token") | |
1323 | token = TRIVIAL_TOKEN | |
1324 | elif me._privtoken: | |
1325 | ## Our private token is available, so we can use that to start | |
1326 | ## a new job. | |
1327 | spew("private token available: assigning to new job") | |
1328 | token = me._privtoken | |
1329 | me._privtoken = None | |
1330 | else: | |
1331 | ## We have to read from the jobserver pipe. Unfortunately, we're not | |
1332 | ## allowed to set the pipe nonblocking, because make is also using it | |
1333 | ## and will get into a serious mess. And we must deal with `SIGCHLD' | |
1334 | ## arriving at any moment. We use the same approach as GNU Make. We | |
1335 | ## start by making a copy of the jobserver descriptor: it's this | |
1336 | ## descriptor we actually try to read from. We set a signal handler | |
1337 | ## to close this descriptor if a child exits. And we try one last | |
1338 | ## time to reap any children which have exited just before we try | |
1339 | ## reading the jobserver pipe. This way we're covered: | |
1340 | ## | |
1341 | ## * If a child exits during the main loop, before we establish the | |
1342 | ## descriptor copy then we'll notice when we try reaping | |
1343 | ## children. | |
1344 | ## | |
1345 | ## * If a child exits between the last-chance reap and the read, | |
1346 | ## the signal handler will close the descriptor and the `read' | |
1347 | ## call will fail with `EBADF'. | |
1348 | ## | |
1349 | ## * If a child exits while we're inside the `read' system call, | |
1350 | ## then the syscall will fail with `EINTR'. | |
1351 | ## | |
1352 | ## The only problem is that we can't do this from Python, because | |
1353 | ## Python signal handlers are delayed. This is what the `jobclient' | |
1354 | ## module is for. | |
1355 | ## | |
1356 | ## The `jobclient' function is called as | |
1357 | ## | |
1358 | ## jobclient(FD) | |
1359 | ## | |
1360 | ## It returns a tuple of three values: TOKEN, PID, STATUS. If TOKEN | |
1361 | ## is not `None', then reading the pipe succeeded; if TOKEN is empty, | |
1362 | ## then the pipe returned EOF, so we should abort; otherwise, TOKEN | |
1363 | ## is a singleton string holding the token character. If PID is not | |
1364 | ## `None', then PID is the process id of a child which exited, and | |
1365 | ## STATUS is its exit status. | |
1366 | spew("waiting for token from jobserver") | |
1367 | tokch, kid, st = JC.jobclient(me._rfd) | |
1368 | ||
1369 | if kid is not None: | |
1370 | me._reap(kid, st) | |
1371 | me._reapkids() | |
1372 | if tokch is None: | |
1373 | spew("no token; trying again") | |
1374 | continue | |
1375 | elif token == '': | |
1376 | error("jobserver pipe closed; giving up") | |
1377 | me._killall() | |
1378 | continue | |
1379 | spew("received token from jobserver") | |
1380 | token = JobServerToken(tokch, me._wfd) | |
1381 | ||
1382 | ## We have a token, so we should start up the job. | |
1383 | job = me._ready.pop() | |
1384 | job._token = token | |
1385 | spew("start new job `%s'" % job.name) | |
1386 | kid, err = me.run_job(job) | |
1387 | if err is not None: | |
1388 | me._retire(job, False, "failed to fork: %s" % err) | |
1389 | continue | |
1390 | if kid is None: me._retire(job, True, "dry run") | |
1391 | else: me._kidmap[kid] = job | |
1392 | ||
1393 | ## We ran out of work to do. | |
1394 | spew("JobScheduler done") | |
1395 | ||
1396 | ###-------------------------------------------------------------------------- | |
1397 | ### Configuration. | |
1398 | ||
1399 | R_CONFIG = RX.compile(r"^([a-zA-Z0-9_]+)='(.*)'$") | |
1400 | ||
class Config (object):
  """
  The script configuration, parsed from shell-style `NAME='VALUE''
  assignments (as read from `state/config.sh' -- see `__init__').

  Each assignment becomes an attribute on the instance, converted according
  to the `_CONVERT' and `_CONV_MAP' tables below.
  """

  ## Value converters, used as plain functions while the tables below are
  ## being built; they're turned into static methods afterwards.
  def _conv_str(s): return s
  def _conv_list(s): return s.split()
  def _conv_set(s): return set(s.split())

  ## Map from configuration variable name to converter.  Variables not
  ## listed here, and not matching a `_CONV_MAP' suffix, are kept as plain
  ## strings.
  _CONVERT = {
    "ROOTLY": _conv_list,
    "DISTS": _conv_set,
    "MYARCH": _conv_set,
    "NATIVE_ARCHS": _conv_set,
    "FOREIGN_ARCHS": _conv_set,
    "FOREIGN_GNUARCHS": _conv_list,
    "ALL_ARCHS": _conv_set,
    "NATIVE_CHROOTS": _conv_set,
    "FOREIGN_CHROOTS": _conv_set,
    "ALL_CHROOTS": _conv_set,
    "BASE_PACKAGES": _conv_list,
    "EXTRA_PACKAGES": _conv_list,
    "CROSS_PACKAGES": _conv_list,
    "CROSS_PATHS": _conv_list,
    "APTCONF": _conv_list,
    "LOCALPKGS": _conv_list,
    "SCHROOT_COPYFILES": _conv_list,
    "SCHROOT_NSSDATABASES": _conv_list
  }

  ## Map from variable-name suffix to an (ATTRIBUTE, CONVERTER) pair: a
  ## variable `PREFIX_SUFFIX' is stored as entry `PREFIX' in the dictionary
  ## attribute ATTRIBUTE, e.g., `foo_DEPS' ends up in `PKGDEPS["foo"]'.
  _CONV_MAP = {
    "*_APTCONFSRC": ("APTCONFSRC", _conv_str),
    "*_DEPS": ("PKGDEPS", _conv_list),
    "*_QEMUHOST": ("QEMUHOST", _conv_str),
    "*_QEMUARCH": ("QEMUARCH", _conv_str),
    "*_ALIASES": ("DISTALIAS", _conv_str)
  }

  ## Now wrap the converters up as static methods for use on instances.
  _conv_str = staticmethod(_conv_str)
  _conv_list = staticmethod(_conv_list)
  _conv_set = staticmethod(_conv_set)

  def __init__(me):
    """Read and parse the configuration into the `_conf' dictionary."""
    ## NOTE(review): the `@@@config@@@' marker looks like a build-time
    ## substitution site -- presumably a generated configuration text can be
    ## spliced into the raw string; as written here, the configuration is
    ## read from `state/config.sh' instead.  Confirm against the build
    ## machinery.
    raw = r"""
"""; raw = open('state/config.sh').read(); _ignore = """ @@@config@@@
"""
    me._conf = {}
    for line in raw.split("\n"):
      line = line.strip()
      if not line or line.startswith('#'): continue
      m = R_CONFIG.match(line)
      if not m: raise ExpectedError("bad config line `%s'" % line)
      ## Undo the shell quoting convention for embedded single quotes.
      k, v = m.group(1), m.group(2).replace("'\\''", "'")
      d = me._conf
      try: conv = me._CONVERT[k]
      except KeyError:
        ## Not a directly-listed variable: scan the underscore-separated
        ## suffixes of the name, left to right, looking for a `_CONV_MAP'
        ## entry; fall back to keeping the value as a string.
        i = 0
        while True:
          try: i = k.index("_", i + 1)
          except ValueError: conv = me._conv_str; break
          try: map, conv = me._CONV_MAP["*" + k[i:]]
          except KeyError: pass
          else:
            ## Matched: file the value in the sub-dictionary, keyed by the
            ## variable-name prefix (sans any leading underscore).
            d = me._conf.setdefault(map, dict())
            k = k[:i]
            if k.startswith("_"): k = k[1:]
            break
      d[k] = conv(v)

  def __getattr__(me, attr):
    """Expose the parsed configuration entries as attributes."""
    try: return me._conf[attr]
    except KeyError, err: raise AttributeError(err.args[0])
1470 | ||
1471 | with toplevel_handler(): C = Config() | |
1472 | ||
1473 | ###-------------------------------------------------------------------------- | |
1474 | ### Chroot maintenance utilities. | |
1475 | ||
1476 | CREATE = Tag("CREATE") | |
1477 | FORCE = Tag("FORCE") | |
1478 | ||
def check_fresh(fresh, update):
  """
  Compare a refresh mode FRESH against an UPDATE time.

  The result is a (STATUS, REASON) pair, suitable for returning from a job's
  `check' method.

  FRESH may be any of the following.

    * `CREATE' is satisfied as long as the thing exists at all: the result
      is `READY' if the thing doesn't yet exist (i.e., UPDATE is `None'),
      and `DONE' otherwise.

    * `FORCE' is never satisfied: the result is always `READY'.

    * An integer N is satisfied if the UPDATE time is no more than N seconds
      in the past: the result is `READY' if UPDATE is too old, and `DONE'
      otherwise.
  """
  if update is None: return READY, "must create"
  if fresh is FORCE: return READY, "update forced"
  if fresh is CREATE: return DONE, "already created"
  age = NOW - unzulu(update)
  if age > fresh: return READY, "too stale: updating"
  return DONE, "already sufficiently up-to-date"
1502 | ||
def lockfile_path(file):
  """
  Return the full pathname for a lockfile named FILE.

  The lock directory is created if it doesn't already exist.
  """
  d = OS.path.join(C.STATE, "lock")
  mkdir_p(d)
  return OS.path.join(d, file)
1511 | ||
def chroot_src_lockfile(dist, arch):
  """
  Return the lockfile guarding the source-chroot for DIST on ARCH.

  It is not allowed to acquire a source-chroot lock while holding any other
  locks.
  """
  return lockfile_path("source.{0}-{1}".format(dist, arch))
1520 | ||
def chroot_src_lv(dist, arch):
  """
  Return the logical volume name for the source-chroot for DIST on ARCH.
  """
  return "{0}{1}-{2}".format(C.LVPREFIX, dist, arch)
1526 | ||
def chroot_src_blkdev(dist, arch):
  """
  Return the block-device name for the source-chroot for DIST on ARCH.
  """
  lv = chroot_src_lv(dist, arch)
  return OS.path.join("/dev", C.VG, lv)
1532 | ||
def chroot_src_mntpt(dist, arch):
  """
  Return the mountpoint path used while setting up the source-chroot for
  DIST on ARCH, creating it if necessary.

  Note that this is not the mountpoint that schroot(1) uses.
  """
  mnt = OS.path.join(C.STATE, "mnt", "{0}-{1}".format(dist, arch))
  mkdir_p(mnt)
  return mnt
1542 | ||
def chroot_session_mntpt(session):
  """Return the directory on which the schroot SESSION is mounted."""
  return OS.path.join("/schroot", session)
1546 | ||
def crosstools_lockfile(dist, arch):
  """
  Return the lockfile guarding the cross-build tools for DIST, hosted by
  ARCH.

  When locking multiple cross-build tools, you must acquire the locks in
  lexicographically ascending order.
  """
  return lockfile_path("cross-tools.{0}-{1}".format(dist, arch))
1555 | ||
def switch_prefix(string, map):
  """
  Replace the prefix of a STRING, according to the given MAP.

  MAP is a sequence of (OLD, NEW) pairs.  For each such pair in turn, test
  whether STRING starts with OLD: if so, return STRING, but with the prefix
  OLD replaced by NEW.  If no OLD prefix matches, then raise a `ValueError'.
  """
  for old, new in map:
    if string.startswith(old): return new + string[len(old):]
  ## The format string takes both the offending STRING and the list of
  ## prefixes; previously only the joined prefix list was supplied, which
  ## made this raise `TypeError' rather than the intended `ValueError'.
  raise ValueError("expected `%s' to start with one of %s" %
                   (string, ", ".join(["`%s'" % old for old, new in map])))
1568 | ||
def host_to_chroot(path):
  """
  Convert a host path below `C.LOCAL' into the corresponding chroot path
  below `/usr/local.schroot'.
  """
  prefixmap = [(C.LOCAL + "/", "/usr/local.schroot/")]
  return switch_prefix(path, prefixmap)
1575 | ||
def chroot_to_host(path):
  """
  Convert a chroot path below `/usr/local.schroot' into the corresponding
  host path below `C.LOCAL'.
  """
  prefixmap = [("/usr/local.schroot/", C.LOCAL + "/")]
  return switch_prefix(path, prefixmap)
1582 | ||
def split_dist_arch(spec):
  """Split a SPEC of the form `DIST-ARCH' into the pair (DIST, ARCH)."""
  ## Split at the first hyphen only: an `ARCH' may itself contain hyphens.
  dist, arch = spec.split("-", 1)
  return dist, arch
1587 | ||
def elf_binary_p(arch, path):
  """Return whether PATH names an ELF binary for the architecture ARCH."""
  if not OS.path.isfile(path): return False
  with open(path, 'rb') as f: magic = f.read(20)
  ## Check the ELF magic and that the padding bytes are all zero.
  if magic[0:4] != "\x7fELF": return False
  if magic[8:16] != 8*"\0": return False
  ## Expected (EI_CLASS/EI_DATA/EI_VERSION, e_machine) bytes per donor
  ## architecture.
  try:
    wantid, wantmach = { "i386": ("\x01\x01\x01", "\x03\x00"),
                         "amd64": ("\x02\x01\x01", "\x3e\x00") }[arch]
  except KeyError:
    raise ValueError("unsupported donor architecture `%s'" % arch)
  if magic[4:7] != wantid: return False
  if magic[18:20] != wantmach: return False
  return True
1603 | ||
def progress(msg):
  """
  Print a progress message MSG directly to stdout (file descriptor 1).

  This is intended to be called within a job's `run' method, so it doesn't
  check `OPT.quiet' or `OPT.silent'.
  """
  line = ";; %s\n" % msg
  OS.write(1, line)
1612 | ||
class NoSuchChroot (Exception):
  """
  Exception indicating that a chroot does not exist.

  Specifically, it means that it doesn't even have a logical volume.
  """
  def __init__(me, dist, arch):
    ## Record the distribution and architecture for the error message.
    me.dist, me.arch = dist, arch
  def __str__(me):
    return "chroot for `%s' on `%s' not found" % (me.dist, me.arch)
1624 | ||
@CTX.contextmanager
def mount_chroot_src(dist, arch):
  """
  Context manager for mounting the source-chroot for DIST on ARCH.

  Yields the mount point.  The filesystem is unmounted again when the body
  exits.  You must hold the appropriate source-chroot lock before calling
  this routine.  Raises `NoSuchChroot' if the chroot's logical volume
  doesn't exist.
  """
  blkdev = chroot_src_blkdev(dist, arch)
  if not block_device_p(blkdev): raise NoSuchChroot(dist, arch)
  mntpt = chroot_src_mntpt(dist, arch)
  try:
    run_program(C.ROOTLY + ["mount", blkdev, mntpt])
    yield mntpt
  finally:
    umount(mntpt)
1642 | ||
@CTX.contextmanager
def chroot_session(dist, arch, sourcep = False):
  """
  Context manager for running an schroot(1) session.

  Yields a pair (SESSION, ROOT): the (ugly, automatically generated)
  session name, and the root directory of the session's filesystem.  By
  default a snapshot session is started; set SOURCEP true to start a
  source-chroot session instead (you must hold the appropriate source-
  chroot lock first).  The session is closed again when the body exits.
  """
  target = chroot_src_lv(dist, arch)
  if sourcep: target = "source:" + target
  session = run_program(["schroot", "-uroot", "-b", "-c", target],
                        stdout = RETURN).rstrip("\n")
  try:
    yield session, OS.path.join(chroot_session_mntpt(session), "fs")
  finally:
    run_program(["schroot", "-e", "-c", session])
1665 | ||
def run_root(command, **kw):
  """Run a COMMAND as root.  Arguments are as for `run_program'."""
  argv = C.ROOTLY + command
  return run_program(argv, **kw)
1669 | ||
def run_schroot_session(session, command, rootp = False, **kw):
  """
  Run a COMMAND within an schroot(1) session.

  If ROOTP is true, run the command as root within the session.  Remaining
  arguments are as for `run_program'.
  """
  argv = ["schroot"]
  if rootp: argv.append("-uroot")
  argv += ["-r", "-c", session, "--"]
  return run_program(argv + command, **kw)
1682 | ||
def run_schroot_source(dist, arch, command, **kw):
  """
  Run a COMMAND through schroot(1), in the source-chroot for DIST on ARCH.

  Arguments are as for `run_program'.  You must hold the appropriate
  source-chroot lock before calling this routine.
  """
  target = "source:%s" % chroot_src_lv(dist, arch)
  return run_program(["schroot", "-uroot", "-c", target, "--"] + command,
                     **kw)
1693 | ||
1694 | ###-------------------------------------------------------------------------- | |
1695 | ### Metadata files. | |
1696 | ||
class MetadataClass (type):
  """
  Metaclass for metadata classes.

  Notice a `VARS' attribute in the class dictionary, and augment it with a
  `_VARSET' attribute, constructed as a set containing the same items.  (We
  need them both: the set satisfies fast lookups, while the original
  sequence remembers the ordering.)
  """
  def __new__(me, name, supers, dict):
    if 'VARS' in dict: dict['_VARSET'] = set(dict['VARS'])
    return super(MetadataClass, me).__new__(me, name, supers, dict)
1711 | ||
class BaseMetadata (object):
  """
  Base class for metadata objects.

  Metadata bundles are simple collections of key/value pairs.  Keys should
  usually be Python identifiers because they're used to name attributes.
  Values are strings, but shouldn't have leading or trailing whitespace, and
  can't contain newlines.

  Metadata bundles are written to files.  The format is simple enough: empty
  lines and lines starting with `#' are ignored; otherwise, the line must
  have the form

        KEY = VALUE

  where KEY does not contain `='; spaces around the `=' are optional, and
  spaces around the KEY and VALUE are stripped.  The order of keys is
  unimportant; keys are always written in a standard order on output.
  """
  __metaclass__ = MetadataClass

  def __init__(me, **kw):
    """Initialize a metadata bundle from keyword arguments."""
    for k, v in kw.iteritems():
      setattr(me, k, v)
    ## Make sure every declared variable is present, defaulting to `None'.
    for v in me.VARS:
      try: getattr(me, v)
      except AttributeError: setattr(me, v, None)

  def __setattr__(me, attr, value):
    """
    Try to set an attribute.

    Only attribute names listed in the `VARS' class attribute are permitted.
    """
    ## Use the call form of `raise': the old `raise AttributeError, attr'
    ## comma syntax is deprecated and a hard syntax error under Python 3.
    if attr not in me._VARSET: raise AttributeError(attr)
    super(BaseMetadata, me).__setattr__(attr, value)

  @classmethod
  def read(cls, path):
    """Return a new metadata bundle read from a named PATH."""
    map = {}
    with open(path) as f:
      for line in f:
        line = line.strip()
        if line == "" or line.startswith("#"): continue
        k, v = line.split("=", 1)
        map[k.strip()] = v.strip()
    return cls(**map)

  def _write(me, file):
    """
    Write the metadata bundle to the FILE (a file-like object).

    This is intended for use by subclasses which want to override the default
    I/O behaviour of the main `write' method.
    """
    file.write("### -*-conf-*-\n")
    ## Write the keys in their declared order, skipping unset/`None' ones.
    for k in me.VARS:
      try: v = getattr(me, k)
      except AttributeError: pass
      else:
        if v is not None: file.write("%s = %s\n" % (k, v))

  def write(me, path):
    """
    Write the metadata bundle to a given PATH.

    The file is replaced atomically.
    """
    with safewrite(path) as f: me._write(f)

  def __repr__(me):
    return "#<%s: %s>" % (me.__class__.__name__,
                          ", ".join("%s=%r" % (k, getattr(me, k, None))
                                    for k in me.VARS))
1788 | ||
class ChrootMetadata (BaseMetadata):
  """Metadata for a chroot: its distribution, architecture, and the time of
  its last successful update."""

  VARS = ['dist', 'arch', 'update']

  @classmethod
  def read(cls, dist, arch):
    """
    Return the metadata for the DIST/ARCH chroot.

    If the chroot, or its `META' file, doesn't exist yet, return a fresh
    bundle with no recorded update time.
    """
    try:
      with lockfile(chroot_src_lockfile(dist, arch), exclp = False):
        with mount_chroot_src(dist, arch) as mnt:
          return super(ChrootMetadata, cls).read(OS.path.join(mnt, "META"))
    ## `except ... as' works on Python >=2.6 and 3; the old comma form is a
    ## syntax error under Python 3.
    except IOError as err:
      if err.errno == E.ENOENT: pass
      else: raise
    except NoSuchChroot: pass
    return cls(dist = dist, arch = arch)

  def write(me):
    """Write the metadata to the chroot's `META' file, as root."""
    with mount_chroot_src(me.dist, me.arch) as mnt:
      with safewrite_root(OS.path.join(mnt, "META")) as f:
        me._write(f)
1808 | ||
class CrossToolsMetadata (BaseMetadata):
  """Metadata for a cross-tools tree: distribution, donor architecture, and
  the time of the last successful update."""

  VARS = ['dist', 'arch', 'update']

  @classmethod
  def read(cls, dist, arch):
    """
    Return the metadata for the DIST/ARCH cross-tools tree.

    If the tree, or its `META' file, doesn't exist yet, return a fresh
    bundle with no recorded update time.
    """
    try:
      return super(CrossToolsMetadata, cls)\
        .read(OS.path.join(C.LOCAL, "cross", "%s-%s" % (dist, arch), "META"))
    ## `except ... as' works on Python >=2.6 and 3; the old comma form is a
    ## syntax error under Python 3.
    except IOError as err:
      if err.errno == E.ENOENT: pass
      else: raise
    return cls(dist = dist, arch = arch)

  def write(me, dir = None):
    """Write the metadata to DIR/META (default: the cross-tools tree),
    as root."""
    if dir is None:
      dir = OS.path.join(C.LOCAL, "cross", "%s-%s" % (me.dist, me.arch))
    with safewrite_root(OS.path.join(dir, "META")) as f:
      me._write(f)
1827 | ||
1828 | ###-------------------------------------------------------------------------- | |
1829 | ### Constructing a chroot. | |
1830 | ||
## Matches a line of `dpkg-divert --list' output describing a diversion
## added by `install-cross-tools'; group 1 is the diverted pathname.
R_DIVERT = RX.compile(r"^diversion of (.*) to .* by install-cross-tools$")
1832 | ||
1833 | class ChrootJob (BaseJob): | |
1834 | """ | |
1835 | Create or update a chroot. | |
1836 | """ | |
1837 | ||
1838 | SPECS = C.ALL_CHROOTS | |
1839 | ||
1840 | def __init__(me, spec, fresh = CREATE, *args, **kw): | |
1841 | super(ChrootJob, me).__init__(*args, **kw) | |
1842 | me._dist, me._arch = split_dist_arch(spec) | |
1843 | me._fresh = fresh | |
1844 | me._meta = ChrootMetadata.read(me._dist, me._arch) | |
1845 | me._tools_chroot = me._qemu_chroot = None | |
1846 | ||
1847 | def _mkname(me): return "chroot.%s-%s" % (me._dist, me._arch) | |
1848 | ||
1849 | def prepare(me): | |
1850 | if me._arch in C.FOREIGN_ARCHS: | |
1851 | me._tools_chroot = CrossToolsJob.ensure\ | |
1852 | ("%s-%s" % (me._dist, C.TOOLSARCH), FRESH) | |
1853 | me._qemu_chroot = CrossToolsJob.ensure\ | |
1854 | ("%s-%s" % (me._dist, C.QEMUHOST[me._arch]), FRESH) | |
1855 | me.await(me._tools_chroot) | |
1856 | me.await(me._qemu_chroot) | |
1857 | ||
1858 | def check(me): | |
1859 | status, reason = super(ChrootJob, me).check() | |
1860 | if status is not READY: return status, reason | |
1861 | if (me._tools_chroot is not None and me._tools_chroot.started) or \ | |
1862 | (me._qemu_chroot is not None and me._qemu_chroot.started): | |
1863 | return READY, "prerequisites run" | |
1864 | return check_fresh(me._fresh, me._meta.update) | |
1865 | ||
1866 | def _install_cross_tools(me): | |
1867 | """ | |
1868 | Install or refresh cross-tools in the source-chroot. | |
1869 | ||
1870 | This function version assumes that the source-chroot lock is already | |
1871 | held. | |
1872 | ||
1873 | Note that there isn't a job class corresponding to this function. It's | |
1874 | done automatically as part of source-chroot setup and update for foreign | |
1875 | architectures. | |
1876 | """ | |
1877 | with Cleanup() as clean: | |
1878 | ||
1879 | dist, arch = me._dist, me._arch | |
1880 | ||
1881 | mymulti = run_program(["dpkg-architecture", "-a", C.TOOLSARCH, | |
1882 | "-qDEB_HOST_MULTIARCH"], | |
1883 | stdout = RETURN).rstrip("\n") | |
1884 | gnuarch = run_program(["dpkg-architecture", "-A", arch, | |
1885 | "-qDEB_TARGET_GNU_TYPE"], | |
1886 | stdout = RETURN).rstrip("\n") | |
1887 | ||
1888 | crossdir = OS.path.join(C.LOCAL, "cross", | |
1889 | "%s-%s" % (dist, C.TOOLSARCH)) | |
1890 | ||
1891 | qarch, qhost = C.QEMUARCH[arch], C.QEMUHOST[arch] | |
1892 | qemudir = OS.path.join(C.LOCAL, "cross", | |
1893 | "%s-%s" % (dist, qhost), "QEMU") | |
1894 | ||
1895 | ## Acquire lockfiles in a canonical order to prevent deadlocks. | |
1896 | donors = [C.TOOLSARCH] | |
1897 | if qarch != C.TOOLSARCH: donors.append(qarch) | |
1898 | donors.sort() | |
1899 | for a in donors: | |
1900 | clean.enter(lockfile(crosstools_lockfile(dist, a), exclp = False)) | |
1901 | ||
1902 | ## Open a session. | |
1903 | session, root = clean.enter(chroot_session(dist, arch, sourcep = True)) | |
1904 | ||
1905 | ## Search the cross-tools tree for tools, to decide what to do with | |
1906 | ## each file. Make lists: | |
1907 | ## | |
1908 | ## * `want_div' is simply a set of all files in the chroot which need | |
1909 | ## dpkg diversions to prevent foreign versions of the tools from | |
1910 | ## clobbering our native versions. | |
1911 | ## | |
1912 | ## * `want_link' is a dictionary mapping paths which need symbolic | |
1913 | ## links into the cross-tools trees to their link destinations. | |
1914 | progress("scan cross-tools tree") | |
1915 | want_div = set() | |
1916 | want_link = dict() | |
1917 | cross_prefix = crossdir + "/" | |
1918 | qemu_prefix = qemudir + "/" | |
1919 | toolchain_prefix = OS.path.join(crossdir, "TOOLCHAIN", gnuarch) + "/" | |
1920 | def examine(path): | |
1921 | dest = switch_prefix(path, [(qemu_prefix, "/usr/bin/"), | |
1922 | (toolchain_prefix, "/usr/bin/"), | |
1923 | (cross_prefix, "/")]) | |
1924 | if OS.path.islink(path): src = OS.readlink(path) | |
1925 | else: src = host_to_chroot(path) | |
1926 | want_link[dest] = src | |
1927 | if not OS.path.isdir(path): want_div.add(dest) | |
1928 | examine(OS.path.join(qemudir, "qemu-%s-static" % qarch)) | |
1929 | examine(OS.path.join(crossdir, "lib", mymulti)) | |
1930 | examine(OS.path.join(crossdir, "usr/lib", mymulti)) | |
1931 | examine(OS.path.join(crossdir, "usr/lib/gcc-cross")) | |
1932 | def visit(_, dir, files): | |
1933 | ff = [] | |
1934 | for f in files: | |
1935 | if f == "META" or f == "QEMU" or f == "TOOLCHAIN" or \ | |
1936 | (dir.endswith("/lib") and (f == mymulti or f == "gcc-cross")): | |
1937 | continue | |
1938 | ff.append(f) | |
1939 | path = OS.path.join(dir, f) | |
1940 | if not OS.path.isdir(path): examine(path) | |
1941 | files[:] = ff | |
1942 | OS.path.walk(crossdir, visit, None) | |
1943 | OS.path.walk(OS.path.join(crossdir, "TOOLCHAIN", gnuarch), | |
1944 | visit, None) | |
1945 | ||
1946 | ## Build the set `have_div' of paths which already have diversions. | |
1947 | progress("scan chroot") | |
1948 | have_div = set() | |
1949 | with subprocess(["schroot", "-uroot", "-r", "-c", session, "--", | |
1950 | "dpkg-divert", "--list"], | |
1951 | stdout = PIPE) as (_, fd_out, _): | |
1952 | try: | |
1953 | f = OS.fdopen(fd_out) | |
1954 | for line in f: | |
1955 | m = R_DIVERT.match(line.rstrip("\n")) | |
1956 | if m: have_div.add(m.group(1)) | |
1957 | finally: | |
1958 | f.close() | |
1959 | ||
1960 | ## Build a dictionary `have_link' of symbolic links into the cross- | |
1961 | ## tools trees. Also, be sure to collect all of the relative symbolic | |
1962 | ## links which are in the cross-tools tree. | |
1963 | have_link = dict() | |
1964 | with subprocess(["schroot", "-uroot", "-r", "-c", session, "--", | |
1965 | "sh", "-e", "-c", """ | |
1966 | find / -xdev -lname "/usr/local.schroot/cross/*" -printf "%p %l\n" | |
1967 | """], stdout = PIPE) as (_, fd_out, _): | |
1968 | try: | |
1969 | f = OS.fdopen(fd_out) | |
1970 | for line in f: | |
1971 | dest, src = line.split() | |
1972 | have_link[dest] = src | |
1973 | finally: | |
1974 | f.close() | |
1975 | for path in want_link.iterkeys(): | |
1976 | real = root + path | |
1977 | if not OS.path.islink(real): continue | |
1978 | have_link[path] = OS.readlink(real) | |
1979 | ||
1980 | ## Add diversions for the paths which need one, but don't have one. | |
1981 | ## There's a hack here because the `--no-rename' option was required in | |
1982 | ## the same version in which it was introduced, so there's no single | |
1983 | ## incantation that will work across the boundary. | |
1984 | progress("add missing diversions") | |
1985 | with subprocess(["schroot", "-uroot", "-r", "-c", session, "--", | |
1986 | "sh", "-e", "-c", """ | |
1987 | a="%(arch)s" | |
1988 | ||
1989 | if dpkg-divert >/dev/null 2>&1 --no-rename --help | |
1990 | then no_rename=--no-rename | |
1991 | else no_rename= | |
1992 | fi | |
1993 | ||
1994 | while read path; do | |
1995 | dpkg-divert --package "install-cross-tools" $no_rename \ | |
1996 | --divert "$path.$a" --add "$path" | |
1997 | done | |
1998 | """ % dict(arch = arch)], stdin = PIPE) as (fd_in, _, _): | |
1999 | try: | |
2000 | f = OS.fdopen(fd_in, 'w') | |
2001 | for path in want_div: | |
2002 | if path not in have_div: f.write(path + "\n") | |
2003 | finally: | |
2004 | f.close() | |
2005 | ||
2006 | ## Go through each diverted tool, and, if it hasn't been moved aside, | |
2007 | ## then /link/ it across now. If we rename it, then the chroot will | |
2008 | ## stop working -- which is why we didn't allow `dpkg-divert' to do the | |
2009 | ## rename. We can tell a tool that hasn't been moved, because it's a | |
2010 | ## symlink into one of the cross trees. | |
2011 | progress("preserve existing foreign files") | |
2012 | chroot_cross_prefix = host_to_chroot(crossdir) + "/" | |
2013 | chroot_qemu_prefix = host_to_chroot(qemudir) + "/" | |
2014 | for path in want_div: | |
2015 | real = root + path; div = real + "." + arch; cross = crossdir + path | |
2016 | if OS.path.exists(div): continue | |
2017 | if not OS.path.exists(real): continue | |
2018 | if OS.path.islink(real): | |
2019 | realdest = OS.readlink(real) | |
2020 | if realdest.startswith(chroot_cross_prefix) or \ | |
2021 | realdest.startswith(chroot_qemu_prefix): | |
2022 | continue | |
2023 | if OS.path.islink(cross) and realdest == OS.readlink(cross): | |
2024 | continue | |
2025 | progress("preserve existing foreign file `%s'" % path) | |
2026 | run_root(["ln", real, div]) | |
2027 | ||
2028 | ## Update all of the symbolic links which are currently wrong: add | |
2029 | ## links which are missing, delete ones which are obsolete, and update | |
2030 | ## ones which have the wrong target. | |
2031 | progress("update symlinks") | |
2032 | for path, src in want_link.iteritems(): | |
2033 | real = root + path | |
2034 | try: old_src = have_link[path] | |
2035 | except KeyError: pass | |
2036 | else: | |
2037 | if src == old_src: continue | |
2038 | new = real + ".new" | |
2039 | progress("link `%s' -> `%s'" % (path, src)) | |
2040 | dir = OS.path.dirname(real) | |
2041 | if not OS.path.isdir(dir): run_root(["mkdir", "-p", dir]) | |
2042 | if OS.path.exists(new): run_root(["rm", "-f", new]) | |
2043 | run_root(["ln", "-s", src, new]) | |
2044 | run_root(["mv", new, real]) | |
2045 | for path in have_link.iterkeys(): | |
2046 | if path in want_link: continue | |
2047 | progress("remove obsolete link `%s' -> `%s'" % path) | |
2048 | real = root + path | |
2049 | run_root(["rm", "-f", real]) | |
2050 | ||
2051 | ## Remove diversions from paths which don't need them any more. Here | |
2052 | ## it's safe to rename, because either the tool isn't there, in which | |
2053 | ## case it obviously wasn't important, or it is, and `dpkg-divert' will | |
2054 | ## atomically replace our link with the foreign version. | |
2055 | progress("remove obsolete diversions") | |
2056 | with subprocess(["schroot", "-uroot", "-r", "-c", session, "--", | |
2057 | "sh", "-e", "-c", """ | |
2058 | a="%(arch)s" | |
2059 | ||
2060 | while read path; do | |
2061 | dpkg-divert --package "install-cross-tools" --rename \ | |
2062 | --divert "$path.$a" --remove "$path" | |
2063 | done | |
2064 | """ % dict(arch = arch)], stdin = PIPE) as (fd_in, _, _): | |
2065 | try: | |
2066 | f = OS.fdopen(fd_in, 'w') | |
2067 | for path in have_div: | |
2068 | if path not in want_div: f.write(path + "\n") | |
2069 | finally: | |
2070 | f.close() | |
2071 | ||
2072 | def _make_chroot(me): | |
2073 | """ | |
2074 | Create the source-chroot with chroot metadata META. | |
2075 | ||
2076 | This will recreate a source-chroot from scratch, destroying the existing | |
2077 | logical volume if necessary. | |
2078 | """ | |
2079 | with Cleanup() as clean: | |
2080 | ||
2081 | dist, arch = me._dist, me._arch | |
2082 | clean.enter(lockfile(chroot_src_lockfile(dist, arch))) | |
2083 | ||
2084 | mnt = chroot_src_mntpt(dist, arch) | |
2085 | dev = chroot_src_blkdev(dist, arch) | |
2086 | lv = chroot_src_lv(dist, arch) | |
2087 | newlv = lv + ".new" | |
2088 | ||
2089 | ## Clean up any leftover debris. | |
2090 | if mountpoint_p(mnt): umount(mnt) | |
2091 | if block_device_p(dev): | |
2092 | run_root(["lvremove", "-f", "%s/%s" % (C.VG, lv)]) | |
2093 | ||
2094 | ## Create the logical volume and filesystem. It's important that the | |
2095 | ## logical volume not have its official name until after it contains a | |
2096 | ## mountable filesystem. | |
2097 | progress("create filesystem") | |
2098 | run_root(["lvcreate", "--yes", C.LVSZ, "-n", newlv, C.VG]) | |
2099 | run_root(["mkfs", "-j", "-L%s-%s" % (dist, arch), | |
2100 | OS.path.join("/dev", C.VG, newlv)]) | |
2101 | run_root(["lvrename", C.VG, newlv, lv]) | |
2102 | ||
2103 | ## Start installing the chroot. | |
2104 | with mount_chroot_src(dist, arch) as mnt: | |
2105 | ||
2106 | ## Set the basic structure. | |
2107 | run_root(["mkdir", "-m755", OS.path.join(mnt, "fs")]) | |
2108 | run_root(["chmod", "750", mnt]) | |
2109 | ||
2110 | ## Install the base system. | |
2111 | progress("install base system") | |
2112 | run_root(["eatmydata", "debootstrap"] + | |
2113 | (arch in C.FOREIGN_ARCHS and ["--foreign"] or []) + | |
2114 | ["--arch=" + arch, "--variant=minbase", | |
2115 | "--include=" + ",".join(C.BASE_PACKAGES), | |
2116 | dist, OS.path.join(mnt, "fs"), C.DEBMIRROR]) | |
2117 | ||
2118 | ## If this is a cross-installation, then install the necessary `qemu' | |
2119 | ## and complete the installation. | |
2120 | if arch in C.FOREIGN_ARCHS: | |
2121 | qemu = OS.path.join("cross", "%s-%s" % (dist, C.QEMUHOST[arch]), | |
2122 | "QEMU", "qemu-%s-static" % C.QEMUARCH[arch]) | |
2123 | run_root(["install", OS.path.join(C.LOCAL, qemu), | |
2124 | OS.path.join(mnt, "fs/usr/bin")]) | |
2125 | run_root(["chroot", OS.path.join(mnt, "fs"), | |
2126 | "/debootstrap/debootstrap", "--second-stage"]) | |
2127 | run_root(["ln", "-sf", | |
2128 | OS.path.join("/usr/local.schroot", qemu), | |
2129 | OS.path.join(mnt, "fs/usr/bin")]) | |
2130 | ||
2131 | ## Set up `/usr/local'. | |
2132 | progress("install `/usr/local' symlink") | |
2133 | run_root(["rm", "-rf", OS.path.join(mnt, "fs/usr/local")]) | |
2134 | run_root(["ln", "-s", | |
2135 | OS.path.join("local.schroot", arch), | |
2136 | OS.path.join(mnt, "fs/usr/local")]) | |
2137 | ||
2138 | ## Install the `apt' configuration. | |
2139 | progress("configure package manager") | |
2140 | run_root(["rm", "-f", OS.path.join(mnt, "fs/etc/apt/sources.list")]) | |
2141 | for c in C.APTCONF: | |
2142 | run_root(["ln", "-s", | |
2143 | OS.path.join("/usr/local.schroot/etc/apt/apt.conf.d", c), | |
2144 | OS.path.join(mnt, "fs/etc/apt/apt.conf.d")]) | |
2145 | run_root(["ln", "-s", | |
2146 | "/usr/local.schroot/etc/apt/sources.%s" % dist, | |
2147 | OS.path.join(mnt, "fs/etc/apt/sources.list")]) | |
2148 | ||
2149 | with safewrite_root\ | |
2150 | (OS.path.join(mnt, "fs/etc/apt/apt.conf.d/20arch")) as f: | |
2151 | f.write("""\ | |
2152 | ### -*-conf-*- | |
2153 | ||
2154 | APT { | |
2155 | Architecture "%s"; | |
2156 | }; | |
2157 | """ % arch) | |
2158 | ||
2159 | ## Set up the locale and time zone from the host system. | |
2160 | progress("configure locales and timezone") | |
2161 | run_root(["cp", "/etc/locale.gen", "/etc/timezone", | |
2162 | OS.path.join(mnt, "fs/etc")]) | |
2163 | with open("/etc/timezone") as f: tz = f.readline().strip() | |
2164 | run_root(["ln", "-sf", | |
2165 | OS.path.join("/usr/share/timezone", tz), | |
2166 | OS.path.join(mnt, "fs/etc/localtime")]) | |
2167 | run_root(["cp", "/etc/default/locale", | |
2168 | OS.path.join(mnt, "fs/etc/default")]) | |
2169 | ||
2170 | ## Fix `/etc/mtab'. | |
2171 | progress("set `/etc/mtab'") | |
2172 | run_root(["ln", "-sf", "/proc/mounts", | |
2173 | OS.path.join(mnt, "fs/etc/mtab")]) | |
2174 | ||
2175 | ## Prevent daemons from starting within the chroot. | |
2176 | progress("inhibit daemon startup") | |
2177 | with safewrite_root(OS.path.join(mnt, "fs/usr/sbin/policy-rc.d"), | |
2178 | mode = "755") as f: | |
2179 | f.write("""\ | |
2180 | #! /bin/sh | |
2181 | echo >&2 "policy-rc.d: Services disabled by policy." | |
2182 | exit 101 | |
2183 | """) | |
2184 | ||
2185 | ## Hack the dynamic linker to prefer libraries in `/usr' over | |
2186 | ## `/usr/local'. This prevents `dpkg-shlibdeps' from becoming | |
2187 | ## confused. | |
2188 | progress("configure dynamic linker") | |
2189 | with safewrite_root\ | |
2190 | (OS.path.join(mnt, "fs/etc/ld.so.conf.d/libc.conf")) as f: | |
2191 | f.write("# libc default configuration") | |
2192 | with safewrite_root\ | |
2193 | (OS.path.join(mnt, "fs/etc/ld.so.conf.d/zzz-local.conf")) as f: | |
2194 | f.write("""\ | |
2195 | ### -*-conf-*- | |
2196 | ### Local hack to make /usr/local/ late. | |
2197 | /usr/local/lib | |
2198 | """) | |
2199 | ||
2200 | ## If this is a foreign architecture then we need to set it up. | |
2201 | if arch in C.FOREIGN_ARCHS: | |
2202 | ||
2203 | ## Keep the chroot's native Qemu out of our way: otherwise we'll stop | |
2204 | ## being able to run programs in the chroot. There's a hack here | |
2205 | ## because the `--no-rename' option was required in the same version | |
2206 | ## in which is was introduced, so there's no single incantation that | |
2207 | ## will work across the boundary. | |
2208 | progress("divert emulator") | |
2209 | run_schroot_source(dist, arch, ["eatmydata", "sh", "-e", "-c", """ | |
2210 | if dpkg-divert >/dev/null 2>&1 --no-rename --help | |
2211 | then no_rename=--no-rename | |
2212 | else no_rename= | |
2213 | fi | |
2214 | ||
2215 | dpkg-divert --package install-cross-tools $no_rename \ | |
2216 | --divert /usr/bin/%(qemu)s.%(arch)s --add /usr/bin/%(qemu)s | |
2217 | """ % dict(arch = arch, qemu = "qemu-%s-static" % C.QEMUARCH[arch])]) | |
2218 | ||
2219 | ## Install faster native tools. | |
2220 | me._install_cross_tools() | |
2221 | ||
2222 | ## Finishing touches. | |
2223 | progress("finishing touches") | |
2224 | run_schroot_source(dist, arch, ["eatmydata", "sh", "-e", "-c", """ | |
2225 | apt-get update | |
2226 | apt-get -y upgrade | |
2227 | apt-get -y install "$@" | |
2228 | ldconfig | |
2229 | apt-get -y autoremove | |
2230 | apt-get clean | |
2231 | """, "."] + C.EXTRA_PACKAGES, stdin = DISCARD) | |
2232 | ||
2233 | ## Mark the chroot as done. | |
2234 | me._meta.update = zulu() | |
2235 | me._meta.write() | |
2236 | ||
2237 | def _update_chroot(me): | |
2238 | """Refresh the source-chroot with chroot metadata META.""" | |
2239 | with Cleanup() as clean: | |
2240 | dist, arch = me._dist, me._arch | |
2241 | clean.enter(lockfile(chroot_src_lockfile(dist, arch))) | |
2242 | run_schroot_source(dist, arch, ["eatmydata", "sh", "-e", "-c", """ | |
2243 | apt-get update | |
2244 | apt-get -y dist-upgrade | |
2245 | apt-get -y autoremove | |
2246 | apt-get -y clean | |
2247 | """], stdin = DISCARD) | |
2248 | if arch in C.FOREIGN_ARCHS: me._install_cross_tools() | |
2249 | me._meta.update = zulu(); me._meta.write() | |
2250 | ||
2251 | def run(me): | |
2252 | if me._meta.update is not None: me._update_chroot() | |
2253 | else: me._make_chroot() | |
2254 | ||
2255 | ###-------------------------------------------------------------------------- | |
2256 | ### Extracting the cross tools. | |
2257 | ||
2258 | class CrossToolsJob (BaseJob): | |
2259 | """Extract cross-tools from a donor chroot.""" | |
2260 | ||
2261 | SPECS = C.NATIVE_CHROOTS | |
2262 | ||
2263 | def __init__(me, spec, fresh = CREATE, *args, **kw): | |
2264 | super(CrossToolsJob, me).__init__(*args, **kw) | |
2265 | me._dist, me._arch = split_dist_arch(spec) | |
2266 | me._meta = CrossToolsMetadata.read(me._dist, me._arch) | |
2267 | me._fresh = fresh | |
2268 | me._chroot = None | |
2269 | ||
2270 | def _mkname(me): return "cross-tools.%s-%s" % (me._dist, me._arch) | |
2271 | ||
2272 | def prepare(me): | |
2273 | st, r = check_fresh(me._fresh, me._meta.update) | |
2274 | if st is DONE: return | |
2275 | me._chroot = ChrootJob.ensure("%s-%s" % (me._dist, me._arch), FRESH) | |
2276 | me.await(me._chroot) | |
2277 | ||
2278 | def check(me): | |
2279 | status, reason = super(CrossToolsJob, me).check() | |
2280 | if status is not READY: return status, reason | |
2281 | if me._chroot is not None and me._chroot.started: | |
2282 | return READY, "prerequisites run" | |
2283 | return check_fresh(me._fresh, me._meta.update) | |
2284 | ||
2285 | def run(me): | |
2286 | with Cleanup() as clean: | |
2287 | ||
2288 | dist, arch = me._dist, me._arch | |
2289 | ||
2290 | mymulti = run_program(["dpkg-architecture", "-a" + arch, | |
2291 | "-qDEB_HOST_MULTIARCH"], | |
2292 | stdout = RETURN).rstrip("\n") | |
2293 | crossarchs = [run_program(["dpkg-architecture", "-A" + a, | |
2294 | "-qDEB_TARGET_GNU_TYPE"], | |
2295 | stdout = RETURN).rstrip("\n") | |
2296 | for a in C.FOREIGN_ARCHS] | |
2297 | ||
2298 | crossdir = OS.path.join(C.LOCAL, "cross", "%s-%s" % (dist, arch)) | |
2299 | crossold = crossdir + ".old"; crossnew = crossdir + ".new" | |
2300 | usrbin = OS.path.join(crossnew, "usr/bin") | |
2301 | ||
2302 | clean.enter(lockfile(crosstools_lockfile(dist, arch))) | |
2303 | run_program(["rm", "-rf", crossnew]) | |
2304 | mkdir_p(crossnew) | |
2305 | ||
2306 | ## Open a session to the donor chroot. | |
2307 | progress("establish snapshot") | |
2308 | session, root = clean.enter(chroot_session(dist, arch)) | |
2309 | ||
2310 | ## Make sure the donor tree is up-to-date, and install the extra | |
2311 | ## packages we need. | |
2312 | progress("install tools packages") | |
2313 | run_schroot_session(session, ["eatmydata", "sh", "-e", "-c", """ | |
2314 | apt-get update | |
2315 | apt-get -y upgrade | |
2316 | apt-get -y install "$@" | |
2317 | """, "."] + C.CROSS_PACKAGES, rootp = True, stdin = DISCARD) | |
2318 | ||
    def chase(path):
      ## Make the chroot-relative name PATH (given without its leading `/')
      ## resolve inside the new cross-tools tree, copying path components
      ## and symbolic-link targets from the donor tree as needed.
      ##
      ## NOTE(review): `root', `crossnew', `progress' and `run_program' are
      ## captured from the enclosing scope; DEST accumulates the resolved
      ## prefix of PATH, always either empty or starting with `/'.
      dest = ""

      ## Work through the remaining components of the PATH.
      while path != "":
        try: sl = path.index("/")
        except ValueError: step = path; path = ""
        else: step, path = path[:sl], path[sl + 1:]

        ## Split off and analyse the first component.
        if step == "" or step == ".":
          ## A redundant `/' or `./'.  Skip it.
          pass
        elif step == "..":
          ## A `../'.  Strip off the trailing component of DEST.
          dest = dest[:dest.rindex("/")]
        else:
          ## Something else.  Transfer the component name to DEST.
          dest += "/" + step

        ## If DEST refers to something in the cross-tools tree then we're
        ## good.
        crossdest = crossnew + dest
        try: st = OS.lstat(crossdest)
        except OSError, err:
          if err.errno == E.ENOENT:
            ## No.  We need to copy something from the donor tree so that
            ## the name works.  A directory is simply created empty; any
            ## other object is copied wholesale (preserving hard links and
            ## relative structure) with `rsync -aHR'.

            st = OS.lstat(root + dest)
            if ST.S_ISDIR(st.st_mode):
              OS.mkdir(crossdest)
            else:
              progress("copy `%s'" % dest)
              run_program(["rsync", "-aHR",
                           "%s/.%s" % (root, dest),
                           crossnew])
          else:
            raise

        ## If DEST refers to a symbolic link, then prepend the link target
        ## to PATH so that we can be sure the link will work.  An absolute
        ## target restarts DEST from the tree root; a relative one resolves
        ## against DEST's parent directory.
        if ST.S_ISLNK(st.st_mode):
          link = OS.readlink(crossdest)
          if link.startswith("/"): dest = ""; link = link[1:]
          else:
            try: dest = dest[:dest.rindex("/")]
            except ValueError: dest = ""
          if path == "": path = link
          else: path = "%s/%s" % (path, link)
2369 | ||
2370 | ## Work through the shopping list, copying the things it names into the | |
2371 | ## cross-tools tree. | |
2372 | scan = [] | |
2373 | for pat in C.CROSS_PATHS: | |
2374 | pat = pat.replace("MULTI", mymulti) | |
2375 | any = False | |
2376 | for rootpath in GLOB.iglob(root + pat): | |
2377 | any = True | |
2378 | path = rootpath[len(root):] | |
2379 | progress("copy `%s'" % path) | |
2380 | run_program(["rsync", "-aHR", "%s/.%s" % (root, path), crossnew]) | |
2381 | if not any: | |
2382 | raise RuntimeError("no matches for cross-tool pattern `%s'" % pat) | |
2383 | ||
2384 | ## Scan the new tree: chase down symbolic links, copying extra stuff | |
2385 | ## that we'll need; and examine ELF binaries to make sure we get the | |
2386 | ## necessary shared libraries. | |
2387 | def visit(_, dir, files): | |
2388 | for f in files: | |
2389 | path = OS.path.join(dir, f) | |
2390 | inside = switch_prefix(path, [(crossnew + "/", "/")]) | |
2391 | if OS.path.islink(path): chase(inside) | |
2392 | if elf_binary_p(arch, path): scan.append(inside) | |
2393 | OS.path.walk(crossnew, visit, None) | |
2394 | ||
2395 | ## Work through the ELF binaries in `scan', determining which shared | |
2396 | ## libraries they'll need. | |
2397 | ## | |
2398 | ## The rune running in the chroot session reads ELF binary names on | |
2399 | ## stdin, one per line, and runs `ldd' on them to discover the binary's | |
2400 | ## needed libraries and resolve them into pathnames. Each pathname is | |
2401 | ## printed to stderr as a line `+PATHNAME', followed by a final line | |
2402 | ## consisting only of `-' as a terminator. This is necessary so that | |
2403 | ## we can tell when we've finished, because newly discovered libraries | |
2404 | ## need to be fed back to discover their recursive dependencies. (This | |
2405 | ## is why the `WriteLinesSelector' interface is quite so hairy.) | |
2406 | with subprocess(["schroot", "-r", "-c", session, "--", | |
2407 | "sh", "-e", "-c", """ | |
2408 | while read path; do | |
2409 | ldd "$path" | while read a b c d; do | |
2410 | case $a:$b:$c:$d in | |
2411 | not:a:dynamic:executable) ;; | |
2412 | statically:linked::) ;; | |
2413 | /*) echo "+$a" ;; | |
2414 | *:=\\>:/*) echo "+$c" ;; | |
2415 | linux-*) ;; | |
2416 | *) echo >&2 "failed to find shared library \\`$a'"; exit 2 ;; | |
2417 | esac | |
2418 | done | |
2419 | echo - | |
2420 | done | |
2421 | """], stdin = PIPE, stdout = PIPE) as (fd_in, fd_out, _): | |
2422 | ||
2423 | ## Keep track of the number of binaries we've reported to the `ldd' | |
2424 | ## process for which we haven't yet seen all of their dependencies. | |
2425 | ## (This is wrapped in a `Struct' because of Python's daft scoping | |
2426 | ## rules.) | |
2427 | v = Struct(n = 0) | |
2428 | ||
2429 | def line_in(): | |
2430 | ## Provide a line in., so raise `StopIteration' to signal this. | |
2431 | ||
2432 | try: | |
2433 | ## See if there's something to scan. | |
2434 | path = scan.pop() | |
2435 | ||
2436 | except IndexError: | |
2437 | ## There's nothing currently waiting to be scanned. | |
2438 | if v.n: | |
2439 | ## There are still outstanding replies, so stall. | |
2440 | return None | |
2441 | else: | |
2442 | ## There are no outstanding replies left, and we have nothing | |
2443 | ## more to scan, then we must be finished. | |
2444 | raise StopIteration | |
2445 | ||
2446 | else: | |
2447 | ## The `scan' list isn't empty, so return an item from that, and | |
2448 | ## remember that there's one more thing we expect to see answers | |
2449 | ## from. | |
2450 | v.n += 1; return path | |
2451 | ||
2452 | def line_out(line): | |
2453 | ## We've received a line from the `ldd' process. | |
2454 | ||
2455 | if line == "-": | |
2456 | ## It's finished processing one of our binaries. Note this. | |
2457 | ## Maybe it's time to stop | |
2458 | v.n -= 1 | |
2459 | return | |
2460 | ||
2461 | ## Strip the leading marker (which is just there so that the | |
2462 | ## terminating `-' is unambiguous). | |
2463 | assert line.startswith("+") | |
2464 | lib = line[1:] | |
2465 | ||
2466 | ## If we already have this binary then we'll already have submitted | |
2467 | ## it. | |
2468 | path = crossnew + lib | |
2469 | try: OS.lstat(path) | |
2470 | except OSError, err: | |
2471 | if err.errno == E.ENOENT: pass | |
2472 | else: raise | |
2473 | else: return | |
2474 | ||
2475 | ## Copy it into the tools tree, together with any symbolic links | |
2476 | ## along the path. | |
2477 | chase(lib) | |
2478 | ||
2479 | ## If this is an ELF binary (and it ought to be!) then submit it | |
2480 | ## for further scanning. | |
2481 | if elf_binary_p(arch, path): | |
2482 | scan.append(switch_prefix(path, [(crossnew + "/", "/")])) | |
2483 | ||
2484 | ## And run this entire contraption. When this is done, we should | |
2485 | ## have all of the library dependencies for all of our binaries. | |
2486 | select_loop([WriteLinesSelector(fd_in, line_in), | |
2487 | ReadLinesSelector(fd_out, line_out)]) | |
2488 | ||
2489 | ## Set up the cross-compiler and emulator. Start by moving the cross | |
2490 | ## compilers and emulator into their specific places, so they don't end | |
2491 | ## up cluttering chroots for non-matching architectures. | |
2492 | progress("establish TOOLCHAIN and QEMU") | |
2493 | OS.mkdir(OS.path.join(crossnew, "TOOLCHAIN")) | |
2494 | qemudir = OS.path.join(crossnew, "QEMU") | |
2495 | OS.mkdir(qemudir) | |
2496 | for gnu in C.FOREIGN_GNUARCHS: | |
2497 | OS.mkdir(OS.path.join(crossnew, "TOOLCHAIN", gnu)) | |
2498 | for f in OS.listdir(usrbin): | |
2499 | for gnu in C.FOREIGN_GNUARCHS: | |
2500 | gnuprefix = gnu + "-" | |
2501 | if f.startswith(gnuprefix): | |
2502 | tooldir = OS.path.join(crossnew, "TOOLCHAIN", gnu) | |
2503 | OS.rename(OS.path.join(usrbin, f), OS.path.join(tooldir, f)) | |
2504 | OS.symlink(f, OS.path.join(tooldir, f[len(gnuprefix):])) | |
2505 | break | |
2506 | else: | |
2507 | if f.startswith("qemu-") and f.endswith("-static"): | |
2508 | OS.rename(OS.path.join(usrbin, f), OS.path.join(qemudir, f)) | |
2509 | ||
2510 | ## The GNU cross compilers try to find their additional pieces via a | |
2511 | ## relative path, which isn't going to end well. Add a symbolic link | |
2512 | ## at the right place to where the things are actually going to live. | |
2513 | toollib = OS.path.join(crossnew, "TOOLCHAIN", "lib") | |
2514 | OS.mkdir(toollib) | |
2515 | OS.symlink("../../usr/lib/gcc-cross", | |
2516 | OS.path.join(toollib, "gcc-cross")) | |
2517 | ||
2518 | ## We're done. Replace the old cross-tools with our new one. | |
2519 | me._meta.update = zulu() | |
2520 | me._meta.write(crossnew) | |
2521 | if OS.path.exists(crossdir): run_program(["mv", crossdir, crossold]) | |
2522 | OS.rename(crossnew, crossdir) | |
2523 | run_program(["rm", "-rf", crossold]) | |
2524 | ||
2525 | ###-------------------------------------------------------------------------- | |
### Building and installing local packages.
2527 | ||
def pkg_metadata_lockfile(pkg):
  ## Return the lockfile path guarding package PKG's metadata record.
  name = "pkg-meta.%s" % pkg
  return lockfile_path(name)
2530 | ||
def pkg_srcdir_lockfile(pkg, ver):
  ## Return the lockfile path guarding the version-VER source tree of PKG.
  name = "pkg-source.%s-%s" % (pkg, ver)
  return lockfile_path(name)
2533 | ||
def pkg_srcdir(pkg, ver):
  ## Return the directory holding the unpacked version-VER source of PKG.
  treename = "%s-%s" % (pkg, ver)
  return OS.path.join(C.LOCAL, "src", treename)
2536 | ||
def pkg_builddir(pkg, ver, arch):
  ## Return the ARCH-specific build directory within PKG's source tree.
  srcdir = pkg_srcdir(pkg, ver)
  return OS.path.join(srcdir, "build.%s" % arch)
2539 | ||
2540 | class PackageMetadata (BaseMetadata): | |
2541 | VARS = ["pkg"] + list(C.ALL_ARCHS) | |
2542 | ||
2543 | @classmethod | |
2544 | def read(cls, pkg): | |
2545 | try: | |
2546 | return super(PackageMetadata, cls)\ | |
2547 | .read(OS.path.join(C.LOCAL, "src", "META.%s" % pkg)) | |
2548 | except IOError, err: | |
2549 | if err.errno == E.ENOENT: pass | |
2550 | else: raise | |
2551 | return cls(pkg = pkg) | |
2552 | ||
2553 | def write(me): | |
2554 | super(PackageMetadata, me)\ | |
2555 | .write(OS.path.join(C.LOCAL, "src", "META.%s" % me.pkg)) | |
2556 | ||
class PackageSourceJob (BaseJob):
  ## Unpack the source tarball of a local package into a source tree under
  ## `C.LOCAL/src/'.

  SPECS = C.LOCALPKGS

  def __init__(me, pkg, fresh = CREATE, *args, **kw):
    ## Locate the unique tarball `pkg/PKG-VERSION.tar.EXT' for package
    ## PKG, recording its version and pathname.  Raise `ExpectedError' if
    ## there is no such tarball, or more than one.
    super(PackageSourceJob, me).__init__(*args, **kw)
    me._pkg = pkg
    tar = None; ver = None
    ## Raw string, and both dots escaped: previously the dot before the
    ## extension could match any character.
    r = RX.compile(r"^%s-(\d.*)\.tar\.(?:Z|z|gz|bz2|xz|lzma)$" %
                   RX.escape(pkg))
    for f in OS.listdir("pkg"):
      m = r.match(f)
      if not m: pass
      elif tar is not None:
        raise ExpectedError("multiple source tarballs of package `%s'" % pkg)
      else: tar, ver = f, m.group(1)
    if tar is None:
      ## Fail with a clear message rather than letting `OS.path.join'
      ## choke on `None' below.
      raise ExpectedError("no source tarball of package `%s'" % pkg)
    me.version = ver
    me.tarball = OS.path.join("pkg", tar)

  def _mkname(me): return "pkg-source.%s" % me._pkg

  def check(me):
    ## Report READY unless the source tree is already unpacked.
    status, reason = super(PackageSourceJob, me).check()
    if status is not READY: return status, reason
    if OS.path.isdir(pkg_srcdir(me._pkg, me.version)):
      return DONE, "already unpacked"
    else:
      return READY, "no source tree"

  def run(me):
    ## Unpack the tarball into a scratch directory and move the result
    ## into place, holding the source-tree lock throughout.
    with Cleanup() as clean:
      pkg, ver, tar = me._pkg, me.version, me.tarball
      srcdir = pkg_srcdir(pkg, ver)
      newdir = srcdir + ".new"

      progress("unpack `%s'" % me.tarball)
      clean.enter(lockfile(pkg_srcdir_lockfile(pkg, ver)))
      run_program(["rm", "-rf", newdir])
      mkdir_p(newdir)
      run_program(["tar", "xf", OS.path.join(OS.getcwd(), me.tarball)],
                  cwd = newdir)
      things = OS.listdir(newdir)
      if len(things) == 1:
        ## The tarball contained a single top-level directory, as
        ## expected: move that into place and discard the scratch wrapper.
        OS.rename(OS.path.join(newdir, things[0]), srcdir)
        OS.rmdir(newdir)
      else:
        ## A `tarbomb': use the scratch directory itself as the tree.
        OS.rename(newdir, srcdir)
2604 | ||
2605 | class PackageBuildJob (BaseJob): | |
2606 | ||
2607 | SPECS = ["%s:%s" % (pkg, arch) | |
2608 | for pkg in C.LOCALPKGS | |
2609 | for arch in C.ALL_ARCHS] | |
2610 | ||
2611 | def __init__(me, spec, fresh = CREATE, *args, **kw): | |
2612 | super(PackageBuildJob, me).__init__(*args, **kw) | |
2613 | colon = spec.index(":") | |
2614 | me._pkg, me._arch = spec[:colon], spec[colon + 1:] | |
2615 | ||
2616 | def _mkname(me): return "pkg-build.%s:%s" % (me._pkg, me._arch) | |
2617 | ||
2618 | def prepare(me): | |
2619 | me.await(ChrootJob.ensure("%s-%s" % (C.PRIMARY_DIST, me._arch), CREATE)) | |
2620 | me._meta = PackageMetadata.read(me._pkg) | |
2621 | me._src = PackageSourceJob.ensure(me._pkg, FRESH); me.await(me._src) | |
2622 | me._prereq = [PackageBuildJob.ensure("%s:%s" % (prereq, me._arch), FRESH) | |
2623 | for prereq in C.PKGDEPS[me._pkg]] | |
2624 | for j in me._prereq: me.await(j) | |
2625 | ||
2626 | def check(me): | |
2627 | status, reason = super(PackageBuildJob, me).check() | |
2628 | if status is not READY: return status, reason | |
2629 | if me._src.started: return READY, "fresh source directory" | |
2630 | for j in me._prereq: | |
2631 | if j.started: | |
2632 | return READY, "dependency `%s' freshly installed" % j._pkg | |
2633 | if getattr(me._meta, me._arch) == me._src.version: | |
2634 | return DONE, "already installed" | |
2635 | return READY, "not yet installed" | |
2636 | ||
2637 | def run(me): | |
2638 | with Cleanup() as clean: | |
2639 | pkg, ver, arch = me._pkg, me._src.version, me._arch | |
2640 | ||
2641 | session, _ = clean.enter(chroot_session(C.PRIMARY_DIST, arch)) | |
2642 | builddir = OS.path.join(pkg_srcdir(pkg, ver), "build.%s" % arch) | |
2643 | chroot_builddir = host_to_chroot(builddir) | |
2644 | run_program(["rm", "-rf", builddir]) | |
2645 | OS.mkdir(builddir) | |
2646 | ||
2647 | progress("prepare %s chroot" % (arch)) | |
2648 | run_schroot_session(session, | |
2649 | ["eatmydata", "apt-get", "update"], | |
2650 | rootp = True, stdin = DISCARD) | |
2651 | run_schroot_session(session, | |
2652 | ["eatmydata", "apt-get", "-y", "upgrade"], | |
2653 | rootp = True, stdin = DISCARD) | |
2654 | run_schroot_session(session, | |
2655 | ["eatmydata", "apt-get", "-y", | |
2656 | "install", "pkg-config"], | |
2657 | rootp = True, stdin = DISCARD) | |
2658 | run_schroot_session(session, | |
2659 | ["mount", "-oremount,rw", "/usr/local.schroot"], | |
2660 | rootp = True, stdin = DISCARD) | |
2661 | ||
2662 | progress("configure `%s' %s for %s" % (pkg, ver, arch)) | |
2663 | run_schroot_session(session, ["sh", "-e", "-c", """ | |
2664 | cd "$1" && | |
2665 | ../configure PKG_CONFIG_PATH=/usr/local/lib/pkgconfig.hidden | |
2666 | """, ".", chroot_builddir]) | |
2667 | ||
2668 | progress("compile `%s' %s for %s" % (pkg, ver, arch)) | |
2669 | run_schroot_session(session, ["sh", "-e", "-c", """ | |
2670 | cd "$1" && make -j4 && make -j4 check | |
2671 | """, ".", chroot_builddir]) | |
2672 | ||
2673 | existing = getattr(me._meta, arch, None) | |
2674 | if existing is not None and existing != ver: | |
2675 | progress("uninstall existing `%s' %s for %s" % (pkg, existing, arch)) | |
2676 | run_schroot_session(session, ["sh", "-e", "-c", """ | |
2677 | cd "$1" && make uninstall | |
2678 | """, ".", OS.path.join(pkg_srcdir(pkg, existing), | |
2679 | "build.%s" % arch)], | |
2680 | rootp = True) | |
2681 | ||
2682 | progress("install `%s' %s for %s" % (pkg, existing, arch)) | |
2683 | run_schroot_session(session, ["sh", "-e", "-c", """ | |
2684 | cd "$1" && make install | |
2685 | mkdir -p /usr/local/lib/pkgconfig.hidden | |
2686 | mv /usr/local/lib/pkgconfig/*.pc /usr/local/lib/pkgconfig.hidden || : | |
2687 | """, ".", chroot_builddir], rootp = True) | |
2688 | ||
2689 | clean.enter(lockfile(pkg_metadata_lockfile(pkg))) | |
2690 | me._meta = PackageMetadata.read(pkg) | |
2691 | setattr(me._meta, arch, ver); me._meta.write() | |
2692 | ||
2693 | with lockfile(chroot_src_lockfile(C.PRIMARY_DIST, arch)): | |
2694 | run_schroot_source(C.PRIMARY_DIST, arch, ["ldconfig"]) | |
2695 | ||
2696 | ###-------------------------------------------------------------------------- | |
2697 | ### Process the configuration and options. | |
2698 | ||
## Build the command-line option parser from a table of
## (SHORT, LONG, KEYWORDS) triples.
OPTIONS = OP.OptionParser \
  (usage = "chroot-maint [-diknqs] [-fFRESH] [-jN] JOB[.SPEC,...] ...")
_OPTDEFS = [
  ("-d", "--debug",
   { 'dest': 'debug', 'default': False, 'action': 'store_true',
     'help': "print lots of debugging drivel" }),
  ("-f", "--fresh",
   { 'dest': 'fresh', 'metavar': 'FRESH', 'default': "create",
     'help': "how fresh (`create', `force', or `N[s|m|h|d|w]')" }),
  ("-i", "--ignore-errors",
   { 'dest': 'ignerr', 'default': False, 'action': 'store_true',
     'help': "ignore all errors encountered while processing" }),
  ("-j", "--jobs",
   { 'dest': 'njobs', 'metavar': 'N', 'default': 1, 'type': 'int',
     'help': 'run up to N jobs in parallel' }),
  ("-J", "--forkbomb",
   { 'dest': 'njobs', 'action': 'store_true',
     'help': 'run as many jobs in parallel as possible' }),
  ("-k", "--keep-going",
   { 'dest': 'keepon', 'default': False, 'action': 'store_true',
     'help': "keep going even if independent jobs fail" }),
  ("-n", "--dry-run",
   { 'dest': 'dryrun', 'default': False, 'action': 'store_true',
     'help': "don't actually do anything" }),
  ("-q", "--quiet",
   { 'dest': 'quiet', 'default': False, 'action': 'store_true',
     'help': "don't print the output from successful jobs" }),
  ("-s", "--silent",
   { 'dest': 'silent', 'default': False, 'action': 'store_true',
     'help': "don't print progress messages" })]
for shortopt, longopt, props in _OPTDEFS:
  OPTIONS.add_option(shortopt, longopt, **props)
2730 | ||
2731 | ###-------------------------------------------------------------------------- | |
2732 | ### Main program. | |
2733 | ||
## Regexp matching the jobserver announcement which GNU `make' passes down
## in `MAKEFLAGS'; the two groups are the read and write pipe descriptors.
## (Older `make's spell the option `--jobserver-fds', newer ones
## `--jobserver-auth'.)
R_JOBSERV = RX.compile(r'^--jobserver-(?:fds|auth)=(\d+),(\d+)$')

## Map from the job-type names accepted on the command line to the job
## classes which implement them.
JOBMAP = { "chroot": ChrootJob,
           "cross-tools": CrossToolsJob,
           "pkg-source": PackageSourceJob,
           "pkg-build": PackageBuildJob }

## Regexp matching a freshness spec: `create', `force', or a number with an
## optional unit-suffix letter.  See `parse_fresh'.
R_FRESH = RX.compile(r"^(?:create|force|(\d+)(|[smhdw]))$")
2742 | ||
def parse_fresh(spec):
  ## Convert the freshness spec SPEC into internal form: one of the tokens
  ## `CREATE' or `FORCE', or a maximum age in seconds.  Raise
  ## `ExpectedError' if SPEC doesn't look like a freshness spec at all.

  m = R_FRESH.match(spec)
  if not m: raise ExpectedError("bad freshness `%s'" % spec)
  if spec == "create": return CREATE
  if spec == "force": return FORCE

  ## Otherwise it's a number with an optional unit suffix: scale it into
  ## seconds.
  n, unit = int(m.group(1)), m.group(2)
  scale = { "": 1, "s": 1, "m": 60, "h": 3600,
            "d": 86400, "w": 604800 }[unit]
  return scale*n
2757 | ||
## Main program: parse options, pick up `make' jobserver state, build the
## requested jobs, and run the scheduler.
with toplevel_handler():
  OPT, args = OPTIONS.parse_args()

  ## If we were invoked from `make' then honour its jobserver (RFD/WFD are
  ## the pipe descriptors announced in `MAKEFLAGS') and its single-letter
  ## flags which overlap with ours.
  rfd, wfd = -1, -1
  njobs = OPT.njobs
  try: mkflags = OS.environ['MAKEFLAGS']
  except KeyError: pass
  else:
    ff = mkflags.split()
    for f in ff:
      if f == "--": break
      m = R_JOBSERV.match(f)
      if m: rfd, wfd = int(m.group(1)), int(m.group(2))
      elif f == '-j': njobs = None
      elif not f.startswith('-'):
        ## A bare word of single-letter `make' flags.
        for ch in f:
          if ch == 'i': OPT.ignerr = True
          elif ch == 'k': OPT.keepon = True
          elif ch == 'n': OPT.dryrun = True
          elif ch == 's': OPT.silent = True
  if OPT.njobs < 1:
    raise ExpectedError("running no more than %d jobs is silly" % OPT.njobs)

  FRESH = parse_fresh(OPT.fresh)

  SCHED = JobScheduler(rfd, wfd, njobs)
  OS.environ["http_proxy"] = C.PROXY

  ## Work through the job specifications on the command line.  Each has
  ## the form JOB[.PATTERN,...][/FRESH]; the patterns are matched against
  ## the job class's known specs, and each match sets up a job.
  jobs = []
  if not args: OPTIONS.print_usage(SYS.stderr); SYS.exit(2)
  for arg in args:
    ## Split off the optional `/FRESH' suffix.
    try: sl = arg.index("/")
    except ValueError: fresh = FRESH
    else: arg, fresh = arg[:sl], parse_fresh(arg[sl + 1:])
    ## Split the job type from the spec patterns (default: all specs).
    try: dot = arg.index(".")
    except ValueError: jty, pats = arg, "*"
    else: jty, pats = arg[:dot], arg[dot + 1:]
    try: jcls = JOBMAP[jty]
    except KeyError: raise ExpectedError("unknown job type `%s'" % jty)
    ## Expand each glob pattern against the class's spec list; every
    ## pattern must match something.
    specs = []
    for pat in pats.split(","):
      any = False
      for s in jcls.SPECS:
        if FM.fnmatch(s, pat): specs.append(s); any = True
      if not any: raise ExpectedError("no match for `%s'" % pat)
    for s in specs:
      ## `ensure' registers the job (presumably with SCHED) and returns
      ## it; the JOBS list just collects them.
      jobs.append(jcls.ensure(s, fresh))

  ## And run the whole lot.
  SCHED.run()

SYS.exit(RC)
2808 | ||
2809 | ###----- That's all, folks -------------------------------------------------- |