Commit | Line | Data |
---|---|---|
a98c9dba MW |
1 | #! /usr/bin/python |
2 | ### | |
3 | ### Create, upgrade, and maintain (native and cross-) chroots | |
4 | ### | |
5 | ### (c) 2018 Mark Wooding | |
6 | ### | |
7 | ||
8 | ###----- Licensing notice --------------------------------------------------- | |
9 | ### | |
10 | ### This file is part of the distorted.org.uk chroot maintenance tools. | |
11 | ### | |
12 | ### distorted-chroot is free software: you can redistribute it and/or | |
13 | ### modify it under the terms of the GNU General Public License as | |
14 | ### published by the Free Software Foundation; either version 2 of the | |
15 | ### License, or (at your option) any later version. | |
16 | ### | |
17 | ### distorted-chroot is distributed in the hope that it will be useful, | |
18 | ### but WITHOUT ANY WARRANTY; without even the implied warranty of | |
19 | ### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
20 | ### General Public License for more details. | |
21 | ### | |
22 | ### You should have received a copy of the GNU General Public License | |
23 | ### along with distorted-chroot. If not, write to the Free Software | |
24 | ### Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, | |
25 | ### USA. | |
26 | ||
27 | ## still to do: | |
28 | ## tidy up | |
29 | ||
30 | import contextlib as CTX | |
31 | import errno as E | |
32 | import fcntl as FC | |
33 | import fnmatch as FM | |
34 | import glob as GLOB | |
35 | import itertools as I | |
36 | import optparse as OP | |
37 | import os as OS | |
38 | import random as R | |
39 | import re as RX | |
40 | import signal as SIG | |
41 | import select as SEL | |
42 | import stat as ST | |
43 | from cStringIO import StringIO | |
44 | import sys as SYS | |
45 | import time as T | |
46 | import traceback as TB | |
47 | ||
48 | import jobclient as JC | |
49 | ||
QUIS = OS.path.basename(SYS.argv[0])    # program name, for error messages
TODAY = T.strftime("%Y-%m-%d")          # date stamp, captured once at startup
NOW = T.time()                          # Unix time, captured once at startup
53 | ||
54 | ###-------------------------------------------------------------------------- | |
55 | ### Random utilities. | |
56 | ||
RC = 0                                  # exit status to report at the end
def moan(msg):
  """Print MSG to stderr as a warning."""
  ## `OPT.silent' is established by the option parser elsewhere in the file.
  if not OPT.silent: OS.write(2, "%s: %s\n" % (QUIS, msg))
def error(msg):
  """Print MSG to stderr, and remember to exit nonzero."""
  global RC
  moan(msg)
  RC = 2                                # sticky: later successes don't reset it
66 | ||
class ExpectedError (Exception):
  """A fatal error which shouldn't print a backtrace."""
  ## Caught by `toplevel_handler', which reports it as a plain message.
  pass
70 | ||
@CTX.contextmanager
def toplevel_handler():
  """Catch `ExpectedError's and report Unixish error messages."""
  ## Any other exception propagates and produces a full backtrace.
  try: yield None
  except ExpectedError, err: moan(err); SYS.exit(2)
76 | ||
def spew(msg):
  """Print MSG to stderr as a debug trace."""
  ## `OPT.debug' is established by the option parser elsewhere in the file.
  if OPT.debug: OS.write(2, ";; %s\n" % msg)
80 | ||
class Tag (object):
  """
  Unique token objects.

  A `Tag' carries nothing but a label, used only when printing; distinct
  instances are distinct values, so tags serve as out-of-band markers.
  """
  def __init__(me, label):
    me._label = label
  def _describe(me):
    """Common formatting used by both `str' and `repr'."""
    return '#<%s %s>' % (me.__class__.__name__, me._label)
  def __str__(me): return me._describe()
  def __repr__(me): return me._describe()
86 | ||
class Struct (object):
  """A trivial record type: attributes are set from keyword arguments."""
  def __init__(me, **kw):
    for key, value in kw.items(): setattr(me, key, value)
89 | ||
class Cleanup (object):
  """
  A context manager which stacks other context managers.

  On its own it does nothing at all.  Other context managers are attached
  with `enter', and bare cleanup functions with `add'.  When the `Cleanup'
  is left, the attached contexts are exited, and the cleanup functions run,
  most recent first.
  """

  def __init__(me):
    me._cleanups = []

  def __enter__(me):
    return me

  def __exit__(me, exty, exval, extb):
    ## Run the stacked cleanups last-in first-out; an exception is
    ## suppressed if any of them asks for that.
    suppress = False
    index = len(me._cleanups)
    while index > 0:
      index -= 1
      if me._cleanups[index](exty, exval, extb): suppress = True
    return suppress

  def enter(me, ctx):
    """Enter CTX now; exit it when we're exited.  Returns CTX's value."""
    value = ctx.__enter__()
    me._cleanups.append(ctx.__exit__)
    return value

  def add(me, func):
    """Arrange to call FUNC (with no arguments) when we're exited."""
    me._cleanups.append(lambda exty, exval, extb: func())
113 | ||
def zulu(t = None):
  """Return the Unix time T (default: the current time) as an ISO8601 UTC string."""
  tm = T.gmtime(t)
  return T.strftime("%Y-%m-%dT%H:%M:%SZ", tm)
117 | ||
R_ZULU = RX.compile(r"^(\d+)-(\d+)-(\d+)T(\d+):(\d+):(\d+)Z$")
def unzulu(z):
  """Convert the time string Z back to a Unix time."""
  m = R_ZULU.match(z)
  if m is None: raise ValueError("bad time spec `%s'" % z)
  fields = [int(g) for g in m.groups()]
  ## Pad out the remaining `struct tm' slots (wday, yday, isdst).
  fields.extend([0, 0, 0])
  return T.mktime(tuple(fields))
125 | ||
126 | ###-------------------------------------------------------------------------- | |
127 | ### Simple select(2) utilities. | |
128 | ||
class BaseSelector (object):
  """
  A do-nothing base class for participants in `select_loop'.

  See `select_loop' for the protocol details.  The default methods register
  no interest in any descriptor and ignore readiness notifications.
  """
  def preselect(me, rfds, wfds):
    """Append interesting descriptors to RFDS/WFDS; default: none."""
    pass
  def postselect_read(me, fd):
    """Called when FD is ready for reading; default: ignore."""
    pass
  def postselect_write(me, fd):
    """Called when FD is ready for writing; default: ignore."""
    pass
138 | ||
139 | class WriteLinesSelector (BaseSelector): | |
140 | """Write whole lines to an output file descriptor.""" | |
141 | ||
142 | def __init__(me, fd, nextfn = None, *args, **kw): | |
143 | """ | |
144 | Initialize the WriteLinesSelector to write to the file descriptor FD. | |
145 | ||
146 | The FD is marked non-blocking. | |
147 | ||
148 | The lines are produced by the NEXTFN, which is called without arguments. | |
149 | It can affect the output in three ways: | |
150 | ||
151 | * It can return a string (or almost any other kind of object, which | |
152 | will be converted into a string by `str'), which will be written to | |
153 | the descriptor followed by a newline. Lines are written in the order | |
154 | in which they are produced. | |
155 | ||
156 | * It can return `None', which indicates that there are no more items to | |
157 | be written for the moment. The function will be called again from | |
158 | time to time, to see if it has changed its mind. This is the right | |
159 | thing to do in order to stall output temporarily. | |
160 | ||
161 | * It can raise `StopIteration', which indicates that there will never | |
162 | be any more items. The file descriptor will be closed. | |
163 | ||
164 | Subclasses can override this behaviour by defining a method `_next' and | |
165 | passing `None' as the NEXTFN. | |
166 | """ | |
167 | super(WriteLinesSelector, me).__init__(*args, **kw) | |
168 | set_nonblocking(fd) | |
169 | me._fd = fd | |
170 | if nextfn is not None: me._next = nextfn | |
171 | ||
172 | ## Selector state. | |
173 | ## | |
174 | ## * `_buf' contains a number of output items, already formatted, and | |
175 | ## ready for output in a single batch. It might be empty. | |
176 | ## | |
177 | ## * `_pos' is the current output position in `_buf'. | |
178 | ## | |
179 | ## * `_more' is set unless the `_next' function has raised | |
180 | ## `StopIteration': it indicates that we should close the descriptor | |
181 | ## once the all of the remaining data in the buffer has been sent. | |
182 | me._buf = "" | |
183 | me._pos = 0 | |
184 | me._more = True | |
185 | ||
186 | def _refill(me): | |
187 | """Refill `_buf' by calling `_next'.""" | |
188 | sio = StringIO(); n = 0 | |
189 | while n < 4096: | |
190 | try: item = me._next() | |
191 | except StopIteration: me._more = False; break | |
192 | if item is None: break | |
193 | item = str(item) | |
194 | sio.write(item); sio.write("\n"); n += len(item) + 1 | |
195 | me._buf = sio.getvalue(); me._pos = 0 | |
196 | ||
197 | def preselect(me, rfds, wfds): | |
198 | if me._fd == -1: return | |
199 | if me._buf == "" and me._more: me._refill() | |
200 | if me._buf != "" or not me._more: wfds.append(me._fd) | |
201 | ||
202 | def postselect_write(me, fd): | |
203 | if fd != me._fd: return | |
204 | while True: | |
205 | if me._pos >= len(me._buf): | |
206 | if me._more: me._refill() | |
207 | if not me._more: OS.close(me._fd); me._fd = -1; break | |
208 | if not me._buf: break | |
209 | try: n = OS.write(me._fd, me._buf[me._pos:]) | |
210 | except OSError, err: | |
211 | if err.errno == E.EAGAIN or err.errno == E.WOULDBLOCK: break | |
212 | elif err.errno == E.EPIPE: OS.close(me._fd); me._fd = -1; break | |
213 | else: raise | |
214 | me._pos += n | |
215 | ||
216 | class ReadLinesSelector (BaseSelector): | |
217 | """Report whole lines from an input file descriptor as they arrive.""" | |
218 | ||
219 | def __init__(me, fd, linefn = None, *args, **kw): | |
220 | """ | |
221 | Initialize the ReadLinesSelector to read from the file descriptor FD. | |
222 | ||
223 | The FD is marked non-blocking. | |
224 | ||
225 | For each whole line, and the final partial line (if any), the selector | |
226 | calls LINEFN with the line as an argument (without the terminating | |
227 | newline, if any). | |
228 | ||
229 | Subclasses can override this behaviour by defining a method `_line' and | |
230 | passing `None' as the LINEFN. | |
231 | """ | |
232 | super(ReadLinesSelector, me).__init__(*args, **kw) | |
233 | set_nonblocking(fd) | |
234 | me._fd = fd | |
235 | me._buf = "" | |
236 | if linefn is not None: me._line = linefn | |
237 | ||
238 | def preselect(me, rfds, wfds): | |
239 | if me._fd != -1: rfds.append(me._fd) | |
240 | ||
241 | def postselect_read(me, fd): | |
242 | if fd != me._fd: return | |
243 | while True: | |
244 | try: buf = OS.read(me._fd, 4096) | |
245 | except OSError, err: | |
246 | if err.errno == E.EAGAIN or err.errno == E.WOULDBLOCK: break | |
247 | else: raise | |
248 | if buf == "": | |
249 | OS.close(me._fd); me._fd = -1 | |
250 | if me._buf: me._line(me._buf) | |
251 | break | |
252 | buf = me._buf + buf | |
253 | i = 0 | |
254 | while True: | |
255 | try: j = buf.index("\n", i) | |
256 | except ValueError: break | |
257 | me._line(buf[i:j]) | |
258 | i = j + 1 | |
259 | me._buf = buf[i:] | |
260 | ||
def select_loop(selectors):
  """
  Multiplex I/O between the various SELECTORS.

  A `selector' SEL is an object which implements the selector protocol, which
  consists of three methods.

    * SEL.preselect(RFDS, WFDS) -- add any file descriptors which the
      selector is interested in reading from to the list RFDS, and add file
      descriptors it's interested in writing to to the list WFDS.

    * SEL.postselect_read(FD) -- informs the selector that FD is ready for
      reading.

    * SEL.postselect_write(FD) -- informs the selector that FD is ready for
      writing.

  Each pass of the loop polls every selector for the descriptors it cares
  about, waits for one of those descriptors to become ready, and then
  notifies every selector of each ready descriptor.  The loop ends when no
  selector registers interest in anything.  This is simple but rather
  inefficient.
  """
  while True:
    rwant, wwant = [], []
    for sel in selectors: sel.preselect(rwant, wwant)
    if not (rwant or wwant): return
    rready, wready, _ = SEL.select(rwant, wwant, [])
    for fd in rready:
      for sel in selectors: sel.postselect_read(fd)
    for fd in wready:
      for sel in selectors: sel.postselect_write(fd)
300 | ||
301 | ###-------------------------------------------------------------------------- | |
302 | ### Running subprocesses. | |
303 | ||
def wait_outcome(st):
  """
  Given a ST from `waitpid' (or similar), return a human-readable outcome.
  """
  if OS.WIFEXITED(st):
    rc = OS.WEXITSTATUS(st)
    if rc == 0: return "completed successfully"
    return "failed: rc = %d" % rc
  if OS.WIFSIGNALED(st):
    return "killed by signal %d" % OS.WTERMSIG(st)
  return "died with incomprehensible status 0x%04x" % st
314 | ||
class SubprocessFailure (Exception):
  """
  An exception indicating that a subprocess failed.

  Attributes: `what' is the command description, `st' the raw wait status,
  `rc' the exit code (or `None'), and `sig' the killing signal (or `None').
  """
  def __init__(me, what, st):
    me.st = st
    me.what = what
    rc = sig = None
    if OS.WIFEXITED(st): rc = OS.WEXITSTATUS(st)
    elif OS.WIFSIGNALED(st): sig = OS.WTERMSIG(st)
    me.rc, me.sig = rc, sig
  def __str__(me):
    return "subprocess `%s' %s" % (me.what, wait_outcome(me.st))
325 | ||
326 | INHERIT = Tag('INHERIT') | |
327 | PIPE = Tag('PIPE') | |
328 | DISCARD = Tag('DISCARD') | |
329 | @CTX.contextmanager | |
330 | def subprocess(command, | |
331 | stdin = INHERIT, stdout = INHERIT, stderr = INHERIT, | |
332 | cwd = INHERIT, jobserver = DISCARD): | |
333 | """ | |
334 | Hairy context manager for running subprocesses. | |
335 | ||
336 | The COMMAND is a list of arguments; COMMAND[0] names the program to be | |
337 | invoked. (There's currently no way to run a program with an unusual | |
338 | `argv[0]'.) | |
339 | ||
340 | The keyword arguments `stdin', `stdout', and `stderr' explain what to do | |
341 | with the standard file descriptors. | |
342 | ||
343 | * `INHERIT' means that they should be left alone: the child will use a | |
344 | copy of the parent's descriptor. This is the default. | |
345 | ||
346 | * `DISCARD' means that the descriptor should be re-opened onto | |
347 | `/dev/null' (for reading or writing as appropriate). | |
348 | ||
349 | * `PIPE' means that the descriptor should be re-opened as (the read or | |
350 | write end, as appropriate, of) a pipe, and the other end returned to | |
351 | the context body. | |
352 | ||
353 | Simiarly, the JOBSERVER may be `INHERIT' to pass the jobserver descriptors | |
354 | and environment variable down to the child, or `DISCARD' to close it. The | |
355 | default is `DISCARD'. | |
356 | ||
357 | The CWD may be `INHERIT' to run the child with the same working directory | |
358 | as the parent, or a pathname to change to an explicitly given working | |
359 | directory. | |
360 | ||
361 | The context is returned three values, which are file descriptors for other | |
362 | pipe ends for stdin, stdout, and stderr respectively, or -1 if there is no | |
363 | pipe. | |
364 | ||
365 | The context owns the pipe descriptors, and is expected to close them | |
366 | itself. (Timing of closure is significant, particularly for `stdin'.) | |
367 | """ | |
368 | ||
369 | ## Set up. | |
370 | r_in, w_in = -1, -1 | |
371 | r_out, w_out = -1, -1 | |
372 | r_err, w_err = -1, -1 | |
373 | spew("running subprocess `%s'" % " ".join(command)) | |
374 | ||
375 | ## Clean up as necessary... | |
376 | try: | |
377 | ||
378 | ## Set up stdin. | |
379 | if stdin is PIPE: r_in, w_in = OS.pipe() | |
380 | elif stdin is DISCARD: r_in = OS.open("/dev/null", OS.O_RDONLY) | |
381 | elif stdin is not INHERIT: | |
382 | raise ValueError("bad `stdin' value `%r'" % stdin) | |
383 | ||
384 | ## Set up stdout. | |
385 | if stdout is PIPE: r_out, w_out = OS.pipe() | |
386 | elif stdout is DISCARD: w_out = OS.open("/dev/null", OS.O_WRONLY) | |
387 | elif stdout is not INHERIT: | |
388 | raise ValueError("bad `stderr' value `%r'" % stdout) | |
389 | ||
390 | ## Set up stderr. | |
391 | if stderr is PIPE: r_err, w_err = OS.pipe() | |
392 | elif stderr is DISCARD: w_err = OS.open("/dev/null", OS.O_WRONLY) | |
393 | elif stderr is not INHERIT: | |
394 | raise ValueError("bad `stderr' value `%r'" % stderr) | |
395 | ||
396 | ## Start up the child. | |
397 | kid = OS.fork() | |
398 | ||
399 | if kid == 0: | |
400 | ## Child process. | |
401 | ||
402 | ## Fix up stdin. | |
403 | if r_in != -1: OS.dup2(r_in, 0); OS.close(r_in) | |
404 | if w_in != -1: OS.close(w_in) | |
405 | ||
406 | ## Fix up stdout. | |
407 | if w_out != -1: OS.dup2(w_out, 1); OS.close(w_out) | |
408 | if r_out != -1: OS.close(r_out) | |
409 | ||
410 | ## Fix up stderr. | |
411 | if w_err != -1: OS.dup2(w_err, 2); OS.close(w_err) | |
412 | if r_err != -1: OS.close(r_err) | |
413 | ||
414 | ## Change directory. | |
415 | if cwd is not INHERIT: OS.chdir(cwd) | |
416 | ||
417 | ## Fix up the jobserver. | |
418 | if jobserver is DISCARD: SCHED.close_jobserver() | |
419 | ||
420 | ## Run the program. | |
421 | try: OS.execvp(command[0], command) | |
422 | except OSError, err: | |
423 | moan("failed to run `%s': %s" % err.strerror) | |
424 | OS._exit(127) | |
425 | ||
426 | ## Close the other ends of the pipes. | |
427 | if r_in != -1: OS.close(r_in); r_in = -1 | |
428 | if w_out != -1: OS.close(w_out); w_out = -1 | |
429 | if w_err != -1: OS.close(w_err); w_err = -1 | |
430 | ||
431 | ## Return control to the context body. Remember not to close its pipes. | |
432 | yield w_in, r_out, r_err | |
433 | w_in = r_out = r_err = -1 | |
434 | ||
435 | ## Collect the child process's exit status. | |
436 | _, st = OS.waitpid(kid, 0) | |
437 | spew("subprocess `%s' %s" % (" ".join(command), wait_outcome(st))) | |
438 | if st: raise SubprocessFailure(" ".join(command), st) | |
439 | ||
440 | ## Tidy up. | |
441 | finally: | |
442 | ||
443 | ## Close any left-over file descriptors. | |
444 | for fd in [r_in, w_in, r_out, w_out, r_err, w_err]: | |
445 | if fd != -1: OS.close(fd) | |
446 | ||
def set_nonblocking(fd):
  """Mark the descriptor FD as non-blocking."""
  flags = FC.fcntl(fd, FC.F_GETFL)
  FC.fcntl(fd, FC.F_SETFL, flags | OS.O_NONBLOCK)
450 | ||
class DribbleOut (BaseSelector):
  """A simple selector to feed a string to a descriptor, in pieces."""
  def __init__(me, fd, string, *args, **kw):
    super(DribbleOut, me).__init__(*args, **kw)
    me._fd = fd                         # descriptor to write; -1 once closed
    me._string = string                 # data still waiting to be written
    me._i = 0                           # NOTE(review): apparently unused
    set_nonblocking(me._fd)
    me.result = None                    # no collected output; cf. `DribbleIn'
  def preselect(me, rfds, wfds):
    ## Interested in writing until everything is sent (or the reader dies).
    if me._fd != -1: wfds.append(me._fd)
  def postselect_write(me, fd):
    ## Write what the descriptor will take; close on completion or EPIPE.
    if fd != me._fd: return
    try: n = OS.write(me._fd, me._string)
    except OSError, err:
      if err.errno == E.EAGAIN or err.errno == E.EWOULDBLOCK: return
      elif err.errno == E.EPIPE: OS.close(me._fd); me._fd = -1; return
      else: raise
    if n == len(me._string): OS.close(me._fd); me._fd = -1
    else: me._string = me._string[n:]
471 | ||
class DribbleIn (BaseSelector):
  """A simple selector to collect all the input as a big string."""
  def __init__(me, fd, *args, **kw):
    super(DribbleIn, me).__init__(*args, **kw)
    me._fd = fd                         # descriptor to read; -1 once closed
    me._buf = StringIO()                # accumulates everything read so far
    set_nonblocking(me._fd)
  def preselect(me, rfds, wfds):
    ## Interested in reading until end-of-file.
    if me._fd != -1: rfds.append(me._fd)
  def postselect_read(me, fd):
    ## Drain the descriptor; close at end-of-file.
    if fd != me._fd: return
    while True:
      try: buf = OS.read(me._fd, 4096)
      except OSError, err:
        if err.errno == E.EAGAIN or err.errno == E.EWOULDBLOCK: break
        else: raise
      if buf == "": OS.close(me._fd); me._fd = -1; break
      else: me._buf.write(buf)
  @property
  def result(me):
    ## Everything read from the descriptor, as a single string.
    return me._buf.getvalue()
492 | ||
RETURN = Tag('RETURN')
def run_program(command,
                stdin = INHERIT, stdout = INHERIT, stderr = INHERIT,
                *args, **kwargs):
  """
  A simplifying wrapper around `subprocess'.

  The COMMAND is a list of arguments; COMMAND[0] names the program to be
  invoked, as for `subprocess'.

  The keyword arguments `stdin', `stdout', and `stderr' explain what to do
  with the standard file descriptors.

    * `INHERIT' means that they should be left alone: the child will use a
      copy of the parent's descriptor.

    * `DISCARD' means that the descriptor should be re-opened onto
      `/dev/null' (for reading or writing as appropriate).

    * `RETURN', for an output descriptor, means that all of the output
      produced on that descriptor should be collected and returned as a
      string.

    * A string, for stdin, means that the string should be provided on the
      child's standard input.

  (The value `PIPE' is not permitted here.)

  Other arguments are passed on to `subprocess'.

  If no descriptors are marked `RETURN', then the function returns `None'; if
  exactly one descriptor is so marked, then the function returns that
  descriptor's output as a string; otherwise, it returns a tuple of strings
  for each such descriptor, in the usual order.
  """
  kw = dict(); kw.update(kwargs)
  ## `selfn' collects factories which build a selector for each requested
  ## pipe, given the (stdin, stdout, stderr) descriptor triple.
  selfn = []

  if isinstance(stdin, basestring):
    kw['stdin'] = PIPE; selfn.append(lambda fds: DribbleOut(fds[0], stdin))
  elif stdin is INHERIT or stdin is DISCARD:
    kw['stdin'] = stdin
  else:
    raise ValueError("bad `stdin' value `%r'" % stdin)

  if stdout is RETURN:
    kw['stdout'] = PIPE; selfn.append(lambda fds: DribbleIn(fds[1]))
  elif stdout is INHERIT or stdout is DISCARD:
    kw['stdout'] = stdout
  else:
    raise ValueError("bad `stdout' value `%r'" % stdout)

  if stderr is RETURN:
    kw['stderr'] = PIPE; selfn.append(lambda fds: DribbleIn(fds[2]))
  elif stderr is INHERIT or stderr is DISCARD:
    kw['stderr'] = stderr
  else:
    raise ValueError("bad `stderr' value `%r'" % stderr)

  ## Run the child, multiplexing its pipes until they all close; the
  ## `subprocess' context collects the exit status (raising on failure).
  with subprocess(command, *args, **kw) as fds:
    sel = [fn(fds) for fn in selfn]
    select_loop(sel)
  ## Gather the `RETURN' outputs (`DribbleOut.result' is always `None').
  rr = []
  for s in sel:
    r = s.result
    if r is not None: rr.append(r)
  if len(rr) == 0: return None
  if len(rr) == 1: return rr[0]
  else: return tuple(rr)
562 | ||
563 | ###-------------------------------------------------------------------------- | |
564 | ### Other system-ish utilities. | |
565 | ||
@CTX.contextmanager
def safewrite(path):
  """
  Context manager for writing to a file.

  A fresh file named `PATH.new' is opened for writing and handed to the
  context body.  If the body finishes without raising, the file is closed
  and renamed over PATH; if the body raises, the file is closed but left
  under its temporary name, so PATH is never half-written.
  """
  tmp = "%s.new" % path
  f = open(tmp, "w")
  try:
    yield f
  finally:
    f.close()
  OS.rename(tmp, path)
579 | ||
@CTX.contextmanager
def safewrite_root(path, mode = None, uid = None, gid = None):
  """
  Context manager for writing to a file with root privileges.

  This is as for `safewrite', but the file is opened and written as root.
  Optionally set MODE (a `chmod' argument string), owner UID and/or group
  GID (names or ids, as accepted by `chown'/`chgrp') before the rename.
  """
  new = path + ".new"
  ## Write the new file via `tee' run through `C.ROOTLY' (presumably a
  ## sudo-like command prefix defined elsewhere -- TODO confirm).
  with subprocess(C.ROOTLY + ["tee", new],
                  stdin = PIPE, stdout = DISCARD) as (fd_in, _, _):
    pipe = OS.fdopen(fd_in, 'w')
    try: yield pipe
    finally: pipe.close()
  ## Fix permissions and ownership before moving the file into place, so
  ## the final PATH never exists with the wrong mode/owner.
  if mode is not None: run_program(C.ROOTLY + ["chmod", mode, new])
  if uid is not None:
    run_program(C.ROOTLY + ["chown",
                            uid + (gid is not None and ":" + gid or ""),
                            new])
  elif gid is not None:
    run_program(C.ROOTLY + ["chgrp", gid, new])
  run_program(C.ROOTLY + ["mv", new, path])
601 | ||
def mountpoint_p(dir):
  """Return true if DIR is a mountpoint."""

  ## A mountpoint can be distinguished because it is a directory whose device
  ## number differs from its parent.  (NOTE(review): a bind mount of a
  ## directory on the same filesystem would not be detected by this test.)
  try: st1 = OS.stat(dir)
  except OSError, err:
    if err.errno == E.ENOENT: return False
    else: raise
  if not ST.S_ISDIR(st1.st_mode): return False
  st0 = OS.stat(OS.path.join(dir, ".."))
  return st0.st_dev != st1.st_dev
614 | ||
def mkdir_p(dir, mode = 0777):
  """
  Make a directory DIR, and any parents, as necessary.

  Unlike `OS.makedirs', this doesn't fail if DIR already exists.
  """
  ## Split off a leading `/' so the walk below handles absolute paths.
  if dir.startswith("/"): d = "/"; dir = dir[1:]
  else: d = ""
  ## Create each prefix of the path in turn, ignoring ones already there.
  for p in dir.split("/"):
    d = OS.path.join(d, p)
    if d == "": continue
    try: OS.mkdir(d, mode)
    except OSError, err:
      if err.errno == E.EEXIST: pass    # already exists: that's fine
      else: raise
630 | ||
def umount(fs):
  """
  Unmount the filesystem FS.

  The FS may be the block device holding the filesystem, or (more usually)
  the mount point.
  """

  ## Sometimes random things can prevent unmounting.  Be persistent.
  for i in xrange(5):
    try: run_program(C.ROOTLY + ["umount", fs], stderr = DISCARD)
    except SubprocessFailure, err:
      ## Exit code 32 from umount(8) means the unmount itself failed
      ## (e.g. the filesystem is busy): back off briefly and retry.
      if err.rc == 32: pass
      else: raise
    else: return
    T.sleep(0.2)
  ## Final attempt: let any failure propagate this time.
  run_program(C.ROOTLY + ["umount", fs], stderr = DISCARD)
648 | ||
649 | @CTX.contextmanager | |
650 | def lockfile(lock, exclp = True, waitp = True): | |
651 | """ | |
652 | Acquire an exclusive lock on a named file LOCK while executing the body. | |
653 | ||
654 | If WAITP is true, wait until the lock is available; if false, then fail | |
655 | immediately if the lock can't be acquired. | |
656 | """ | |
657 | fd = -1 | |
658 | flag = 0 | |
659 | if exclp: flag |= FC.LOCK_EX | |
660 | else: flag |= FC.LOCK_SH | |
661 | if not waitp: flag |= FC.LOCK_NB | |
662 | spew("acquiring %s lock on `%s'" % | |
663 | (exclp and "exclusive" or "shared", lock)) | |
664 | try: | |
665 | while True: | |
666 | ||
667 | ## Open the file and take note of which file it is. | |
668 | fd = OS.open(lock, OS.O_RDWR | OS.O_CREAT, 0666) | |
669 | st0 = OS.fstat(fd) | |
670 | ||
671 | ## Acquire the lock, waiting if necessary. | |
672 | FC.lockf(fd, flag) | |
673 | ||
674 | ## Check that the lock file is still the same one. It's permissible | |
675 | ## for the lock holder to release the lock by unlinking or renaming the | |
676 | ## lock file, in which case there might be a different lockfile there | |
677 | ## now which we need to acquire instead. | |
678 | ## | |
679 | ## It's tempting to `optimize' this code by opening a new file | |
680 | ## descriptor here so as to elide the additional call to fstat(2) | |
681 | ## above. But this doesn't work: if we successfully acquire the lock, | |
682 | ## we then have two file descriptors open on the lock file, so we have | |
683 | ## to close one -- but, under the daft fcntl(2) rules, even closing | |
684 | ## `nfd' will release the lock immediately. | |
685 | try: | |
686 | st1 = OS.stat(lock) | |
687 | except OSError, err: | |
688 | if err.errno == E.ENOENT: pass | |
689 | else: raise | |
690 | if st0.st_dev == st1.st_dev and st0.st_ino == st1.st_ino: break | |
691 | OS.close(fd) | |
692 | ||
693 | ## We have the lock, so away we go. | |
694 | spew("lock `%s' acquired" % lock) | |
695 | yield None | |
696 | spew("lock `%s' released" % lock) | |
697 | ||
698 | finally: | |
699 | if fd != -1: OS.close(fd) | |
700 | ||
def block_device_p(dev):
  """Return true if DEV names a block device."""
  ## A missing file simply isn't a block device; other stat(2) failures are
  ## reported to the caller.
  try: st = OS.stat(dev)
  except OSError, err:
    if err.errno == E.ENOENT: return False
    else: raise
  else: return ST.S_ISBLK(st.st_mode)
708 | ||
709 | ###-------------------------------------------------------------------------- | |
710 | ### Running parallel jobs. | |
711 | ||
## Return codes from `check'
SLEEP = Tag('SLEEP')                    # can't run yet; will be reconsidered
READY = Tag('READY')                    # may be started at any time
FAILED = Tag('FAILED')                  # cannot be started at all
DONE = Tag('DONE')                      # nothing to do; already up-to-date
717 | ||
718 | class BaseJob (object): | |
719 | """ | |
720 | Base class for jobs. | |
721 | ||
722 | Subclasses must implement `run' and `_mkname', and probably ought to extend | |
723 | `prepare' and `check'. | |
724 | """ | |
725 | ||
726 | ## A magic token to prevent sneaky uninterned jobs. | |
727 | _MAGIC = Tag('MAGIC') | |
728 | ||
729 | ## A map from job names to objects. | |
730 | _MAP = {} | |
731 | ||
732 | ## Number of tail lines of the log to print on failure. | |
733 | LOGLINES = 20 | |
734 | ||
  def __init__(me, _token, *args, **kw):
    """
    Initialize a job.

    Jobs are interned!  Don't construct instances (of subclasses) directly:
    use the `ensure' class method.
    """
    ## The `_token' argument proves that the caller came through `ensure'.
    assert _token is me._MAGIC
    super(BaseJob, me).__init__(*args, **kw)

    ## Dependencies on other jobs.
    me._deps = None                     # prerequisite jobs; populated before
                                        # `check' iterates it (see `await')
    me._waiting = set()                 # jobs waiting for us to finish

    ## Attributes maintained by the JobServer.
    me.done = False                     # has the job finished?
    me.started = False                  # has the job been started?
    me.win = None                       # success flag; `None' = not yet known
    me._token = None
    me._known = False
    me._st = None                       # presumably a wait status -- set by
                                        # scheduler machinery outside this view
    me._logkid = -1                     # presumably a logging child pid -- ditto
    me._logfile = None                  # log file pathname (read by `_logtail')
758 | ||
  def prepare(me):
    """
    Establish any prerequisite jobs.

    Delaying this allows command-line settings to override those chosen by
    dependent jobs.  The default implementation does nothing; subclasses
    override it, typically registering dependencies with `await'.
    """
    pass
767 | ||
  @classmethod
  def ensure(cls, *args, **kw):
    """
    Return the unique job with the given parameters.

    If a matching job already exists, then return it.  Otherwise, create the
    new job, register it in the table, and notify the scheduler about it.
    """
    ## Construct a candidate so we can compute its name; if a job with that
    ## name is already interned, the candidate is simply discarded.
    me = cls(_token = cls._MAGIC, *args, **kw)
    try:
      job = cls._MAP[me.name]
    except KeyError:
      cls._MAP[me.name] = me
      SCHED.add(me)
      return me
    else:
      return job
785 | ||
  ## Naming.
  @property
  def name(me):
    """Return the job's name, as calculated by `_mkname' (computed once and
    cached in `_name')."""
    try: name = me._name
    except AttributeError: name = me._name = me._mkname()
    return name
793 | ||
  ## Subclass responsibilities.
  def _mkname(me):
    """
    Return the job's name.

    By default, this is an unhelpful string which is distinct for every job.
    Subclasses should normally override this method to return a name as an
    injective function of the job parameters, since interning (`ensure')
    uses the name as the key.
    """
    return "%s.%x" % (me.__class__.__name__, id(me))
804 | ||
  def check(me):
    """
    Return whether the job is ready to run.

    Returns a pair STATE, REASON.  The REASON is a human-readable string
    explaining what's going on, or `None' if it's not worth explaining.  The
    STATE is one of the following.

      * `READY' -- the job can be run at any time.

      * `FAILED' -- the job can't be started.  Usually, this means that some
        prerequisite job failed, there was some error in the job's
        parameters, or the environment is unsuitable for the job to run.

      * `DONE' -- the job has nothing to do.  Usually, this means that the
        thing the job acts on is already up-to-date.  It's bad form to do
        even minor work in `check'.

      * `SLEEP' -- the job can't be run right now.  It has arranged to be
        retried if conditions change.  (Spurious wakeups are permitted and
        must be handled correctly.)

    The default behaviour checks the set of dependencies, as built by the
    `await' method, and returns `SLEEP' or `FAILED' as appropriate, or
    `READY' if all the prerequisite jobs have completed successfully.
    """
    for job in me._deps:
      if not job.done:
        ## Not finished yet: ask JOB to wake us when it completes.
        job._waiting.add(me)
        return SLEEP, "waiting for job `%s'" % job.name
      elif not job.win and not OPT.ignerr:
        return FAILED, "dependent on failed job `%s'" % job.name
    return READY, None
838 | ||
839 | ## Subclass utilities. | |
  def await(me, job):
    """
    Make sure that JOB completes before allowing this job to start.

    The dependency is recorded in `_deps', which the default `check' method
    consults: this job sleeps until JOB is done, and fails if JOB failed
    (unless errors are being ignored).  (Note for future porters: `await'
    became a reserved word in Python 3.7; this file is Python 2.)
    """
    me._deps.add(job)
843 | ||
  def _logtail(me):
    """
    Dump the last `LOGLINES' lines of the logfile.

    This is called if the job fails and was being run quietly, to provide the
    user with some context for the failure.  Each line is written prefixed
    with the job name, in the same tagged format used for live output.
    """

    ## Gather blocks from the end of the log until we have enough lines.
    ## The blocks are collected in `bufs' newest-first, hence the `reversed'
    ## when joining them back together.
    with open(me._logfile, 'r') as f:
      nlines = 0
      bufs = []
      bufsz = 4096
      f.seek(0, 2); off = f.tell()
      spew("start: off = %d" % off)
      while nlines <= me.LOGLINES and off > 0:
        off = max(0, off - bufsz)
        f.seek(off, 0)
        spew("try at off = %d" % off)
        buf = f.read(bufsz)
        nlines += buf.count("\n")
        spew("now lines = %d" % nlines)
        bufs.append(buf)
      buf = ''.join(reversed(bufs))

    ## We probably overshot.  Skip the extra lines from the start.
    i = 0
    while nlines > me.LOGLINES: i = buf.index("\n", i) + 1; nlines -= 1

    ## If we ended up trimming the log, print an ellipsis.
    if off > 0 or i > 0: print "%-*s * [...]" % (TAGWD, me.name)

    ## Print the log tail.  A trailing newline in the buffer produces an
    ## empty final element from `split', which we drop.
    lines = buf[i:].split("\n")
    if lines and lines[-1] == '': lines.pop()
    for line in lines: print "%-*s %s" % (TAGWD, me.name, line)
880 | ||
class BaseJobToken (object):
  """
  A job token is the authorization for a job to be run.

  Subclasses must implement `recycle' to allow some other job to use the
  token.  Tokens are recycled by the scheduler when the holding job retires
  (see `JobScheduler._retire').
  """
  pass
889 | ||
class TrivialJobToken (BaseJobToken):
  """
  A trivial reusable token, for when issuing jobs in parallel without limit.

  There only needs to be one of these.
  """
  def recycle(me):
    ## Nothing to hand back anywhere: with unconstrained parallelism the
    ## job slot was never scarce in the first place.
    spew("no token needed; nothing to recycle")
## The shared singleton instance, handed out by the scheduler when running
## with unlimited parallelism.
TRIVIAL_TOKEN = TrivialJobToken()
899 | ||
class JobServerToken (BaseJobToken):
  """A job token wrapping a single byte borrowed from the jobserver pipe."""
  def __init__(me, char, pipefd, *args, **kw):
    super(JobServerToken, me).__init__(*args, **kw)
    me._fd = pipefd
    me._char = char
  def recycle(me):
    ## Put our byte back in the pipe so that another job -- possibly in a
    ## different process -- can claim the slot.
    spew("returning token to jobserver pipe")
    OS.write(me._fd, me._char)
909 | ||
class PrivateJobToken (BaseJobToken):
  """
  The private job token belonging to a scheduler.

  When running under a GNU Make jobserver, there is a token for each byte in
  the pipe, and an additional one which represents the slot we're actually
  running in.  This class represents that additional token.
  """
  def __init__(me, sched, *args, **kw):
    super(PrivateJobToken, me).__init__(*args, **kw)
    ## Remember the owning scheduler so recycling can hand the slot back.
    me._sched = sched
  def recycle(me):
    ## A scheduler holds at most one private token, so it must have given
    ## it away (set `_privtoken' to `None') before we can return it.
    assert me._sched._privtoken is None
    spew("recycling private token")
    me._sched._privtoken = me
925 | ||
TAGWD = 29 ## width of the job-name tag column in `%-*s'-formatted output
LOGKEEP = 20 ## number of old per-job log files to retain (see `run_job')
928 | ||
929 | class JobScheduler (object): | |
930 | """ | |
931 | The main machinery for running and ordering jobs. | |
932 | ||
933 | This handles all of the details of job scheduling. | |
934 | """ | |
935 | ||
936 | def __init__(me, rfd = -1, wfd = -1, npar = 1): | |
937 | """ | |
938 | Initialize a scheduler. | |
939 | ||
940 | * RFD and WFD are the read and write ends of the jobserver pipe, as | |
941 | determined from the `MAKEFLAGS' environment variable, or -1. | |
942 | ||
943 | * NPAR is the maximum number of jobs to run in parallel, or `True' if | |
944 | there is no maximum (i.e., we're in `forkbomb' mode). | |
945 | """ | |
946 | ||
947 | ## Set the parallelism state. The `_rfd' and `_wfd' are the read and | |
948 | ## write ends of the jobserver pipe, or -1 if there is no jobserver. | |
949 | ## `_par' is true if we're meant to run jobs in parallel. The case _par | |
950 | ## and _rfd = -1 means unconstrained parallelism. | |
951 | ## | |
952 | ## The jobserver pipe contains a byte for each shared job slot. A | |
953 | ## scheduler reads a byte from the pipe for each job it wants to run | |
954 | ## (nearly -- see `_privtoken' below), and puts the byte back when the | |
955 | ## job finishes. The GNU Make jobserver protocol specification insists | |
956 | ## that we preserve the value of the byte in the pipe (though doesn't | |
957 | ## currently make any use of this flexibility), so we record it in a | |
958 | ## `JobToken' object's `_char' attribute. | |
959 | me._par = rfd != -1 or npar is True or npar != 1 | |
960 | spew("par is %r" % me._par) | |
961 | if rfd == -1 and npar > 1: | |
962 | rfd, wfd = OS.pipe() | |
963 | OS.write(wfd, (npar - 1)*'+') | |
964 | OS.environ["MAKEFLAGS"] = \ | |
965 | (" -j --jobserver-auth=%(rfd)d,%(wfd)d " + | |
966 | "--jobserver-fds=%(rfd)d,%(wfd)d") % dict(rfd = rfd, wfd = wfd) | |
967 | me._rfd = rfd; me._wfd = wfd | |
968 | ||
969 | ## The scheduler state. A job starts in the `_check' list. Each | |
970 | ## iteration of the scheduler loop will inspect the jobs here and see | |
971 | ## whether it's ready to run: if not, it gets put in the `_sleep' list, | |
972 | ## where it will languish until something moves it back; if it is ready, | |
973 | ## it gets moved to the `_ready' list to wait for a token from the | |
974 | ## jobserver. At that point the job can be started, and it moves to the | |
975 | ## `_kidmap', which associates a process-id with each running job. | |
976 | ## Finally, jobs which have completed are simply forgotten. The `_njobs' | |
977 | ## counter keeps track of how many jobs are outstanding, so that we can | |
978 | ## stop when there are none left. | |
979 | me._check = set() | |
980 | me._sleep = set() | |
981 | me._ready = set() | |
982 | me._kidmap = {} | |
983 | me._logkidmap = {} | |
984 | me._njobs = 0 | |
985 | ||
986 | ## As well as the jobserver pipe, we implicitly have one extra job slot, | |
987 | ## which is the one we took when we were started by our parent. The | |
988 | ## right to do processing in this slot is represnted by the `private | |
989 | ## token' here, distinguished from tokens from the jobserver pipe by | |
990 | ## having `None' as its `_char' value. | |
991 | me._privtoken = PrivateJobToken(me) | |
992 | ||
993 | def add(me, job): | |
994 | """Notice a new job and arrange for it to (try to) run.""" | |
995 | if job._known: return | |
996 | spew("adding new job `%s'" % job.name) | |
997 | job._known = True | |
998 | me._check.add(job) | |
999 | me._njobs += 1 | |
1000 | ||
1001 | def close_jobserver(me): | |
1002 | """ | |
1003 | Close the jobserver file descriptors. | |
1004 | ||
1005 | This should be called within child processes to prevent them from messing | |
1006 | with the jobserver. | |
1007 | """ | |
1008 | if me._rfd != -1: OS.close(me._rfd); me._rfd = -1 | |
1009 | if me._wfd != -1: OS.close(me._wfd); me._wfd = -1 | |
1010 | try: del OS.environ["MAKEFLAGS"] | |
1011 | except KeyError: pass | |
1012 | ||
1013 | def _killall(me): | |
1014 | """Zap all jobs which aren't yet running.""" | |
1015 | for jobset in [me._sleep, me._check, me._ready]: | |
1016 | while jobset: | |
1017 | job = jobset.pop() | |
1018 | job.done = True | |
1019 | job.win = False | |
1020 | me._njobs -= 1 | |
1021 | ||
1022 | def _retire(me, job, win, outcome): | |
1023 | """ | |
1024 | Declare that a job has stopped, and deal with the consequences. | |
1025 | ||
1026 | JOB is the completed job, which should not be on any of the job queues. | |
1027 | WIN is true if the job succeeded, and false otherwise. OUTCOME is a | |
1028 | human-readable string explaining how the job came to its end, or `None' | |
1029 | if no message should be reported. | |
1030 | """ | |
1031 | ||
1032 | global RC | |
1033 | ||
1034 | ## Return the job's token to the pool. | |
1035 | if job._token is not None: job._token.recycle() | |
1036 | job._token = None | |
1037 | me._njobs -= 1 | |
1038 | ||
1039 | ## Update and maybe report the job's status. | |
1040 | job.done = True | |
1041 | job.win = win | |
1042 | if outcome is not None and not OPT.silent: | |
1043 | if OPT.quiet and not job.win and job._logfile: job._logtail() | |
1044 | if not job.win or not OPT.quiet: | |
1045 | print "%-*s %c (%s)" % \ | |
1046 | (TAGWD, job.name, job.win and '|' or '*', outcome) | |
1047 | ||
1048 | ## If the job failed, and we care, arrange to exit nonzero. | |
1049 | if not win and not OPT.ignerr: RC = 2 | |
1050 | ||
1051 | ## If the job failed, and we're supposed to give up after the first | |
1052 | ## error, then zap all of the waiting jobs. | |
1053 | if not job.win and not OPT.keepon and not OPT.ignerr: me._killall() | |
1054 | ||
1055 | ## If this job has dependents then wake them up and see whether they're | |
1056 | ## ready to run. | |
1057 | for j in job._waiting: | |
1058 | try: me._sleep.remove(j) | |
1059 | except KeyError: pass | |
1060 | else: | |
1061 | spew("waking dependent job `%s'" % j.name) | |
1062 | me._check.add(j) | |
1063 | ||
1064 | def _reap(me, kid, st): | |
1065 | """ | |
1066 | Deal with the child with process-id KID having exited with status ST. | |
1067 | """ | |
1068 | ||
1069 | ## Figure out what kind of child this is. Note that it has finished. | |
1070 | try: job = me._kidmap[kid] | |
1071 | except KeyError: | |
1072 | try: job = me._logkidmap[kid] | |
1073 | except KeyError: | |
1074 | spew("unknown child %d exits with status 0x%04x" % (kid, st)) | |
1075 | return | |
1076 | else: | |
1077 | ## It's a logging child. | |
1078 | del me._logkidmap[kid] | |
1079 | job._logkid = DONE | |
1080 | spew("logging process for job `%s' exits with status 0x%04x" % | |
1081 | (job.name, st)) | |
1082 | else: | |
1083 | job._st = st | |
1084 | del me._kidmap[kid] | |
1085 | spew("main process for job `%s' exits with status 0x%04x" % | |
1086 | (job.name, st)) | |
1087 | ||
1088 | ## If either of the job's associated processes is still running then we | |
1089 | ## should stop now and give the other one a chance. | |
1090 | if job._st is None or job._logkid is not DONE: | |
1091 | spew("deferring retirement for job `%s'" % job.name) | |
1092 | return | |
1093 | spew("completing deferred retirement for job `%s'" % job.name) | |
1094 | ||
1095 | ## Update and (maybe) report the job status. | |
1096 | if job._st == 0: win = True; outcome = None | |
1097 | else: win = False; outcome = wait_outcome(job._st) | |
1098 | ||
1099 | ## Retire the job. | |
1100 | me._retire(job, win, outcome) | |
1101 | ||
1102 | def _reapkids(me): | |
1103 | """Reap all finished child processes.""" | |
1104 | while True: | |
1105 | try: kid, st = OS.waitpid(-1, OS.WNOHANG) | |
1106 | except OSError, err: | |
1107 | if err.errno == E.ECHILD: break | |
1108 | else: raise | |
1109 | if kid == 0: break | |
1110 | me._reap(kid, st) | |
1111 | ||
1112 | def run_job(me, job): | |
1113 | """Start running the JOB.""" | |
1114 | ||
1115 | job.started = True | |
1116 | if OPT.dryrun: return None, None | |
1117 | ||
1118 | ## Make pipes to collect the job's output and error reports. | |
1119 | r_out, w_out = OS.pipe() | |
1120 | r_err, w_err = OS.pipe() | |
1121 | ||
1122 | ## Find a log file to write. Avoid races over the log names; but this | |
1123 | ## means that the log descriptor needs to be handled somewhat carefully. | |
1124 | logdir = OS.path.join(C.STATE, "log"); mkdir_p(logdir) | |
1125 | logseq = 1 | |
1126 | while True: | |
1127 | logfile = OS.path.join(logdir, "%s-%s#%d" % (job.name, TODAY, logseq)) | |
1128 | try: | |
1129 | logfd = OS.open(logfile, OS.O_WRONLY | OS.O_CREAT | OS.O_EXCL, 0666) | |
1130 | except OSError, err: | |
1131 | if err.errno == E.EEXIST: logseq += 1; continue | |
1132 | else: raise | |
1133 | else: | |
1134 | break | |
1135 | job._logfile = logfile | |
1136 | ||
1137 | ## Make sure there's no pending output, or we might get two copies. (I | |
1138 | ## don't know how to flush all output streams in Python, but this is good | |
1139 | ## enough for our purposes.) | |
1140 | SYS.stdout.flush() | |
1141 | ||
1142 | ## Set up the logging child first. If we can't, take down the whole job. | |
1143 | try: job._logkid = OS.fork() | |
1144 | except OSError, err: OS.close(logfd); return None, err | |
1145 | if not job._logkid: | |
1146 | ## The main logging loop. | |
1147 | ||
1148 | ## Close the jobserver descriptors, and the write ends of the pipes. | |
1149 | me.close_jobserver() | |
1150 | OS.close(w_out); OS.close(w_err) | |
1151 | ||
1152 | ## Capture the job's stdout and stderr and wait for everything to | |
1153 | ## happen. | |
1154 | def log_lines(fd, marker): | |
1155 | def fn(line): | |
1156 | if not OPT.quiet: | |
1157 | OS.write(1, "%-*s %s %s\n" % (TAGWD, job.name, marker, line)) | |
1158 | OS.write(logfd, "%s %s\n" % (marker, line)) | |
1159 | return ReadLinesSelector(fd, fn) | |
1160 | select_loop([log_lines(r_out, "|"), log_lines(r_err, "*")]) | |
1161 | ||
1162 | ## We're done. (Closing the descriptors here would be like polishing | |
1163 | ## the floors before the building is demolished.) | |
1164 | OS._exit(0) | |
1165 | ||
1166 | ## Back in the main process: record the logging child. At this point we | |
1167 | ## no longer need the logfile descriptor. | |
1168 | me._logkidmap[job._logkid] = job | |
1169 | OS.close(logfd) | |
1170 | ||
1171 | ## Start the main job process. | |
1172 | try: kid = OS.fork() | |
1173 | except OSError, err: return None, err | |
1174 | if not kid: | |
1175 | ## The main job. | |
1176 | ||
1177 | ## Close the read ends of the pipes, and move the write ends to the | |
1178 | ## right places. (This will go wrong if we were started without enough | |
1179 | ## descriptors. Fingers crossed.) | |
1180 | OS.dup2(w_out, 1); OS.dup2(w_err, 2) | |
1181 | OS.close(r_out); OS.close(w_out) | |
1182 | OS.close(r_err); OS.close(w_err) | |
1183 | spew("running job `%s' as pid %d" % (job.name, OS.getpid())) | |
1184 | ||
1185 | ## Run the job, catching nonlocal flow. | |
1186 | try: | |
1187 | job.run() | |
1188 | except ExpectedError, err: | |
1189 | moan(str(err)) | |
1190 | OS._exit(2) | |
1191 | except Exception, err: | |
1192 | TB.print_exc(SYS.stderr) | |
1193 | OS._exit(3) | |
1194 | except BaseException, err: | |
1195 | moan("caught unexpected exception: %r" % err) | |
1196 | OS._exit(112) | |
1197 | else: | |
1198 | spew("job `%s' ran to completion" % job.name) | |
1199 | ||
1200 | ## Clean up old logs. | |
1201 | match = [] | |
1202 | pat = RX.compile(r"^%s-(\d{4})-(\d{2})-(\d{2})\#(\d+)$" % | |
1203 | RX.escape(job.name)) | |
1204 | for f in OS.listdir(logdir): | |
1205 | m = pat.match(f) | |
1206 | if m: match.append((f, int(m.group(1)), int(m.group(2)), | |
1207 | int(m.group(3)), int(m.group(4)))) | |
1208 | match.sort(key = lambda (_, y, m, d, q): (y, m, d, q)) | |
1209 | if len(match) > LOGKEEP: | |
1210 | for (f, _, _, _, _) in match[:-LOGKEEP]: | |
1211 | try: OS.unlink(OS.path.join(logdir, f)) | |
1212 | except OSError, err: | |
1213 | if err.errno == E.ENOENT: pass | |
1214 | else: raise | |
1215 | ||
1216 | ## All done. | |
1217 | OS._exit(0) | |
1218 | ||
1219 | ## Back in the main process: close both the pipes and return the child | |
1220 | ## process. | |
1221 | OS.close(r_out); OS.close(w_out) | |
1222 | OS.close(r_err); OS.close(w_err) | |
1223 | if OPT.quiet: print "%-*s | (started)" % (TAGWD, job.name) | |
1224 | return kid, None | |
1225 | ||
1226 | def run(me): | |
1227 | """Run the scheduler.""" | |
1228 | ||
1229 | spew("JobScheduler starts") | |
1230 | ||
1231 | while True: | |
1232 | ## The main scheduler loop. We go through three main phases: | |
1233 | ## | |
1234 | ## * Inspect the jobs in the `check' list to see whether they can | |
1235 | ## run. After this, the `check' list will be empty. | |
1236 | ## | |
1237 | ## * If there are running jobs, check to see whether any of them have | |
1238 | ## stopped, and deal with the results. Also, if there are jobs | |
1239 | ## ready to start and a job token has become available, then | |
1240 | ## retrieve the token. (Doing these at the same time is the tricky | |
1241 | ## part.) | |
1242 | ## | |
1243 | ## * If there is a job ready to run, and we retrieved a token, then | |
1244 | ## start running the job. | |
1245 | ||
1246 | ## Check the pending jobs to see if they can make progress: run each | |
1247 | ## job's `check' method and move it to the appropriate queue. (It's OK | |
1248 | ## if `check' methods add more jobs to the list, as long as things | |
1249 | ## settle down eventually.) | |
1250 | while True: | |
1251 | try: job = me._check.pop() | |
1252 | except KeyError: break | |
1253 | if job._deps is None: | |
1254 | job._deps = set() | |
1255 | job.prepare() | |
1256 | state, reason = job.check() | |
1257 | tail = reason is not None and ": %s" % reason or "" | |
1258 | if state == READY: | |
1259 | spew("job `%s' ready to run%s" % (job.name, tail)) | |
1260 | me._ready.add(job) | |
1261 | elif state is FAILED: | |
1262 | spew("job `%s' refused to run%s" % (job.name, tail)) | |
1263 | me._retire(job, False, "refused to run%s" % tail) | |
1264 | elif state is DONE: | |
1265 | spew("job `%s' has nothing to do%s" % (job.name, tail)) | |
1266 | me._retire(job, True, reason) | |
1267 | elif state is SLEEP: | |
1268 | spew("job `%s' can't run yet%s" % (job.name, tail)) | |
1269 | me._sleep.add(job) | |
1270 | else: | |
1271 | raise ValueError("unexpected job check from `%s': %r, %r" % | |
1272 | (job.name, state, reason)) | |
1273 | ||
1274 | ## If there are no jobs left, then we're done. | |
1275 | if not me._njobs: | |
1276 | spew("all jobs completed") | |
1277 | break | |
1278 | ||
1279 | ## Make sure we can make progress. There are no jobs on the check list | |
1280 | ## any more, because we just cleared it. We assume that jobs which are | |
1281 | ## ready to run will eventually receive a token. So we only end up in | |
1282 | ## trouble if there are jobs asleep, but none running or ready to run. | |
1283 | ##spew("#jobs = %d" % me._njobs) | |
1284 | ##spew("sleeping: %s" % ", ".join([j.name for j in me._sleep])) | |
1285 | ##spew("ready: %s" % ", ".join([j.name for j in me._ready])) | |
1286 | ##spew("running: %s" % ", ".join([j.name for j in me._kidmap.itervalues()])) | |
1287 | assert not me._sleep or me._kidmap or me._logkidmap or me._ready | |
1288 | ||
1289 | ## Wait for something to happen. | |
1290 | if not me._ready or (not me._par and me._privtoken is None): | |
1291 | ## If we have no jobs ready to run, then we must wait for an existing | |
1292 | ## child to exit. Hopefully, a sleeping job will be able to make | |
1293 | ## progress after this. | |
1294 | ## | |
1295 | ## Alternatively, if we're not supposed to be running jobs in | |
1296 | ## parallel and we don't have the private token, then we have no | |
1297 | ## choice but to wait for the running job to complete. | |
1298 | ## | |
1299 | ## There's no check here for `ECHILD'. We really shouldn't be here | |
1300 | ## if there are no children to wait for. (The check list must be | |
1301 | ## empty because we just drained it. If the ready list is empty, | |
1302 | ## then all of the jobs must be running or sleeping; but the | |
1303 | ## assertion above means that either there are no jobs at all, in | |
1304 | ## which case we should have stopped, or at least one is running, in | |
1305 | ## which case it's safe to wait for it. The other case is that we're | |
1306 | ## running jobs sequentially, and one is currently running, so | |
1307 | ## there's nothing for it but to wait for it -- and hope that it will | |
1308 | ## wake up one of the sleeping jobs. The remaining possibility is | |
1309 | ## that we've miscounted somewhere, which will cause a crash.) | |
1310 | if not me._ready: | |
1311 | spew("no new jobs ready: waiting for outstanding jobs to complete") | |
1312 | else: | |
1313 | spew("job running without parallelism: waiting for it to finish") | |
1314 | kid, st = OS.waitpid(-1, 0) | |
1315 | me._reap(kid, st) | |
1316 | me._reapkids() | |
1317 | continue | |
1318 | ||
1319 | ## We have jobs ready to run, so try to acquire a token. | |
1320 | if me._rfd == -1 and me._par: | |
1321 | ## We're running with unlimited parallelism, so we don't need a token | |
1322 | ## to run a job. | |
1323 | spew("running new job without token") | |
1324 | token = TRIVIAL_TOKEN | |
1325 | elif me._privtoken: | |
1326 | ## Our private token is available, so we can use that to start | |
1327 | ## a new job. | |
1328 | spew("private token available: assigning to new job") | |
1329 | token = me._privtoken | |
1330 | me._privtoken = None | |
1331 | else: | |
1332 | ## We have to read from the jobserver pipe. Unfortunately, we're not | |
1333 | ## allowed to set the pipe nonblocking, because make is also using it | |
1334 | ## and will get into a serious mess. And we must deal with `SIGCHLD' | |
1335 | ## arriving at any moment. We use the same approach as GNU Make. We | |
1336 | ## start by making a copy of the jobserver descriptor: it's this | |
1337 | ## descriptor we actually try to read from. We set a signal handler | |
1338 | ## to close this descriptor if a child exits. And we try one last | |
1339 | ## time to reap any children which have exited just before we try | |
1340 | ## reading the jobserver pipe. This way we're covered: | |
1341 | ## | |
1342 | ## * If a child exits during the main loop, before we establish the | |
1343 | ## descriptor copy then we'll notice when we try reaping | |
1344 | ## children. | |
1345 | ## | |
1346 | ## * If a child exits between the last-chance reap and the read, | |
1347 | ## the signal handler will close the descriptor and the `read' | |
1348 | ## call will fail with `EBADF'. | |
1349 | ## | |
1350 | ## * If a child exits while we're inside the `read' system call, | |
1351 | ## then the syscall will fail with `EINTR'. | |
1352 | ## | |
1353 | ## The only problem is that we can't do this from Python, because | |
1354 | ## Python signal handlers are delayed. This is what the `jobclient' | |
1355 | ## module is for. | |
1356 | ## | |
1357 | ## The `jobclient' function is called as | |
1358 | ## | |
1359 | ## jobclient(FD) | |
1360 | ## | |
1361 | ## It returns a tuple of three values: TOKEN, PID, STATUS. If TOKEN | |
1362 | ## is not `None', then reading the pipe succeeded; if TOKEN is empty, | |
1363 | ## then the pipe returned EOF, so we should abort; otherwise, TOKEN | |
1364 | ## is a singleton string holding the token character. If PID is not | |
1365 | ## `None', then PID is the process id of a child which exited, and | |
1366 | ## STATUS is its exit status. | |
1367 | spew("waiting for token from jobserver") | |
1368 | tokch, kid, st = JC.jobclient(me._rfd) | |
1369 | ||
1370 | if kid is not None: | |
1371 | me._reap(kid, st) | |
1372 | me._reapkids() | |
1373 | if tokch is None: | |
1374 | spew("no token; trying again") | |
1375 | continue | |
1376 | elif token == '': | |
1377 | error("jobserver pipe closed; giving up") | |
1378 | me._killall() | |
1379 | continue | |
1380 | spew("received token from jobserver") | |
1381 | token = JobServerToken(tokch, me._wfd) | |
1382 | ||
1383 | ## We have a token, so we should start up the job. | |
1384 | job = me._ready.pop() | |
1385 | job._token = token | |
1386 | spew("start new job `%s'" % job.name) | |
1387 | kid, err = me.run_job(job) | |
1388 | if err is not None: | |
1389 | me._retire(job, False, "failed to fork: %s" % err) | |
1390 | continue | |
1391 | if kid is None: me._retire(job, True, "dry run") | |
1392 | else: me._kidmap[kid] = job | |
1393 | ||
1394 | ## We ran out of work to do. | |
1395 | spew("JobScheduler done") | |
1396 | ||
1397 | ###-------------------------------------------------------------------------- | |
1398 | ### Configuration. | |
1399 | ||
1400 | R_CONFIG = RX.compile(r"^([a-zA-Z0-9_]+)='(.*)'$") | |
1401 | ||
class Config (object):
  """
  The parsed contents of the shell-syntax configuration.

  Configuration variables are exposed as attributes (via `__getattr__'),
  converted according to the `_CONVERT' and `_CONV_MAP' tables below;
  variables not listed in either table are left as plain strings.
  """

  ## Converters from the raw string value to a useful Python type.  These
  ## are defined as plain functions so they can be cited in the tables
  ## below, and rebound as static methods afterwards.
  def _conv_str(s): return s
  def _conv_list(s): return s.split()
  def _conv_set(s): return set(s.split())

  ## Map from exact configuration-variable names to their converters.
  _CONVERT = {
    "ROOTLY": _conv_list,
    "DISTS": _conv_set,
    "MYARCH": _conv_set,
    "NATIVE_ARCHS": _conv_set,
    "FOREIGN_ARCHS": _conv_set,
    "FOREIGN_GNUARCHS": _conv_list,
    "ALL_ARCHS": _conv_set,
    "NATIVE_CHROOTS": _conv_set,
    "FOREIGN_CHROOTS": _conv_set,
    "ALL_CHROOTS": _conv_set,
    "BASE_PACKAGES": _conv_list,
    "EXTRA_PACKAGES": _conv_list,
    "CROSS_PACKAGES": _conv_list,
    "CROSS_PATHS": _conv_list,
    "APTCONF": _conv_list,
    "LOCALPKGS": _conv_list,
    "SCHROOT_COPYFILES": _conv_list,
    "SCHROOT_NSSDATABASES": _conv_list
  }

  ## Map from variable-name suffixes to (ATTRIBUTE, CONVERTER) pairs: a
  ## variable `PREFIX_SUFFIX' is stored as entry `PREFIX' in the dictionary
  ## exposed as attribute ATTRIBUTE.
  _CONV_MAP = {
    "*_APTCONFSRC": ("APTCONFSRC", _conv_str),
    "*_DEPS": ("PKGDEPS", _conv_list),
    "*_QEMUHOST": ("QEMUHOST", _conv_str),
    "*_QEMUARCH": ("QEMUARCH", _conv_str),
    "*_QEMUDIST": ("QEMUDIST", _conv_str),
    "*_ALIASES": ("DISTALIAS", _conv_str)
  }

  ## Rebind the converters as static methods now the tables are built.
  _conv_str = staticmethod(_conv_str)
  _conv_list = staticmethod(_conv_list)
  _conv_set = staticmethod(_conv_set)

  def __init__(me):
    ## NOTE(review): the `@@@config@@@' marker looks like a placeholder for
    ## build-time substitution of the configuration text between the quoted
    ## strings; as written here, the raw-string assignment is immediately
    ## overridden by reading `state/config.sh' directly -- TODO confirm
    ## against the build rules.
    raw = r"""
"""; raw = open('state/config.sh').read(); _ignore = """ @@@config@@@
"""
    me._conf = {}
    for line in raw.split("\n"):
      line = line.strip()
      if not line or line.startswith('#'): continue
      m = R_CONFIG.match(line)
      if not m: raise ExpectedError("bad config line `%s'" % line)
      ## Undo the shell quoting convention: a literal `'' appears as `'\'''.
      k, v = m.group(1), m.group(2).replace("'\\''", "'")
      d = me._conf
      ## Exact names take priority; otherwise try each `_'-separated suffix
      ## of the name against `_CONV_MAP', longest suffix first, falling back
      ## to a plain string.
      try: conv = me._CONVERT[k]
      except KeyError:
        i = 0
        while True:
          try: i = k.index("_", i + 1)
          except ValueError: conv = me._conv_str; break
          try: map, conv = me._CONV_MAP["*" + k[i:]]
          except KeyError: pass
          else:
            ## Suffix matched: store under the prefix in the per-suffix
            ## dictionary instead of the top-level map.
            d = me._conf.setdefault(map, dict())
            k = k[:i]
            if k.startswith("_"): k = k[1:]
            break
      d[k] = conv(v)

  def __getattr__(me, attr):
    ## Expose configuration entries as attributes of the instance.
    try: return me._conf[attr]
    except KeyError, err: raise AttributeError(err.args[0])
1472 | ||
1473 | with toplevel_handler(): C = Config() | |
1474 | ||
1475 | ###-------------------------------------------------------------------------- | |
1476 | ### Chroot maintenance utilities. | |
1477 | ||
## Freshness modes for `check_fresh' (below): `CREATE' is satisfied by mere
## existence; `FORCE' is never satisfied.
CREATE = Tag("CREATE")
FORCE = Tag("FORCE")
1480 | ||
a6395bc3 MW |
## Shell fragment establishing a noninteractive debconf environment, for
## splicing into commands run inside chroots.  (Fixed: the `DEBIAN_PRIORITY'
## line was missing the `;' before `export', unlike its neighbours, so the
## assignment was only a command prefix rather than a persistent variable.)
DEBCONF_TWEAKS = """
        DEBIAN_FRONTEND=noninteractive; export DEBIAN_FRONTEND
        DEBIAN_PRIORITY=critical; export DEBIAN_PRIORITY
        DEBCONF_NONINTERACTIVE_SEEN=true; export DEBCONF_NONINTERACTIVE_SEEN
"""
1486 | ||
a98c9dba MW |
def check_fresh(fresh, update):
  """
  Compare a refresh mode FRESH against an UPDATE time.

  Return a (STATUS, REASON) pair, suitable for returning from a job `check'
  method.

  The FRESH argument may be one of the following:

    * `CREATE' is satisfied if the thing exists at all: it returns `READY' if
      the thing doesn't yet exist (UPDATE is `None'), or `DONE' otherwise.

    * `FORCE' is never satisfied: it always returns `READY'.

    * an integer N is satisfied if UPDATE time is at most N seconds earlier
      than the present: it returns `READY' if the UPDATE is too old, or
      `DONE' otherwise.
  """
  ## Guard clauses, in decreasing order of urgency.
  if update is None: return READY, "must create"
  if fresh is FORCE: return READY, "update forced"
  if fresh is CREATE: return DONE, "already created"
  if NOW - unzulu(update) > fresh: return READY, "too stale: updating"
  return DONE, "already sufficiently up-to-date"
1510 | ||
def lockfile_path(file):
  """
  Return the full path for a lockfile named FILE.

  Create the lock directory if necessary.
  """
  ldir = OS.path.join(C.STATE, "lock")
  mkdir_p(ldir)
  return OS.path.join(ldir, file)
1519 | ||
def chroot_src_lockfile(dist, arch):
  """
  Return the lockfile for the source-chroot for DIST on ARCH.

  It is not allowed to acquire a source-chroot lock while holding any other
  locks.
  """
  leaf = "source.%s-%s" % (dist, arch)
  return lockfile_path(leaf)
1528 | ||
def chroot_src_lv(dist, arch):
  """
  Return the logical volume name for the source-chroot for DIST on ARCH.
  """
  prefix = C.LVPREFIX
  return "%s%s-%s" % (prefix, dist, arch)
1534 | ||
def chroot_src_blkdev(dist, arch):
  """
  Return the block-device name for the source-chroot for DIST on ARCH.
  """
  lv = chroot_src_lv(dist, arch)
  return OS.path.join("/dev", C.VG, lv)
1540 | ||
def chroot_src_mntpt(dist, arch):
  """
  Return mountpoint path for setting up the source-chroot for DIST on ARCH.

  The directory is created if necessary.  Note that this is not the
  mountpoint that schroot(1) uses.
  """
  leaf = "%s-%s" % (dist, arch)
  path = OS.path.join(C.STATE, "mnt", leaf)
  mkdir_p(path)
  return path
1550 | ||
def chroot_session_mntpt(session):
  """Return the mountpoint at which schroot(1) presents SESSION."""
  return OS.path.join("/schroot", session)
1554 | ||
def crosstools_lockfile(dist, arch):
  """
  Return the lockfile for the cross-build tools for DIST, hosted by ARCH.

  When locking multiple cross-build tools, you must acquire the locks in
  lexicographically ascending order.
  """
  leaf = "cross-tools.%s-%s" % (dist, arch)
  return lockfile_path(leaf)
1563 | ||
1564 | def switch_prefix(string, map): | |
1565 | """ | |
1566 | Replace the prefix of a STRING, according to the given MAP. | |
1567 | ||
1568 | MAP is a sequence of (OLD, NEW) pairs. For each such pair in turn, test | |
1569 | whether STRING starts with OLD: if so, return STRING, but with the prefix | |
1570 | OLD replaced by NEW. If no OLD prefix matches, then raise a `ValueError'. | |
1571 | """ | |
1572 | for old, new in map: | |
1573 | if string.startswith(old): return new + string[len(old):] | |
1574 | raise ValueError("expected `%s' to start with one of %s" % | |
1575 | ", ".join(["`%s'" % old for old, new in map])) | |
1576 | ||
def host_to_chroot(path):
  """
  Convert a host path under `C.LOCAL' to the corresponding chroot path under
  `/usr/local.schroot'.
  """
  mapping = [(C.LOCAL + "/", "/usr/local.schroot/")]
  return switch_prefix(path, mapping)
1583 | ||
def chroot_to_host(path):
  """
  Convert a chroot path under `/usr/local.schroot' to the corresponding
  host path under `C.LOCAL'.
  """
  mapping = [("/usr/local.schroot/", C.LOCAL + "/")]
  return switch_prefix(path, mapping)
1590 | ||
def split_dist_arch(spec):
  """Split a SPEC of the form `DIST-ARCH' into the pair (DIST, ARCH)."""
  cut = spec.index("-")
  dist, arch = spec[:cut], spec[cut + 1:]
  return dist, arch
1595 | ||
def elf_binary_p(arch, path):
  """Return whether PATH is an ELF binary for ARCH."""
  if not OS.path.isfile(path): return False
  f = open(path, 'rb')
  try: magic = f.read(20)
  finally: f.close()
  ## Check the ELF magic number and the (zero) padding bytes first.
  if magic[0:4] != "\x7fELF": return False
  if magic[8:16] != 8*"\0": return False
  ## Now dispatch on the donor architecture: check the class/data/version
  ## identification bytes and the machine field.
  if arch == "i386":
    ident, machine = "\x01\x01\x01", "\x03\x00"
  elif arch == "amd64":
    ident, machine = "\x02\x01\x01", "\x3e\x00"
  else:
    raise ValueError("unsupported donor architecture `%s'" % arch)
  return magic[4:7] == ident and magic[18:20] == machine
1611 | ||
def progress(msg):
  """
  Print a progress message MSG.

  This is intended to be called within a job's `run' method, so it writes
  straight to file descriptor 1 without consulting `OPT.quiet' or
  `OPT.silent'.
  """
  line = ";; %s\n" % msg
  OS.write(1, line)
1620 | ||
class NoSuchChroot (Exception):
  """
  Exception reporting that a chroot doesn't exist.

  More precisely, it means that the chroot doesn't even have a logical
  volume.
  """
  def __init__(me, dist, arch):
    me.dist, me.arch = dist, arch
  def __str__(me):
    return "chroot for `%s' on `%s' not found" % (me.dist, me.arch)
1632 | ||
@CTX.contextmanager
def mount_chroot_src(dist, arch):
  """
  Context manager which mounts the source-chroot for DIST on ARCH.

  The filesystem is unmounted again automatically when the body exits.
  Callers must hold the appropriate source-chroot lock before invoking
  this routine.
  """
  blkdev = chroot_src_blkdev(dist, arch)
  if not block_device_p(blkdev): raise NoSuchChroot(dist, arch)
  mntpt = chroot_src_mntpt(dist, arch)
  ## The mount command sits inside the `try' deliberately: even if the
  ## mount itself fails we still attempt the unmount on the way out.
  try:
    run_program(C.ROOTLY + ["mount", blkdev, mntpt])
    yield mntpt
  finally:
    umount(mntpt)
1650 | ||
@CTX.contextmanager
def chroot_session(dist, arch, sourcep = False):
  """
  Context manager which runs an schroot(1) session.

  The (ugly, automatically generated) session name, together with the
  session's root directory, is yielded to the context body.  By default a
  snapshot session is begun: set SOURCEP true to begin a source-chroot
  session instead, in which case you must already hold the appropriate
  source-chroot lock.

  The session is closed again automatically when the body exits.
  """
  target = chroot_src_lv(dist, arch)
  if sourcep: target = "source:%s" % target
  session = run_program(["schroot", "-uroot", "-b", "-c", target],
                        stdout = RETURN).rstrip("\n")
  try:
    fsroot = OS.path.join(chroot_session_mntpt(session), "fs")
    yield session, fsroot
  finally:
    run_program(["schroot", "-e", "-c", session])
1673 | ||
def run_root(command, **kw):
  """Run a COMMAND as root.  Arguments are as for `run_program'."""
  full = C.ROOTLY + command
  return run_program(full, **kw)
1677 | ||
def run_schroot_session(session, command, rootp = False, **kw):
  """
  Run a COMMAND within an schroot(1) session.

  If ROOTP is true then run the command as root within the session.
  Remaining arguments are as for `run_program'.
  """
  invoke = ["schroot"]
  if rootp: invoke.append("-uroot")
  invoke += ["-r", "-c", session, "--"]
  return run_program(invoke + command, **kw)
1690 | ||
def run_schroot_source(dist, arch, command, **kw):
  """
  Run a COMMAND through schroot(1), in the source-chroot for DIST on ARCH.

  Arguments are as for `run_program'.  You must hold the appropriate
  source-chroot lock before calling this routine.
  """
  chroot = "source:%s" % chroot_src_lv(dist, arch)
  return run_program(["schroot", "-uroot", "-c", chroot, "--"] + command,
                     **kw)
1701 | ||
1702 | ###-------------------------------------------------------------------------- | |
1703 | ### Metadata files. | |
1704 | ||
class MetadataClass (type):
  """
  Metaclass for metadata classes.

  If the class dictionary has a `VARS' entry, then add a `_VARSET' entry
  alongside it, holding the same items as a set.  (Both forms are wanted:
  the set gives fast membership tests, while the original sequence
  remembers the ordering.)
  """
  def __new__(me, name, supers, clsdict):
    if 'VARS' in clsdict: clsdict['_VARSET'] = set(clsdict['VARS'])
    return super(MetadataClass, me).__new__(me, name, supers, clsdict)
1719 | ||
class BaseMetadata (object):
  """
  Base class for metadata objects.

  A metadata bundle is a simple collection of key/value pairs.  Keys
  should usually be Python identifiers because they're used to name
  attributes.  Values are strings, but shouldn't have leading or trailing
  whitespace, and can't contain newlines.

  Metadata bundles are written to files.  The format is simple enough:
  empty lines and lines starting with `#' are ignored; otherwise, the line
  must have the form

        KEY = VALUE

  where KEY does not contain `='; spaces around the `=' are optional, and
  spaces around the KEY and VALUE are stripped.  The order of keys is
  unimportant; keys are always written in a standard order on output.
  """
  __metaclass__ = MetadataClass

  def __init__(me, **kw):
    """Initialize a metadata bundle from keyword arguments."""
    for key, value in kw.items(): setattr(me, key, value)
    ## Any variable not named explicitly starts out unset.
    for key in me.VARS:
      if not hasattr(me, key): setattr(me, key, None)

  def __setattr__(me, attr, value):
    """
    Set an attribute.

    Only attribute names listed in the `VARS' class attribute are
    permitted.
    """
    if attr not in me._VARSET: raise AttributeError(attr)
    super(BaseMetadata, me).__setattr__(attr, value)

  @classmethod
  def read(cls, path):
    """Return a new metadata bundle read from the file named PATH."""
    kv = {}
    with open(path) as f:
      for line in f:
        line = line.strip()
        if not line or line[0] == "#": continue
        key, value = line.split("=", 1)
        kv[key.strip()] = value.strip()
    return cls(**kv)

  def _write(me, file):
    """
    Write the metadata bundle to FILE (a file-like object).

    This is intended for use by subclasses which want to override the
    default I/O behaviour of the main `write' method.
    """
    file.write("### -*-conf-*-\n")
    for key in me.VARS:
      value = getattr(me, key, None)
      if value is not None: file.write("%s = %s\n" % (key, value))

  def write(me, path):
    """
    Write the metadata bundle to the file named PATH.

    The file is replaced atomically.
    """
    with safewrite(path) as f: me._write(f)

  def __repr__(me):
    body = ", ".join("%s=%r" % (key, getattr(me, key, None))
                     for key in me.VARS)
    return "#<%s: %s>" % (me.__class__.__name__, body)
1796 | ||
class ChrootMetadata (BaseMetadata):
  ## Metadata for a source-chroot: its distribution, architecture, and the
  ## timestamp of the last successful update.
  VARS = ['dist', 'arch', 'update']

  @classmethod
  def read(cls, dist, arch):
    """
    Read the metadata for the chroot for DIST on ARCH from its `META'
    file, returning a fresh empty bundle if there is none.
    """
    try:
      with lockfile(chroot_src_lockfile(dist, arch), exclp = False):
        with mount_chroot_src(dist, arch) as mnt:
          return super(ChrootMetadata, cls).read(OS.path.join(mnt, "META"))
    except IOError as err:
      ## A missing `META' file just means the chroot is new.
      if err.errno != E.ENOENT: raise
    except NoSuchChroot:
      pass
    return cls(dist = dist, arch = arch)

  def write(me):
    """Write the metadata back to the chroot's `META' file."""
    with mount_chroot_src(me.dist, me.arch) as mnt:
      with safewrite_root(OS.path.join(mnt, "META")) as f:
        me._write(f)
1816 | ||
class CrossToolsMetadata (BaseMetadata):
  ## Metadata for a cross-tools tree: its distribution, hosting
  ## architecture, and the timestamp of the last successful update.
  VARS = ['dist', 'arch', 'update']

  @classmethod
  def read(cls, dist, arch):
    """
    Read the metadata for the DIST cross-tools hosted by ARCH, returning a
    fresh empty bundle if there is none.
    """
    meta = OS.path.join(C.LOCAL, "cross", "%s-%s" % (dist, arch), "META")
    try:
      return super(CrossToolsMetadata, cls).read(meta)
    except IOError as err:
      if err.errno != E.ENOENT: raise
    return cls(dist = dist, arch = arch)

  def write(me, dir = None):
    """
    Write the metadata to DIR (defaulting to the usual cross-tools
    directory for this distribution and architecture).
    """
    if dir is None:
      dir = OS.path.join(C.LOCAL, "cross", "%s-%s" % (me.dist, me.arch))
    with safewrite_root(OS.path.join(dir, "META")) as f:
      me._write(f)
1835 | ||
1836 | ###-------------------------------------------------------------------------- | |
1837 | ### Constructing a chroot. | |
1838 | ||
## Matches lines of `dpkg-divert --list' output describing diversions made
## by `install-cross-tools'; group 1 captures the diverted path.
R_DIVERT = RX.compile(r"^diversion of (.*) to .* by install-cross-tools$")
1840 | ||
class ChrootJob (BaseJob):
  """
  Create or update a chroot.

  The job either bootstraps a brand-new source-chroot (`_make_chroot') or
  refreshes an existing one (`_update_chroot'), depending on whether the
  chroot's metadata records a previous successful update.
  """

  SPECS = C.ALL_CHROOTS

  def __init__(me, spec, fresh = CREATE, *args, **kw):
    """
    Initialize the job from SPEC, a string of the form `DIST-ARCH'.

    FRESH states how recent the chroot must be to count as up-to-date
    (interpreted by `check_fresh' -- defined elsewhere in this file;
    TODO confirm exact semantics of the `CREATE' default).
    """
    super(ChrootJob, me).__init__(*args, **kw)
    me._dist, me._arch = split_dist_arch(spec)
    me._fresh = fresh
    me._meta = ChrootMetadata.read(me._dist, me._arch)
    ## Prerequisite cross-tools jobs; set up by `prepare' for foreign
    ## architectures only.
    me._tools_chroot = me._qemu_chroot = None

  def _mkname(me): return "chroot.%s-%s" % (me._dist, me._arch)

  def prepare(me):
    """
    Establish prerequisite jobs.

    A foreign-architecture chroot wants the native cross-build tools and
    the appropriate emulator extracted first.
    """
    if me._arch in C.FOREIGN_ARCHS:
      me._tools_chroot = CrossToolsJob.ensure\
        ("%s-%s" % (me._dist, C.TOOLSARCH), FRESH)
      me._qemu_chroot = CrossToolsJob.ensure\
        ("%s-%s" % (C.QEMUDIST.get(me._dist, me._dist),
                    C.QEMUHOST[me._arch]), FRESH)
      me.await(me._tools_chroot)
      me.await(me._qemu_chroot)

  def check(me):
    """
    Decide whether this job must run.

    Runs if either prerequisite cross-tools job actually ran, or if the
    chroot's recorded update time fails the requested freshness check.
    """
    status, reason = super(ChrootJob, me).check()
    if status is not READY: return status, reason
    if (me._tools_chroot is not None and me._tools_chroot.started) or \
       (me._qemu_chroot is not None and me._qemu_chroot.started):
      return READY, "prerequisites run"
    return check_fresh(me._fresh, me._meta.update)

  def _install_cross_tools(me):
    """
    Install or refresh cross-tools in the source-chroot.

    This function version assumes that the source-chroot lock is already
    held.

    Note that there isn't a job class corresponding to this function.  It's
    done automatically as part of source-chroot setup and update for foreign
    architectures.
    """
    with Cleanup() as clean:

      dist, arch = me._dist, me._arch

      ## Determine the multiarch triplet of the tools architecture and the
      ## GNU type of the target architecture, as dpkg-architecture reports
      ## them.
      mymulti = run_program(["dpkg-architecture", "-a", C.TOOLSARCH,
                             "-qDEB_HOST_MULTIARCH"],
                            stdout = RETURN).rstrip("\n")
      gnuarch = run_program(["dpkg-architecture", "-A", arch,
                             "-qDEB_TARGET_GNU_TYPE"],
                            stdout = RETURN).rstrip("\n")

      crossdir = OS.path.join(C.LOCAL, "cross",
                              "%s-%s" % (dist, C.TOOLSARCH))

      qarch, qhost, qdist = \
        C.QEMUARCH[arch], C.QEMUHOST[arch], C.QEMUDIST.get(dist, dist)
      qemudir = OS.path.join(C.LOCAL, "cross",
                             "%s-%s" % (qdist, qhost), "QEMU")

      ## Acquire lockfiles in a canonical order to prevent deadlocks.
      donors = [C.TOOLSARCH]
      if qarch != C.TOOLSARCH: donors.append(qarch)
      donors.sort()
      for a in donors:
        clean.enter(lockfile(crosstools_lockfile(dist, a), exclp = False))

      ## Open a session.
      session, root = clean.enter(chroot_session(dist, arch, sourcep = True))

      ## Search the cross-tools tree for tools, to decide what to do with
      ## each file.  Make lists:
      ##
      ##   * `want_div' is simply a set of all files in the chroot which need
      ##     dpkg diversions to prevent foreign versions of the tools from
      ##     clobbering our native versions.
      ##
      ##   * `want_link' is a dictionary mapping paths which need symbolic
      ##     links into the cross-tools trees to their link destinations.
      progress("scan cross-tools tree")
      want_div = set()
      want_link = dict()
      cross_prefix = crossdir + "/"
      qemu_prefix = qemudir + "/"
      toolchain_prefix = OS.path.join(crossdir, "TOOLCHAIN", gnuarch) + "/"
      def examine(path):
        ## Translate PATH (within a cross-tools tree) into the chroot path
        ## it should appear at, and record the wanted link and diversion.
        dest = switch_prefix(path, [(qemu_prefix, "/usr/bin/"),
                                    (toolchain_prefix, "/usr/bin/"),
                                    (cross_prefix, "/")])
        if OS.path.islink(path): src = OS.readlink(path)
        else: src = host_to_chroot(path)
        want_link[dest] = src
        if not OS.path.isdir(path): want_div.add(dest)
      examine(OS.path.join(qemudir, "qemu-%s-static" % qarch))
      examine(OS.path.join(crossdir, "lib", mymulti))
      examine(OS.path.join(crossdir, "usr/lib", mymulti))
      examine(OS.path.join(crossdir, "usr/lib/gcc-cross"))
      def visit(_, dir, files):
        ## `os.path.walk' callback: prune metadata entries and already-
        ## handled subtrees, and `examine' every remaining non-directory.
        ff = []
        for f in files:
          if f == "META" or f == "QEMU" or f == "TOOLCHAIN" or \
             (dir.endswith("/lib") and (f == mymulti or f == "gcc-cross")):
            continue
          ff.append(f)
          path = OS.path.join(dir, f)
          if OS.path.islink(path) or not OS.path.isdir(path): examine(path)
        files[:] = ff
      OS.path.walk(crossdir, visit, None)
      OS.path.walk(OS.path.join(crossdir, "TOOLCHAIN", gnuarch),
                   visit, None)

      ## Build the set `have_div' of paths which already have diversions.
      progress("scan chroot")
      have_div = set()
      with subprocess(["schroot", "-uroot", "-r", "-c", session, "--",
                       "dpkg-divert", "--list"],
                      stdout = PIPE) as (_, fd_out, _):
        try:
          f = OS.fdopen(fd_out)
          for line in f:
            m = R_DIVERT.match(line.rstrip("\n"))
            if m: have_div.add(m.group(1))
        finally:
          f.close()

      ## Build a dictionary `have_link' of symbolic links into the cross-
      ## tools trees.  Also, be sure to collect all of the relative symbolic
      ## links which are in the cross-tools tree.
      have_link = dict()
      with subprocess(["schroot", "-uroot", "-r", "-c", session, "--",
                       "sh", "-e", "-c", """
        find / -xdev -lname "/usr/local.schroot/cross/*" -printf "%p %l\n"
      """], stdout = PIPE) as (_, fd_out, _):
        try:
          f = OS.fdopen(fd_out)
          for line in f:
            dest, src = line.split()
            have_link[dest] = src
        finally:
          f.close()
      for path in want_link.iterkeys():
        real = root + path
        if not OS.path.islink(real): continue
        have_link[path] = OS.readlink(real)

      ## Add diversions for the paths which need one, but don't have one.
      ## There's a hack here because the `--no-rename' option was required in
      ## the same version in which it was introduced, so there's no single
      ## incantation that will work across the boundary.
      progress("add missing diversions")
      with subprocess(["schroot", "-uroot", "-r", "-c", session, "--",
                       "sh", "-e", "-c", """
        a="%(arch)s"

        if dpkg-divert >/dev/null 2>&1 --no-rename --help
        then no_rename=--no-rename
        else no_rename=
        fi

        while read path; do
          dpkg-divert --package "install-cross-tools" $no_rename \
            --divert "$path.$a" --add "$path"
        done
      """ % dict(arch = arch)], stdin = PIPE) as (fd_in, _, _):
        try:
          f = OS.fdopen(fd_in, 'w')
          for path in want_div:
            if path not in have_div: f.write(path + "\n")
        finally:
          f.close()

      ## Go through each diverted tool, and, if it hasn't been moved aside,
      ## then /link/ it across now.  If we rename it, then the chroot will
      ## stop working -- which is why we didn't allow `dpkg-divert' to do the
      ## rename.  We can tell a tool that hasn't been moved, because it's a
      ## symlink into one of the cross trees.
      progress("preserve existing foreign files")
      chroot_cross_prefix = host_to_chroot(crossdir) + "/"
      chroot_qemu_prefix = host_to_chroot(qemudir) + "/"
      for path in want_div:
        real = root + path; div = real + "." + arch; cross = crossdir + path
        if OS.path.exists(div): continue
        if not OS.path.exists(real): continue
        if OS.path.islink(real):
          realdest = OS.readlink(real)
          if realdest.startswith(chroot_cross_prefix) or \
             realdest.startswith(chroot_qemu_prefix):
            continue
          if OS.path.islink(cross) and realdest == OS.readlink(cross):
            continue
        progress("preserve existing foreign file `%s'" % path)
        run_root(["ln", real, div])

      ## Update all of the symbolic links which are currently wrong: add
      ## links which are missing, delete ones which are obsolete, and update
      ## ones which have the wrong target.
      progress("update symlinks")
      for path, src in want_link.iteritems():
        real = root + path
        try: old_src = have_link[path]
        except KeyError: pass
        else:
          if src == old_src: continue
        new = real + ".new"
        progress("link `%s' -> `%s'" % (path, src))
        dir = OS.path.dirname(real)
        if not OS.path.isdir(dir): run_root(["mkdir", "-p", dir])
        if OS.path.exists(new): run_root(["rm", "-f", new])
        run_root(["ln", "-s", src, new])
        run_root(["mv", new, real])
      for path in have_link.iterkeys():
        if path in want_link: continue
        real = root + path
        progress("remove obsolete link `%s' -> `%s'" %
                 (path, OS.readlink(real)))
        run_root(["rm", "-f", real])

      ## Remove diversions from paths which don't need them any more.  Here
      ## it's safe to rename, because either the tool isn't there, in which
      ## case it obviously wasn't important, or it is, and `dpkg-divert' will
      ## atomically replace our link with the foreign version.
      progress("remove obsolete diversions")
      with subprocess(["schroot", "-uroot", "-r", "-c", session, "--",
                       "sh", "-e", "-c", """
        a="%(arch)s"

        while read path; do
          dpkg-divert --package "install-cross-tools" --rename \
            --divert "$path.$a" --remove "$path"
        done
      """ % dict(arch = arch)], stdin = PIPE) as (fd_in, _, _):
        try:
          f = OS.fdopen(fd_in, 'w')
          for path in have_div:
            if path not in want_div: f.write(path + "\n")
        finally:
          f.close()

  def _make_chroot(me):
    """
    Create the source-chroot with chroot metadata META.

    This will recreate a source-chroot from scratch, destroying the existing
    logical volume if necessary.
    """
    with Cleanup() as clean:

      dist, arch = me._dist, me._arch
      clean.enter(lockfile(chroot_src_lockfile(dist, arch)))

      mnt = chroot_src_mntpt(dist, arch)
      dev = chroot_src_blkdev(dist, arch)
      lv = chroot_src_lv(dist, arch)
      newlv = lv + ".new"

      ## Clean up any leftover debris.
      if mountpoint_p(mnt): umount(mnt)
      if block_device_p(dev):
        run_root(["lvremove", "-f", "%s/%s" % (C.VG, lv)])

      ## Create the logical volume and filesystem.  It's important that the
      ## logical volume not have its official name until after it contains a
      ## mountable filesystem.
      progress("create filesystem")
      run_root(["lvcreate", "--yes", C.LVSZ, "-n", newlv, C.VG])
      run_root(["mkfs", "-j", "-L%s-%s" % (dist, arch),
                OS.path.join("/dev", C.VG, newlv)])
      run_root(["lvrename", C.VG, newlv, lv])

      ## Start installing the chroot.
      with mount_chroot_src(dist, arch) as mnt:

        ## Set the basic structure.
        run_root(["mkdir", "-m755", OS.path.join(mnt, "fs")])
        run_root(["chmod", "750", mnt])

        ## Install the base system.
        progress("install base system")
        run_root(["eatmydata", "debootstrap", "--no-merged-usr"] +
                 (arch in C.FOREIGN_ARCHS and ["--foreign"] or []) +
                 ["--arch=" + arch, "--variant=minbase",
                  "--include=" + ",".join(C.BASE_PACKAGES),
                  dist, OS.path.join(mnt, "fs"), C.DEBMIRROR])

        ## If this is a cross-installation, then install the necessary `qemu'
        ## and complete the installation.
        if arch in C.FOREIGN_ARCHS:
          qemu = OS.path.join("cross", "%s-%s" % (dist, C.QEMUHOST[arch]),
                              "QEMU", "qemu-%s-static" % C.QEMUARCH[arch])
          run_root(["install", OS.path.join(C.LOCAL, qemu),
                    OS.path.join(mnt, "fs/usr/bin")])
          run_root(["chroot", OS.path.join(mnt, "fs"),
                    "/debootstrap/debootstrap", "--second-stage"])
          run_root(["ln", "-sf",
                    OS.path.join("/usr/local.schroot", qemu),
                    OS.path.join(mnt, "fs/usr/bin")])

        ## Set up `/usr/local'.
        progress("install `/usr/local' symlink")
        run_root(["rm", "-rf", OS.path.join(mnt, "fs/usr/local")])
        run_root(["ln", "-s",
                  OS.path.join("local.schroot", arch),
                  OS.path.join(mnt, "fs/usr/local")])

        ## Install the `apt' configuration.
        progress("configure package manager")
        run_root(["rm", "-f", OS.path.join(mnt, "fs/etc/apt/sources.list")])
        for c in C.APTCONF:
          run_root(["ln", "-s",
                    OS.path.join("/usr/local.schroot/etc/apt/apt.conf.d", c),
                    OS.path.join(mnt, "fs/etc/apt/apt.conf.d")])
        run_root(["ln", "-s",
                  "/usr/local.schroot/etc/apt/sources.%s" % dist,
                  OS.path.join(mnt, "fs/etc/apt/sources.list")])

        with safewrite_root\
            (OS.path.join(mnt, "fs/etc/apt/apt.conf.d/20arch")) as f:
          f.write("""\
### -*-conf-*-

APT {
  Architecture "%s";
};
""" % arch)

        ## Set up the locale and time zone from the host system.
        progress("configure locales and timezone")
        run_root(["cp", "/etc/locale.gen", "/etc/timezone",
                  OS.path.join(mnt, "fs/etc")])
        with open("/etc/timezone") as f: tz = f.readline().strip()
        run_root(["ln", "-sf",
                  OS.path.join("/usr/share/timezone", tz),
                  OS.path.join(mnt, "fs/etc/localtime")])
        run_root(["cp", "/etc/default/locale",
                  OS.path.join(mnt, "fs/etc/default")])

        ## Fix `/etc/mtab'.
        progress("set `/etc/mtab'")
        run_root(["ln", "-sf", "/proc/mounts",
                  OS.path.join(mnt, "fs/etc/mtab")])

        ## Prevent daemons from starting within the chroot.
        progress("inhibit daemon startup")
        with safewrite_root(OS.path.join(mnt, "fs/usr/sbin/policy-rc.d"),
                            mode = "755") as f:
          f.write("""\
#! /bin/sh
echo >&2 "policy-rc.d: Services disabled by policy."
exit 101
""")

        ## Hack the dynamic linker to prefer libraries in `/usr' over
        ## `/usr/local'.  This prevents `dpkg-shlibdeps' from becoming
        ## confused.
        progress("configure dynamic linker")
        with safewrite_root\
            (OS.path.join(mnt, "fs/etc/ld.so.conf.d/libc.conf")) as f:
          f.write("# libc default configuration")
        with safewrite_root\
            (OS.path.join(mnt, "fs/etc/ld.so.conf.d/zzz-local.conf")) as f:
          f.write("""\
### -*-conf-*-
### Local hack to make /usr/local/ late.
/usr/local/lib
""")

      ## If this is a foreign architecture then we need to set it up.
      if arch in C.FOREIGN_ARCHS:

        ## Keep the chroot's native Qemu out of our way: otherwise we'll stop
        ## being able to run programs in the chroot.  There's a hack here
        ## because the `--no-rename' option was required in the same version
        ## in which is was introduced, so there's no single incantation that
        ## will work across the boundary.
        progress("divert emulator")
        run_schroot_source(dist, arch, ["eatmydata", "sh", "-e", "-c", """
          if dpkg-divert >/dev/null 2>&1 --no-rename --help
          then no_rename=--no-rename
          else no_rename=
          fi

          dpkg-divert --package install-cross-tools $no_rename \
            --divert /usr/bin/%(qemu)s.%(arch)s --add /usr/bin/%(qemu)s
        """ % dict(arch = arch, qemu = "qemu-%s-static" % C.QEMUARCH[arch])])

        ## Install faster native tools.
        me._install_cross_tools()

      ## Finishing touches.
      progress("finishing touches")
      run_schroot_source(dist, arch, ["eatmydata", "sh", "-e", "-c",
                                      DEBCONF_TWEAKS + """
        apt-get update
        apt-get -y upgrade
        apt-get -y install "$@"
        ldconfig
        apt-get -y autoremove
        apt-get clean
      """, "."] + C.EXTRA_PACKAGES, stdin = DISCARD)

      ## Mark the chroot as done.
      me._meta.update = zulu()
      me._meta.write()

  def _update_chroot(me):
    """Refresh the source-chroot with chroot metadata META."""
    with Cleanup() as clean:
      dist, arch = me._dist, me._arch
      clean.enter(lockfile(chroot_src_lockfile(dist, arch)))
      run_schroot_source(dist, arch, ["eatmydata", "sh", "-e", "-c",
                                      DEBCONF_TWEAKS + """
        apt-get update
        apt-get -y dist-upgrade
        apt-get -y autoremove
        apt-get -y clean
        ldconfig
      """], stdin = DISCARD)
      if arch in C.FOREIGN_ARCHS: me._install_cross_tools()
      ## Record the successful update in the chroot's metadata.
      me._meta.update = zulu(); me._meta.write()

  def run(me):
    """Create the chroot if it has never been set up; otherwise update it."""
    if me._meta.update is not None: me._update_chroot()
    else: me._make_chroot()
2268 | ||
2269 | ###-------------------------------------------------------------------------- | |
2270 | ### Extracting the cross tools. | |
2271 | ||
2272 | class CrossToolsJob (BaseJob): | |
2273 | """Extract cross-tools from a donor chroot.""" | |
2274 | ||
2275 | SPECS = C.NATIVE_CHROOTS | |
2276 | ||
2277 | def __init__(me, spec, fresh = CREATE, *args, **kw): | |
2278 | super(CrossToolsJob, me).__init__(*args, **kw) | |
2279 | me._dist, me._arch = split_dist_arch(spec) | |
2280 | me._meta = CrossToolsMetadata.read(me._dist, me._arch) | |
2281 | me._fresh = fresh | |
2282 | me._chroot = None | |
2283 | ||
2284 | def _mkname(me): return "cross-tools.%s-%s" % (me._dist, me._arch) | |
2285 | ||
2286 | def prepare(me): | |
2287 | st, r = check_fresh(me._fresh, me._meta.update) | |
2288 | if st is DONE: return | |
2289 | me._chroot = ChrootJob.ensure("%s-%s" % (me._dist, me._arch), FRESH) | |
2290 | me.await(me._chroot) | |
2291 | ||
2292 | def check(me): | |
2293 | status, reason = super(CrossToolsJob, me).check() | |
2294 | if status is not READY: return status, reason | |
2295 | if me._chroot is not None and me._chroot.started: | |
2296 | return READY, "prerequisites run" | |
2297 | return check_fresh(me._fresh, me._meta.update) | |
2298 | ||
2299 | def run(me): | |
2300 | with Cleanup() as clean: | |
2301 | ||
2302 | dist, arch = me._dist, me._arch | |
2303 | ||
2304 | mymulti = run_program(["dpkg-architecture", "-a" + arch, | |
2305 | "-qDEB_HOST_MULTIARCH"], | |
2306 | stdout = RETURN).rstrip("\n") | |
2307 | crossarchs = [run_program(["dpkg-architecture", "-A" + a, | |
2308 | "-qDEB_TARGET_GNU_TYPE"], | |
2309 | stdout = RETURN).rstrip("\n") | |
2310 | for a in C.FOREIGN_ARCHS] | |
2311 | ||
2312 | crossdir = OS.path.join(C.LOCAL, "cross", "%s-%s" % (dist, arch)) | |
2313 | crossold = crossdir + ".old"; crossnew = crossdir + ".new" | |
2314 | usrbin = OS.path.join(crossnew, "usr/bin") | |
2315 | ||
2316 | clean.enter(lockfile(crosstools_lockfile(dist, arch))) | |
2317 | run_program(["rm", "-rf", crossnew]) | |
2318 | mkdir_p(crossnew) | |
2319 | ||
2320 | ## Open a session to the donor chroot. | |
2321 | progress("establish snapshot") | |
2322 | session, root = clean.enter(chroot_session(dist, arch)) | |
2323 | ||
2324 | ## Make sure the donor tree is up-to-date, and install the extra | |
2325 | ## packages we need. | |
2326 | progress("install tools packages") | |
a6395bc3 MW |
2327 | run_schroot_session(session, ["eatmydata", "sh", "-e", "-c", |
2328 | DEBCONF_TWEAKS + """ | |
a98c9dba MW |
2329 | apt-get update |
2330 | apt-get -y upgrade | |
2331 | apt-get -y install "$@" | |
2332 | """, "."] + C.CROSS_PACKAGES, rootp = True, stdin = DISCARD) | |
2333 | ||
      def chase(path):
        ## Ensure that PATH (interpreted relative to the chroot root)
        ## resolves within the new cross-tools tree, copying files and
        ## directories from the donor chroot as necessary.  Symbolic links
        ## are followed, and their targets copied too, so that PATH will
        ## resolve properly in the finished tree.
        dest = ""

        ## Work through the remaining components of the PATH.
        while path != "":
          try: sl = path.index("/")
          except ValueError: step = path; path = ""
          else: step, path = path[:sl], path[sl + 1:]

          ## Split off and analyse the first component.
          if step == "" or step == ".":
            ## A redundant `/' or `./'.  Skip it.
            pass
          elif step == "..":
            ## A `../'.  Strip off the trailing component of DEST.
            dest = dest[:dest.rindex("/")]
          else:
            ## Something else.  Transfer the component name to DEST.
            dest += "/" + step

            ## If DEST refers to something in the cross-tools tree then we're
            ## good.
            crossdest = crossnew + dest
            try: st = OS.lstat(crossdest)
            except OSError, err:
              if err.errno == E.ENOENT:
                ## No.  We need to copy something from the donor tree so that
                ## the name works.  Directories are created empty (their
                ## interesting contents get copied separately); anything else
                ## is copied wholesale, preserving hard links and relative
                ## paths.

                st = OS.lstat(root + dest)
                if ST.S_ISDIR(st.st_mode):
                  OS.mkdir(crossdest)
                else:
                  progress("copy `%s'" % dest)
                  run_program(["rsync", "-aHR",
                               "%s/.%s" % (root, dest),
                               crossnew])
              else:
                raise

            ## If DEST refers to a symbolic link, then prepend the link target
            ## to PATH so that we can be sure the link will work.
            if ST.S_ISLNK(st.st_mode):
              link = OS.readlink(crossdest)
              if link.startswith("/"): dest = ""; link = link[1:]
              else:
                try: dest = dest[:dest.rindex("/")]
                except ValueError: dest = ""
              if path == "": path = link
              else: path = "%s/%s" % (link, path)
a98c9dba MW |
2384 | |
2385 | ## Work through the shopping list, copying the things it names into the | |
2386 | ## cross-tools tree. | |
33c6b9e7 MW |
2387 | ## |
2388 | ## Each thing in the `CROSS_PATHS' list is a `|'-separated list of glob | |
2389 | ## patterns, optionally preceded by `?'. Unless the list starts with | |
2390 | ## `?', at least one of the patterns must match at least one file. | |
2391 | ## Patterns may contain the token `MULTI', which is replaced by the | |
2392 | ## donor architecture's multiarch triplet. | |
a98c9dba MW |
2393 | scan = [] |
2394 | for pat in C.CROSS_PATHS: | |
a98c9dba | 2395 | any = False |
33c6b9e7 MW |
2396 | pat = pat.replace("MULTI", mymulti) |
2397 | if pat.startswith("?"): | |
2398 | pat = pat[1:] | |
a98c9dba | 2399 | any = True |
33c6b9e7 MW |
2400 | for subpat in pat.split("|"): |
2401 | for rootpath in GLOB.iglob(root + subpat): | |
2402 | any = True | |
2403 | path = rootpath[len(root):] | |
2404 | progress("copy `%s'" % path) | |
2405 | run_program(["rsync", "-aHR", "%s/.%s" % (root, path), crossnew]) | |
a98c9dba MW |
2406 | if not any: |
2407 | raise RuntimeError("no matches for cross-tool pattern `%s'" % pat) | |
2408 | ||
2409 | ## Scan the new tree: chase down symbolic links, copying extra stuff | |
2410 | ## that we'll need; and examine ELF binaries to make sure we get the | |
2411 | ## necessary shared libraries. | |
      def visit(_, dir, files):
        ## `OS.path.walk' callback: for each file in the tree, chase down
        ## symbolic links (so that their targets get copied into the tree),
        ## and add ELF binaries to the `scan' list for later library
        ## discovery.
        for f in files:
          path = OS.path.join(dir, f)
          inside = switch_prefix(path, [(crossnew + "/", "/")])
          if OS.path.islink(path): chase(inside)
          if elf_binary_p(arch, path): scan.append(inside)
2418 | OS.path.walk(crossnew, visit, None) | |
2419 | ||
2420 | ## Work through the ELF binaries in `scan', determining which shared | |
2421 | ## libraries they'll need. | |
2422 | ## | |
2423 | ## The rune running in the chroot session reads ELF binary names on | |
2424 | ## stdin, one per line, and runs `ldd' on them to discover the binary's | |
2425 | ## needed libraries and resolve them into pathnames. Each pathname is | |
2426 | ## printed to stderr as a line `+PATHNAME', followed by a final line | |
2427 | ## consisting only of `-' as a terminator. This is necessary so that | |
2428 | ## we can tell when we've finished, because newly discovered libraries | |
2429 | ## need to be fed back to discover their recursive dependencies. (This | |
2430 | ## is why the `WriteLinesSelector' interface is quite so hairy.) | |
2431 | with subprocess(["schroot", "-r", "-c", session, "--", | |
2432 | "sh", "-e", "-c", """ | |
2433 | while read path; do | |
2434 | ldd "$path" | while read a b c d; do | |
2435 | case $a:$b:$c:$d in | |
2436 | not:a:dynamic:executable) ;; | |
2437 | statically:linked::) ;; | |
2438 | /*) echo "+$a" ;; | |
2439 | *:=\\>:/*) echo "+$c" ;; | |
2440 | linux-*) ;; | |
2441 | *) echo >&2 "failed to find shared library \\`$a'"; exit 2 ;; | |
2442 | esac | |
2443 | done | |
2444 | echo - | |
2445 | done | |
2446 | """], stdin = PIPE, stdout = PIPE) as (fd_in, fd_out, _): | |
2447 | ||
2448 | ## Keep track of the number of binaries we've reported to the `ldd' | |
2449 | ## process for which we haven't yet seen all of their dependencies. | |
2450 | ## (This is wrapped in a `Struct' because of Python's daft scoping | |
2451 | ## rules.) | |
2452 | v = Struct(n = 0) | |
2453 | ||
        def line_in():
          ## Provide the next binary name for the `ldd' process's stdin.
          ## Return None to stall if we're still waiting for replies, and
          ## raise `StopIteration' when there's nothing left to do.

          try:
            ## See if there's something to scan.
            path = scan.pop()

          except IndexError:
            ## There's nothing currently waiting to be scanned.
            if v.n:
              ## There are still outstanding replies, so stall.
              return None
            else:
              ## There are no outstanding replies left, and we have nothing
              ## more to scan, then we must be finished.
              raise StopIteration

          else:
            ## The `scan' list isn't empty, so return an item from that, and
            ## remember that there's one more thing we expect to see answers
            ## from.
            v.n += 1; return path
2476 | ||
        def line_out(line):
          ## We've received a line from the `ldd' process: either `-',
          ## marking the end of one binary's dependencies, or `+PATHNAME'
          ## naming a needed shared library.

          if line == "-":
            ## It's finished processing one of our binaries.  Note this.
            ## Maybe it's time to stop.
            v.n -= 1
            return

          ## Strip the leading marker (which is just there so that the
          ## terminating `-' is unambiguous).
          assert line.startswith("+")
          lib = line[1:]

          ## If we already have this binary then we'll already have submitted
          ## it.
          path = crossnew + lib
          try: OS.lstat(path)
          except OSError, err:
            if err.errno == E.ENOENT: pass
            else: raise
          else: return

          ## Copy it into the tools tree, together with any symbolic links
          ## along the path.
          chase(lib)

          ## If this is an ELF binary (and it ought to be!) then submit it
          ## for further scanning.
          if elf_binary_p(arch, path):
            scan.append(switch_prefix(path, [(crossnew + "/", "/")]))
2508 | ||
2509 | ## And run this entire contraption. When this is done, we should | |
2510 | ## have all of the library dependencies for all of our binaries. | |
2511 | select_loop([WriteLinesSelector(fd_in, line_in), | |
2512 | ReadLinesSelector(fd_out, line_out)]) | |
2513 | ||
2514 | ## Set up the cross-compiler and emulator. Start by moving the cross | |
2515 | ## compilers and emulator into their specific places, so they don't end | |
2516 | ## up cluttering chroots for non-matching architectures. | |
2517 | progress("establish TOOLCHAIN and QEMU") | |
2518 | OS.mkdir(OS.path.join(crossnew, "TOOLCHAIN")) | |
2519 | qemudir = OS.path.join(crossnew, "QEMU") | |
2520 | OS.mkdir(qemudir) | |
2521 | for gnu in C.FOREIGN_GNUARCHS: | |
2522 | OS.mkdir(OS.path.join(crossnew, "TOOLCHAIN", gnu)) | |
2523 | for f in OS.listdir(usrbin): | |
2524 | for gnu in C.FOREIGN_GNUARCHS: | |
2525 | gnuprefix = gnu + "-" | |
2526 | if f.startswith(gnuprefix): | |
2527 | tooldir = OS.path.join(crossnew, "TOOLCHAIN", gnu) | |
2528 | OS.rename(OS.path.join(usrbin, f), OS.path.join(tooldir, f)) | |
2529 | OS.symlink(f, OS.path.join(tooldir, f[len(gnuprefix):])) | |
2530 | break | |
2531 | else: | |
2532 | if f.startswith("qemu-") and f.endswith("-static"): | |
2533 | OS.rename(OS.path.join(usrbin, f), OS.path.join(qemudir, f)) | |
2534 | ||
2535 | ## The GNU cross compilers try to find their additional pieces via a | |
2536 | ## relative path, which isn't going to end well. Add a symbolic link | |
2537 | ## at the right place to where the things are actually going to live. | |
2538 | toollib = OS.path.join(crossnew, "TOOLCHAIN", "lib") | |
2539 | OS.mkdir(toollib) | |
2540 | OS.symlink("../../usr/lib/gcc-cross", | |
2541 | OS.path.join(toollib, "gcc-cross")) | |
2542 | ||
2543 | ## We're done. Replace the old cross-tools with our new one. | |
2544 | me._meta.update = zulu() | |
2545 | me._meta.write(crossnew) | |
2546 | if OS.path.exists(crossdir): run_program(["mv", crossdir, crossold]) | |
2547 | OS.rename(crossnew, crossdir) | |
2548 | run_program(["rm", "-rf", crossold]) | |
2549 | ||
2550 | ###-------------------------------------------------------------------------- | |
### Building and installing local packages.
2552 | ||
def pkg_metadata_lockfile(pkg):
  """Return the name of the lockfile guarding PKG's metadata record."""
  return lockfile_path("pkg-meta." + pkg)
2555 | ||
def pkg_srcdir_lockfile(pkg, ver):
  """Return the name of the lockfile guarding the PKG-VER source tree."""
  base = "pkg-source.%s-%s" % (pkg, ver)
  return lockfile_path(base)
2558 | ||
def pkg_srcdir(pkg, ver):
  """Return the path of the unpacked source tree for version VER of PKG."""
  subdir = "%s-%s" % (pkg, ver)
  return OS.path.join(C.LOCAL, "src", subdir)
2561 | ||
def pkg_builddir(pkg, ver, arch):
  """Return the path of PKG version VER's build directory for ARCH."""
  srcdir = pkg_srcdir(pkg, ver)
  return OS.path.join(srcdir, "build.%s" % arch)
2564 | ||
2565 | class PackageMetadata (BaseMetadata): | |
2566 | VARS = ["pkg"] + list(C.ALL_ARCHS) | |
2567 | ||
2568 | @classmethod | |
2569 | def read(cls, pkg): | |
2570 | try: | |
2571 | return super(PackageMetadata, cls)\ | |
2572 | .read(OS.path.join(C.LOCAL, "src", "META.%s" % pkg)) | |
2573 | except IOError, err: | |
2574 | if err.errno == E.ENOENT: pass | |
2575 | else: raise | |
2576 | return cls(pkg = pkg) | |
2577 | ||
2578 | def write(me): | |
2579 | super(PackageMetadata, me)\ | |
2580 | .write(OS.path.join(C.LOCAL, "src", "META.%s" % me.pkg)) | |
2581 | ||
class PackageSourceJob (BaseJob):
  """Job which unpacks the source tarball for a local package.

  The tarball lives in `pkg/' as `PKG-VERSION.tar.COMPRESSION'; the
  unpacked tree ends up in the shared source area as `PKG-VERSION/'.
  """

  SPECS = C.LOCALPKGS

  def __init__(me, pkg, fresh = CREATE, *args, **kw):
    """Initialize the job, locating PKG's unique source tarball.

    Raise `ExpectedError' if there is no tarball for the package, or if
    there is more than one.
    """
    super(PackageSourceJob, me).__init__(*args, **kw)
    me._pkg = pkg
    tar = None; ver = None
    ## Note `\.tar\.': both dots must be literal.  (The second dot was
    ## previously unescaped, so the pattern accepted any character there.)
    r = RX.compile("^%s-(\d.*)\.tar\.(?:Z|z|gz|bz2|xz|lzma)$" %
                   RX.escape(pkg))
    for f in OS.listdir("pkg"):
      m = r.match(f)
      if not m: pass
      elif tar is not None:
        raise ExpectedError("multiple source tarballs of package `%s'" % pkg)
      else: tar, ver = f, m.group(1)
    if tar is None:
      ## Fail cleanly, rather than letting `OS.path.join' trip over the
      ## `None' with a baffling exception.
      raise ExpectedError("no source tarball of package `%s'" % pkg)
    me.version = ver
    me.tarball = OS.path.join("pkg", tar)

  def _mkname(me): return "pkg-source.%s" % me._pkg

  def check(me):
    """The job is done once the source tree has been unpacked."""
    status, reason = super(PackageSourceJob, me).check()
    if status is not READY: return status, reason
    if OS.path.isdir(pkg_srcdir(me._pkg, me.version)):
      return DONE, "already unpacked"
    else:
      return READY, "no source tree"

  def run(me):
    """Unpack the tarball into the shared source area.

    The tree is unpacked into a scratch directory and renamed into place,
    so a partial unpacking is never left at the official name.
    """
    with Cleanup() as clean:
      pkg, ver, tar = me._pkg, me.version, me.tarball
      srcdir = pkg_srcdir(pkg, ver)
      newdir = srcdir + ".new"

      progress("unpack `%s'" % me.tarball)
      clean.enter(lockfile(pkg_srcdir_lockfile(pkg, ver)))
      run_program(["rm", "-rf", newdir])
      mkdir_p(newdir)
      run_program(["tar", "xf", OS.path.join(OS.getcwd(), me.tarball)],
                  cwd = newdir)
      ## If the tarball contained a single top-level directory (the usual
      ## convention) then that directory becomes the source tree;
      ## otherwise the scratch directory itself does.
      things = OS.listdir(newdir)
      if len(things) == 1:
        OS.rename(OS.path.join(newdir, things[0]), srcdir)
        OS.rmdir(newdir)
      else:
        OS.rename(newdir, srcdir)
2629 | ||
2630 | class PackageBuildJob (BaseJob): | |
2631 | ||
2632 | SPECS = ["%s:%s" % (pkg, arch) | |
2633 | for pkg in C.LOCALPKGS | |
2634 | for arch in C.ALL_ARCHS] | |
2635 | ||
2636 | def __init__(me, spec, fresh = CREATE, *args, **kw): | |
2637 | super(PackageBuildJob, me).__init__(*args, **kw) | |
2638 | colon = spec.index(":") | |
2639 | me._pkg, me._arch = spec[:colon], spec[colon + 1:] | |
2640 | ||
2641 | def _mkname(me): return "pkg-build.%s:%s" % (me._pkg, me._arch) | |
2642 | ||
2643 | def prepare(me): | |
2644 | me.await(ChrootJob.ensure("%s-%s" % (C.PRIMARY_DIST, me._arch), CREATE)) | |
2645 | me._meta = PackageMetadata.read(me._pkg) | |
2646 | me._src = PackageSourceJob.ensure(me._pkg, FRESH); me.await(me._src) | |
2647 | me._prereq = [PackageBuildJob.ensure("%s:%s" % (prereq, me._arch), FRESH) | |
2648 | for prereq in C.PKGDEPS[me._pkg]] | |
2649 | for j in me._prereq: me.await(j) | |
2650 | ||
2651 | def check(me): | |
2652 | status, reason = super(PackageBuildJob, me).check() | |
2653 | if status is not READY: return status, reason | |
2654 | if me._src.started: return READY, "fresh source directory" | |
2655 | for j in me._prereq: | |
2656 | if j.started: | |
2657 | return READY, "dependency `%s' freshly installed" % j._pkg | |
2658 | if getattr(me._meta, me._arch) == me._src.version: | |
2659 | return DONE, "already installed" | |
2660 | return READY, "not yet installed" | |
2661 | ||
2662 | def run(me): | |
2663 | with Cleanup() as clean: | |
2664 | pkg, ver, arch = me._pkg, me._src.version, me._arch | |
2665 | ||
2666 | session, _ = clean.enter(chroot_session(C.PRIMARY_DIST, arch)) | |
2667 | builddir = OS.path.join(pkg_srcdir(pkg, ver), "build.%s" % arch) | |
2668 | chroot_builddir = host_to_chroot(builddir) | |
2669 | run_program(["rm", "-rf", builddir]) | |
2670 | OS.mkdir(builddir) | |
2671 | ||
2672 | progress("prepare %s chroot" % (arch)) | |
2673 | run_schroot_session(session, | |
2674 | ["eatmydata", "apt-get", "update"], | |
2675 | rootp = True, stdin = DISCARD) | |
2676 | run_schroot_session(session, | |
2677 | ["eatmydata", "apt-get", "-y", "upgrade"], | |
2678 | rootp = True, stdin = DISCARD) | |
2679 | run_schroot_session(session, | |
2680 | ["eatmydata", "apt-get", "-y", | |
2681 | "install", "pkg-config"], | |
2682 | rootp = True, stdin = DISCARD) | |
2683 | run_schroot_session(session, | |
2684 | ["mount", "-oremount,rw", "/usr/local.schroot"], | |
2685 | rootp = True, stdin = DISCARD) | |
942fed18 MW |
2686 | run_schroot_session(session, |
2687 | ["mount", "--bind", | |
2688 | "/usr/local.schroot/%s/include.aside" % arch, | |
2689 | "/usr/local.schroot/%s/include" % arch], | |
2690 | rootp = True, stdin = DISCARD) | |
a98c9dba MW |
2691 | |
2692 | progress("configure `%s' %s for %s" % (pkg, ver, arch)) | |
2693 | run_schroot_session(session, ["sh", "-e", "-c", """ | |
2694 | cd "$1" && | |
2695 | ../configure PKG_CONFIG_PATH=/usr/local/lib/pkgconfig.hidden | |
2696 | """, ".", chroot_builddir]) | |
2697 | ||
2698 | progress("compile `%s' %s for %s" % (pkg, ver, arch)) | |
2699 | run_schroot_session(session, ["sh", "-e", "-c", """ | |
2700 | cd "$1" && make -j4 && make -j4 check | |
2701 | """, ".", chroot_builddir]) | |
2702 | ||
2703 | existing = getattr(me._meta, arch, None) | |
2704 | if existing is not None and existing != ver: | |
2705 | progress("uninstall existing `%s' %s for %s" % (pkg, existing, arch)) | |
2706 | run_schroot_session(session, ["sh", "-e", "-c", """ | |
2707 | cd "$1" && make uninstall | |
2708 | """, ".", OS.path.join(pkg_srcdir(pkg, existing), | |
2709 | "build.%s" % arch)], | |
2710 | rootp = True) | |
2711 | ||
2712 | progress("install `%s' %s for %s" % (pkg, existing, arch)) | |
2713 | run_schroot_session(session, ["sh", "-e", "-c", """ | |
2714 | cd "$1" && make install | |
2715 | mkdir -p /usr/local/lib/pkgconfig.hidden | |
2716 | mv /usr/local/lib/pkgconfig/*.pc /usr/local/lib/pkgconfig.hidden || : | |
2717 | """, ".", chroot_builddir], rootp = True) | |
2718 | ||
2719 | clean.enter(lockfile(pkg_metadata_lockfile(pkg))) | |
2720 | me._meta = PackageMetadata.read(pkg) | |
2721 | setattr(me._meta, arch, ver); me._meta.write() | |
2722 | ||
2723 | with lockfile(chroot_src_lockfile(C.PRIMARY_DIST, arch)): | |
2724 | run_schroot_source(C.PRIMARY_DIST, arch, ["ldconfig"]) | |
2725 | ||
2726 | ###-------------------------------------------------------------------------- | |
2727 | ### Process the configuration and options. | |
2728 | ||
## Command-line option parser.  Options are registered directly, one call
## per option, rather than driving `add_option' from a table.
OPTIONS = OP.OptionParser\
  (usage = "chroot-maint [-diknqs] [-fFRESH] [-jN] JOB[.SPEC,...] ...")
OPTIONS.add_option("-d", "--debug",
                   dest = 'debug', default = False, action = 'store_true',
                   help = "print lots of debugging drivel")
OPTIONS.add_option("-f", "--fresh",
                   dest = 'fresh', metavar = 'FRESH', default = "create",
                   help = "how fresh (`create', `force', or `N[s|m|h|d|w]')")
OPTIONS.add_option("-i", "--ignore-errors",
                   dest = 'ignerr', default = False, action = 'store_true',
                   help = "ignore all errors encountered while processing")
OPTIONS.add_option("-j", "--jobs",
                   dest = 'njobs', metavar = 'N', default = 1, type = 'int',
                   help = 'run up to N jobs in parallel')
OPTIONS.add_option("-J", "--forkbomb",
                   dest = 'njobs', action = 'store_true',
                   help = 'run as many jobs in parallel as possible')
OPTIONS.add_option("-k", "--keep-going",
                   dest = 'keepon', default = False, action = 'store_true',
                   help = "keep going even if independent jobs fail")
OPTIONS.add_option("-n", "--dry-run",
                   dest = 'dryrun', default = False, action = 'store_true',
                   help = "don't actually do anything")
OPTIONS.add_option("-q", "--quiet",
                   dest = 'quiet', default = False, action = 'store_true',
                   help = "don't print the output from successful jobs")
OPTIONS.add_option("-s", "--silent",
                   dest = 'silent', default = False, action = 'store_true',
                   help = "don't print progress messages")
2760 | ||
2761 | ###-------------------------------------------------------------------------- | |
2762 | ### Main program. | |
2763 | ||
## Regular expression matching GNU make's jobserver argument in MAKEFLAGS;
## the two groups capture the read and write file descriptors.  (`fds' is
## the older spelling, `auth' the newer.)
R_JOBSERV = RX.compile(r'^--jobserver-(?:fds|auth)=(\d+),(\d+)$')

## Map from job-type names, as given on the command line, to the classes
## implementing them.
JOBMAP = { "chroot": ChrootJob,
           "cross-tools": CrossToolsJob,
           "pkg-source": PackageSourceJob,
           "pkg-build": PackageBuildJob }

## Regular expression matching a freshness specification: `create', `force',
## or a number with an optional unit-suffix letter.
R_FRESH = RX.compile(r"^(?:create|force|(\d+)(|[smhdw]))$")
2772 | ||
def parse_fresh(spec):
  """Parse the freshness specification SPEC.

  Return `CREATE' or `FORCE' for those keywords; otherwise SPEC is a
  number with an optional unit suffix (`s', `m', `h', `d', or `w'),
  converted to a number of seconds.  Raise `ExpectedError' if SPEC
  doesn't have any of these forms.
  """
  m = R_FRESH.match(spec)
  if not m: raise ExpectedError("bad freshness `%s'" % spec)
  if spec == "create": return CREATE
  if spec == "force": return FORCE
  n, unit = int(m.group(1)), m.group(2)
  return n*{ "": 1, "s": 1, "m": 60,
             "h": 3600, "d": 86400, "w": 604800 }[unit]
2787 | ||
## Main program: parse the command line, pick up any jobserver details
## passed down from `make', construct the requested jobs, and run the
## scheduler.
with toplevel_handler():
  OPT, args = OPTIONS.parse_args()
  rfd, wfd = -1, -1
  njobs = OPT.njobs

  ## If `make' is running us then honour its MAKEFLAGS: collect the
  ## jobserver descriptors, note a bare `-j' (unlimited parallelism,
  ## signalled by NJOBS = None), and apply the usual single-letter flags.
  try: mkflags = OS.environ['MAKEFLAGS']
  except KeyError: pass
  else:
    ff = mkflags.split()
    for f in ff:
      if f == "--": break
      m = R_JOBSERV.match(f)
      if m: rfd, wfd = int(m.group(1)), int(m.group(2))
      elif f == '-j': njobs = None
      elif not f.startswith('-'):
        for ch in f:
          if ch == 'i': OPT.ignerr = True
          elif ch == 'k': OPT.keepon = True
          elif ch == 'n': OPT.dryrun = True
          elif ch == 's': OPT.silent = True
  ## NOTE(review): with `-J', OPT.njobs is True, which compares equal to 1
  ## here and below -- it looks as though `--forkbomb' runs only one job
  ## rather than unlimited; confirm against `JobScheduler'.
  if OPT.njobs < 1:
    raise ExpectedError("running no more than %d jobs is silly" % OPT.njobs)

  FRESH = parse_fresh(OPT.fresh)

  SCHED = JobScheduler(rfd, wfd, njobs)
  OS.environ["http_proxy"] = C.PROXY

  ## Construct the jobs requested on the command line.  Each argument has
  ## the form JOB[.PATTERN,...][/FRESHNESS]: the optional `/FRESHNESS'
  ## suffix overrides the global `-f' setting, and each comma-separated
  ## PATTERN is a glob matched against the job class's known specs.
  jobs = []
  if not args: OPTIONS.print_usage(SYS.stderr); SYS.exit(2)
  for arg in args:
    try: sl = arg.index("/")
    except ValueError: fresh = FRESH
    else: arg, fresh = arg[:sl], parse_fresh(arg[sl + 1:])
    try: dot = arg.index(".")
    except ValueError: jty, pats = arg, "*"
    else: jty, pats = arg[:dot], arg[dot + 1:]
    try: jcls = JOBMAP[jty]
    except KeyError: raise ExpectedError("unknown job type `%s'" % jty)
    specs = []
    for pat in pats.split(","):
      any = False
      for s in jcls.SPECS:
        if FM.fnmatch(s, pat): specs.append(s); any = True
      if not any: raise ExpectedError("no match for `%s'" % pat)
    for s in specs:
      jobs.append(jcls.ensure(s, fresh))

  ## Run everything to completion.
  SCHED.run()

## Exit with the accumulated status code.
SYS.exit(RC)
2838 | ||
2839 | ###----- That's all, folks -------------------------------------------------- |