| 1 | # -*- python -*- |
| 2 | # |
| 3 | # Hippotat - Asinine IP Over HTTP program |
| 4 | # hippotatlib/__init__.py - common library code |
| 5 | # |
| 6 | # Copyright 2017 Ian Jackson |
| 7 | # |
| 8 | # GPLv3+ |
| 9 | # |
| 10 | # This program is free software: you can redistribute it and/or modify |
| 11 | # it under the terms of the GNU General Public License as published by |
| 12 | # the Free Software Foundation, either version 3 of the License, or |
| 13 | # (at your option) any later version. |
| 14 | # |
| 15 | # This program is distributed in the hope that it will be useful, |
| 16 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 17 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 18 | # GNU General Public License for more details. |
| 19 | # |
| 20 | # You should have received a copy of the GNU General Public License |
| 21 | # along with this program, in the file GPLv3. If not, |
| 22 | # see <http://www.gnu.org/licenses/>. |
| 23 | |
| 24 | |
import signal
# Restore default SIGINT behaviour (terminate immediately) in case our
# parent blocked or ignored it; must happen before twisted is loaded.
signal.signal(signal.SIGINT, signal.SIG_DFL)
| 27 | |
import sys
import os

from zope.interface import implementer

import twisted
from twisted.internet import reactor
import twisted.internet.endpoints
import twisted.internet.error
import twisted.internet.protocol
import twisted.logger
from twisted.logger import LogLevel
import twisted.python.constants
from twisted.python.constants import NamedConstant

import ipaddress
from ipaddress import AddressValueError

from optparse import OptionParser
import configparser
from configparser import ConfigParser
from configparser import NoOptionError

from functools import partial

import collections
import time
import codecs
import traceback

import re as regexp

import hippotatlib.slip as slip
| 59 | |
class DBG(twisted.python.constants.Names):
  # Debug message categories.  Declaration order is significant:
  # NamedConstant instances compare in declaration order, and
  # dfs_less_detailed() (in common_startup) uses `df <= dl' to mean
  # "df is no more detailed than dl" -- keep these ordered from least
  # to most detailed.
  INIT = NamedConstant()
  CONFIG = NamedConstant()
  ROUTE = NamedConstant()
  DROP = NamedConstant()
  OWNSOURCE = NamedConstant()
  FLOW = NamedConstant()
  HTTP = NamedConstant()
  TWISTED = NamedConstant()
  QUEUE = NamedConstant()
  HTTP_CTRL = NamedConstant()
  QUEUE_CTRL = NamedConstant()
  HTTP_FULL = NamedConstant()
  CTRL_DUMP = NamedConstant()
  SLIP_FULL = NamedConstant()
  DATA_COMPLETE = NamedConstant()
| 76 | |
# bytes -> hex encoder, used for dumping packet data in debug messages
_hex_codec = codecs.getencoder('hex_codec')
| 78 | |
| 79 | #---------- logging ---------- |
| 80 | |
org_stderr = sys.stderr  # saved so logging failures can still be reported

log = twisted.logger.Logger()

debug_set = set()            # DBG flags currently enabled
debug_def_detail = DBG.HTTP  # default detail level selected by -D/--debug
| 87 | |
def log_debug(dflag, msg, idof=None, d=None):
  # Emit a debug message, but only if its category flag is enabled.
  # idof, if given, tags the message with the object's id(); d, if
  # given, is binary data appended as hex (truncated to 64 bytes
  # unless DBG.DATA_COMPLETE is enabled).
  if dflag not in debug_set: return
  if idof is not None:
    msg = '[%#x] %s' % (id(idof), msg)
  if d is not None:
    trunc = ''
    if DBG.DATA_COMPLETE not in debug_set and len(d) > 64:
      d = d[0:64]
      trunc = '...'
    msg += ' ' + _hex_codec(d)[0].decode('ascii') + trunc
  log.info('{dflag} {msgcore}', dflag=dflag, msgcore=msg)
| 102 | |
def logevent_is_boringtwisted(event):
  # True iff this log event is twisted's own info-level chatter that
  # we have not asked to see (and should therefore be filtered out).
  # Any exception while deciding is reported to the original stderr
  # and treated as "not boring", so the event is still shown.
  try:
    if event.get('log_level') != LogLevel.info:
      return False
    dflag = event.get('dflag')
    if dflag is False:
      return False
    if dflag in debug_set:
      return False
    if dflag is None and DBG.TWISTED in debug_set:
      return False
  except Exception:
    print('EXCEPTION (IN BORINGTWISTED CHECK)',
          traceback.format_exc(), file=org_stderr)
    return False
  return True
| 116 | |
@implementer(twisted.logger.ILogFilterPredicate)
class LogNotBoringTwisted:
  # Log filter predicate: suppress events that
  # logevent_is_boringtwisted() classifies as noise.
  def __call__(self, event):
    if logevent_is_boringtwisted(event):
      return twisted.logger.PredicateResult.no
    return twisted.logger.PredicateResult.yes
| 125 | |
| 126 | #---------- default config ---------- |
| 127 | |
# Built-in default configuration, loaded before any config files.
# #-comments after values inside this string are stripped by
# common_startup before parsing; %% is a literal % for ConfigParser
# interpolation.  [LIMIT] caps the per-client negotiable settings.
defcfg = '''
[DEFAULT]
max_batch_down = 65536
max_queue_time = 10
target_requests_outstanding = 3
http_timeout = 30
http_timeout_grace = 5
max_requests_outstanding = 6
max_batch_up = 4000
http_retry = 5
port = 80
vroutes = ''
ifname_client = hippo%%d
ifname_server = shippo%%d

#[server] or [<client>] overrides
ipif = userv root ipif %(local)s,%(peer)s,%(mtu)s,slip,%(ifname)s %(rnets)s

# relating to virtual network
mtu = 1500

# addrs = 127.0.0.1 ::1
# url

# relating to virtual network
vvnetwork = 172.24.230.192
# vnetwork = <prefix>/<len>
# vaddr = <ipaddr>
# vrelay = <ipaddr>


# [<client-ip4-or-ipv6-address>]
# password = <password> # used by both, must match

[LIMIT]
max_batch_down = 262144
max_queue_time = 121
http_timeout = 121
target_requests_outstanding = 10
'''
| 168 | |
# these need to be defined here so that they can be imported by import *
cfg = ConfigParser(strict=False)  # global configuration; filled in by common_startup
optparser = OptionParser()        # global option parser; options added in common_startup
| 172 | |
# translation table swapping b'-' with the SLIP ESC byte (see mime_translate)
_mimetrans = bytes.maketrans(b'-'+slip.esc, slip.esc+b'-')
def mime_translate(s):
  """Return bytes s with `-' and the SLIP ESC byte swapped.

  SLIP-encoded packets cannot contain ESC ESC.
  Swap `-' and ESC.  The result cannot contain `--'
  (presumably making it safe inside MIME multipart framing -- the
  callers are outside this file, so confirm there)."""
  return s.translate(_mimetrans)
| 178 | |
class ConfigResults:
  """Plain attribute bag holding processed configuration values."""
  def __repr__(self):
    return 'ConfigResults(%r)' % (self.__dict__,)
| 184 | |
def log_discard(packet, iface, saddr, daddr, why):
  # Record, under DBG.DROP, that a packet was discarded and why.
  message = 'discarded packet [%s] %s -> %s: %s' % (iface, saddr, daddr, why)
  log_debug(DBG.DROP, message, d=packet)
| 189 | |
| 190 | #---------- packet parsing ---------- |
| 191 | |
def packet_addrs(packet):
  """Return the (source, destination) addresses of a raw IP packet.

  packet is bytes-like; the IP version is taken from the top nibble of
  the first byte.  Raises ValueError for anything other than v4/v6."""
  version = packet[0] >> 4
  if version == 4:
    addrlen, saddroff, factory = 4, 12, ipaddress.IPv4Address
  elif version == 6:
    addrlen, saddroff, factory = 16, 8, ipaddress.IPv6Address
  else:
    raise ValueError('unsupported IP version %d' % version)
  saddr = factory(packet[saddroff : saddroff + addrlen])
  daddr = factory(packet[saddroff + addrlen : saddroff + 2*addrlen])
  return (saddr, daddr)
| 207 | |
| 208 | #---------- address handling ---------- |
| 209 | |
def ipaddr(input):
  """Parse input as an IPv4 address, falling back to IPv6.

  Raises AddressValueError if it is neither."""
  try:
    return ipaddress.IPv4Address(input)
  except AddressValueError:
    return ipaddress.IPv6Address(input)
| 216 | |
def ipnetwork(input):
  """Parse input as an IPv4 network, falling back to IPv6.

  IPv4Network signals failure with AddressValueError or
  NetmaskValueError, both subclasses of ValueError; catch ValueError
  so either triggers the IPv6 fallback.  (The previous code caught
  the nonexistent name NetworkValueError, so any IPv6 input crashed
  with NameError instead of being parsed.)"""
  try:
    r = ipaddress.IPv4Network(input)
  except ValueError:
    r = ipaddress.IPv6Network(input)
  return r
| 223 | |
| 224 | #---------- ipif (SLIP) subprocess ---------- |
| 225 | |
class SlipStreamDecoder():
  """Incremental decoder for a SLIP byte stream.

  Feed arbitrary chunks to inputdata(); every complete packet is
  passed to the on_packet callback.  Call flush() at end of stream to
  emit any final unterminated packet.  desc is used only in debug
  messages."""

  def __init__(self, desc, on_packet):
    self._buffer = b''
    self._on_packet = on_packet
    self._desc = desc
    self._log('__init__')

  def _log(self, msg, **kwargs):
    log_debug(DBG.SLIP_FULL, 'slip %s: %s' % (self._desc, msg), **kwargs)

  def inputdata(self, data):
    self._log('inputdata', d=data)
    pending = self._buffer + data
    self._buffer = b''
    pieces = slip.decode(pending, True)
    # the final element is the trailing incomplete fragment: keep it
    # buffered until more data (or flush) arrives
    self._buffer = pieces.pop()
    for piece in pieces:
      self._maybe_packet(piece)
    self._log('bufremain', d=self._buffer)

  def _maybe_packet(self, packet):
    self._log('maybepacket', d=packet)
    if packet:
      self._on_packet(packet)

  def flush(self):
    self._log('flush')
    remaining, self._buffer = self._buffer, b''
    pieces = slip.decode(remaining)
    assert(len(pieces) == 1)
    self._maybe_packet(pieces[0])
| 258 | |
class _IpifProcessProtocol(twisted.internet.protocol.ProcessProtocol):
  # Bridges the ipif subprocess's stdout (a SLIP stream) to the router.
  # router is called as router(packet, saddr, daddr) for each decoded
  # packet; link-local traffic is dropped here.
  def __init__(self, router):
    self._router = router
    self._decoder = SlipStreamDecoder('ipif', self.slip_on_packet)
  def connectionMade(self): pass
  def outReceived(self, data):
    # raw stdout bytes from ipif: feed them to the SLIP decoder
    self._decoder.inputdata(data)
  def slip_on_packet(self, packet):
    (saddr, daddr) = packet_addrs(packet)
    if saddr.is_link_local or daddr.is_link_local:
      log_discard(packet, 'ipif', saddr, daddr, 'link-local')
      return
    self._router(packet, saddr, daddr)
  def processEnded(self, status):
    # any exit of ipif, even "successful", is treated as fatal
    status.raiseException()
| 274 | |
def start_ipif(command, router):
  # Start the ipif (SLIP <-> kernel interface) subprocess, delivering
  # decoded packets to router().  The command is run via `sh -xc' so
  # the configured string may use shell syntax; fd 0 is our writing
  # end, fd 1 we read SLIP data from, fd 2 is passed through.
  ipif = _IpifProcessProtocol(router)
  reactor.spawnProcess(ipif,
                       '/bin/sh',['sh','-xc', command],
                       childFDs={0:'w', 1:'r', 2:2},
                       env=None)
  return ipif
| 282 | |
def queue_inbound(ipif, packet):
  # Hand one packet to the ipif subprocess, SLIP-encoded and framed
  # with a delimiter on each side.
  log_debug(DBG.FLOW, "queue_inbound", d=packet)
  write = ipif.transport.write
  write(slip.delimiter)
  write(slip.encode(packet))
  write(slip.delimiter)
| 288 | |
| 289 | #---------- packet queue ---------- |
| 290 | |
class PacketQueue():
  """Time-limited FIFO of packets awaiting transmission.

  Packets that have sat in the queue longer than max_queue_time
  seconds are dropped (from the front) instead of being delivered
  stale.  desc is used only in debug messages."""

  def __init__(self, desc, max_queue_time):
    self._desc = desc
    assert(desc + '')  # insists desc is a (non-empty) str
    self._max_queue_time = max_queue_time
    self._pq = collections.deque() # (enqueue-time, packet) pairs

  def _log(self, dflag, msg, **kwargs):
    log_debug(dflag, self._desc+' pq: '+msg, **kwargs)

  def append(self, packet):
    # Enqueue one packet, stamped with the current monotonic time.
    self._log(DBG.QUEUE, 'append', d=packet)
    self._pq.append((time.monotonic(), packet))

  def nonempty(self):
    # Report whether any fresh packet remains, discarding expired
    # packets from the front as a side effect.
    self._log(DBG.QUEUE, 'nonempty ?')
    while True:
      try:
        (queuetime, packet) = self._pq[0]
      except IndexError:
        self._log(DBG.QUEUE, 'nonempty ? empty.')
        return False
      if time.monotonic() - queuetime > self._max_queue_time:
        self._log(DBG.QUEUE, 'dropping (old)', d=packet)
        self._pq.popleft()
      else:
        self._log(DBG.QUEUE, 'nonempty ? nonempty.')
        return True

  def process(self, sizequery, moredata, max_batch):
    # Drain queued packets into a batch, SLIP-encoded and separated by
    # delimiters, stopping before the batch would exceed max_batch.
    # sizequery() should return size of batch so far
    # moredata(s) should add s to batch
    self._log(DBG.QUEUE, 'process...')
    while True:
      try:
        (dummy, packet) = self._pq[0]
      except IndexError:
        self._log(DBG.QUEUE, 'process... empty')
        break

      self._log(DBG.QUEUE_CTRL, 'process... packet', d=packet)

      encoded = slip.encode(packet)
      sofar = sizequery()

      self._log(DBG.QUEUE_CTRL,
                'process... (sofar=%d, max=%d) encoded' % (sofar, max_batch),
                d=encoded)

      if sofar > 0:
        # a delimiter is needed before this packet; check it all fits
        if sofar + len(slip.delimiter) + len(encoded) > max_batch:
          self._log(DBG.QUEUE_CTRL, 'process... overflow')
          break
        moredata(slip.delimiter)

      moredata(encoded)
      self._pq.popleft()
| 350 | |
| 351 | #---------- error handling ---------- |
| 352 | |
_crashing = False  # set once crash() has run; suppresses reactor.run()

def crash(err):
  # Fatal error: report it on stderr and shut the reactor down
  # (tolerating the reactor not running yet/any more).
  global _crashing
  _crashing = True
  print('========== CRASH ==========', err,
        '===========================', file=sys.stderr)
  try:
    reactor.stop()
  except twisted.internet.error.ReactorNotRunning:
    pass
| 362 | |
def crash_on_defer(defer):
  # Any failure propagating out of this Deferred brings the whole
  # program down via crash().
  defer.addErrback(crash)
| 365 | |
def crash_on_critical(event):
  # Log observer: bring the program down on any critical-or-worse
  # event.  Guard against events lacking a log_level entirely --
  # `None >= LogLevel.critical' would raise TypeError, turning a
  # malformed log event into a different crash.
  level = event.get('log_level')
  if level is not None and level >= LogLevel.critical:
    crash(twisted.logger.formatEvent(event))
| 369 | |
| 370 | #---------- config processing ---------- |
| 371 | |
def _cfg_process_putatives():
  """Scan cfg's section names and classify them.

  Returns (servers, clients): dicts mapping the canonical server name
  (a string) / client address (an ipaddress object) to the canonical
  section-name string it was found under."""
  servers = { }
  clients = { }
  # maps from abstract object to canonical name for cs's

  def putative(cmap, abstract, canoncs):
    # record a candidate, insisting that any previous record agrees
    try:
      current_canoncs = cmap[abstract]
    except KeyError:
      pass
    else:
      assert(current_canoncs == canoncs)
    cmap[abstract] = canoncs

  server_pat = r'[-.0-9A-Za-z]+'
  client_pat = r'[.:0-9a-f]+'
  server_re = regexp.compile(server_pat)
  serverclient_re = regexp.compile(server_pat + r' ' + client_pat)

  for cs in cfg.sections():
    if cs == 'LIMIT':
      # plan A "[LIMIT]"
      continue

    try:
      # plan B "[<client>]" part 1
      ci = ipaddr(cs)
    except AddressValueError:

      if server_re.fullmatch(cs):
        # plan C "[<servername>]"
        putative(servers, cs, cs)
        continue

      if serverclient_re.fullmatch(cs):
        # plan D "[<servername> <client>]" part 1
        (pss,pcs) = cs.split(' ')

        if pcs == 'LIMIT':
          # plan E "[<servername> LIMIT]"
          continue

        try:
          # plan D "[<servername> <client>]" part 2
          # (bugfix: this previously parsed the undefined name `pc',
          # raising NameError for every such section)
          ci = ipaddr(pcs)
        except AddressValueError:
          # plan F "[<some thing we do not understand>]"
          # well, we ignore this
          print('warning: ignoring config section %s' % cs, file=sys.stderr)
          continue

        else: # no AddressValueError
          # plan D "[<servername> <client]" part 3
          putative(clients, ci, pcs)
          putative(servers, pss, pss)
          continue

    else: # no AddressValueError
      # plan B "[<client>" part 2
      putative(clients, ci, cs)
      continue

  return (servers, clients)
| 435 | |
def cfg_process_general(c, ss):
  # Settings from search-path ss that apply to everything: just mtu.
  c.mtu = cfg1getint(ss, 'mtu')
| 438 | |
def cfg_process_saddrs(c, ss):
  # Parse the `addrs' setting into c.saddrs, a list of ServerAddr
  # objects (one per listening address); also sets c.port.
  class ServerAddr():
    # One address/port the server listens on; knows how to build the
    # twisted listening endpoint and the corresponding base URL.
    def __init__(self, port, addrspec):
      self.port = port
      # also self.addr
      try:
        # try IPv4 first; AddressValueError means it was not IPv4
        self.addr = ipaddress.IPv4Address(addrspec)
        self._endpointfactory = twisted.internet.endpoints.TCP4ServerEndpoint
        self._inurl = b'%s'
      except AddressValueError:
        self.addr = ipaddress.IPv6Address(addrspec)
        self._endpointfactory = twisted.internet.endpoints.TCP6ServerEndpoint
        self._inurl = b'[%s]'  # IPv6 literals are bracketed in URLs
    def make_endpoint(self):
      return self._endpointfactory(reactor, self.port,
                                   interface= '%s' % self.addr)
    def url(self):
      # base URL (bytes) clients use to reach this address
      url = b'http://' + (self._inurl % str(self.addr).encode('ascii'))
      if self.port != 80: url += b':%d' % self.port
      url += b'/'
      return url
    def __repr__(self):
      return 'ServerAddr'+repr((self.port,self.addr))

  c.port = cfg1getint(ss,'port')
  c.saddrs = [ ]
  for addrspec in cfg1get(ss, 'addrs').split():
    sa = ServerAddr(c.port, addrspec)
    c.saddrs.append(sa)
| 468 | |
def cfg_process_vnetwork(c, ss):
  # Parse the `vnetwork' setting.  The network must provide at least
  # 3 + 2 = 5 addresses (presumably: server vaddr, relay, one client,
  # plus the network/broadcast addresses -- TODO confirm intent).
  c.vnetwork = ipnetwork(cfg1get(ss,'vnetwork'))
  if c.vnetwork.num_addresses < 3 + 2:
    # message fixed to match the actual check (it used to claim 2^3)
    raise ValueError('vnetwork needs at least 5 addresses')
| 473 | |
def cfg_process_vaddr(c, ss):
  # Determine the server's virtual address: an explicit `vaddr'
  # setting wins; otherwise use the first host of vnetwork (which is
  # parsed on demand here).
  try:
    c.vaddr = cfg1get(ss,'vaddr')
  except NoOptionError:
    cfg_process_vnetwork(c, ss)
    c.vaddr = next(c.vnetwork.hosts())
| 480 | |
def cfg_search_section(key,sections):
  """Return the first section in `sections' that defines `key'.

  Raises NoOptionError if none of them does."""
  for candidate in sections:
    if cfg.has_option(candidate, key):
      return candidate
  raise NoOptionError(key, repr(sections))
| 486 | |
def cfg_get_raw(*args, **kwargs):
  # for passing to cfg_search: like cfg.get but without %-interpolation
  return cfg.get(*args, raw=True, **kwargs)
| 490 | |
def cfg_search(getter,key,sections):
  # Fetch `key' via getter() from the first of `sections' defining it.
  return getter(cfg_search_section(key,sections), key)
| 494 | |
def cfg1get(*args, **kwargs):
  # thin wrapper over cfg.get, suitable as a `getter' for cfg_search
  return cfg.get(*args, **kwargs)

def cfg1getint(*args, **kwargs):
  # as cfg1get, but parses the value as an integer
  return cfg.getint(*args, **kwargs)
| 500 | |
def cfg_process_client_limited(cc,ss,sections,key):
  # Set cc.<key> to the configured value, clamped by the server's
  # "[<server> LIMIT]" / "[LIMIT]" sections so a client cannot demand
  # more than the server permits.
  val = cfg_search(cfg1getint, key, sections)
  lim = cfg_search(cfg1getint, key, ['%s LIMIT' % ss, 'LIMIT'])
  # setattr is the idiomatic spelling of the old cc.__dict__[key] = ...
  setattr(cc, key, min(val, lim))
| 505 | |
def cfg_process_client_common(cc,ss,cs,ci):
  """Process settings shared by server and client for one client.

  Returns the section search path iff a password is configured for
  this client, otherwise None (meaning: skip this client)."""
  cc.ci = ci

  # most specific first: "[<server> <client>]", "[<client>]",
  # "[<server>]", then the defaults
  sections = ['%s %s' % (ss,cs),
              cs,
              ss,
              'DEFAULT']

  try:
    pwsection = cfg_search_section('password', sections)
  except NoOptionError:
    return None

  cc.password = cfg1get(pwsection, 'password').encode('utf-8')

  for key in ('target_requests_outstanding', 'http_timeout'):
    cfg_process_client_limited(cc,ss,sections,key)

  return sections
| 525 | |
def cfg_process_ipif(c, sections, varmap):
  # Build the ipif command line.  varmap is (dest, source) attribute
  # name pairs: copy each existing source attribute of c to dest, so
  # the `ipif' config value can %-interpolate them via c.__dict__.
  for dst, src in varmap:
    if hasattr(c, src):
      setattr(c, dst, getattr(c, src))

  section = cfg_search_section('ipif', sections)
  c.ipif_command = cfg1get(section,'ipif', vars=c.__dict__)
| 536 | |
| 537 | #---------- startup ---------- |
| 538 | |
def log_debug_config(m):
  # Config-stage debugging goes straight to stdout: the twisted log
  # machinery is not set up until the end of common_startup.
  if DBG.CONFIG in debug_set:
    print('DBG.CONFIG:', m)
| 542 | |
def common_startup(process_cfg):
  """Parse the command line, read configuration, and start logging.

  process_cfg(opts, putative_servers, putative_clients) is called with
  the parsed options and the classified config sections to do the
  program-specific configuration processing.  Exits with status 12 if
  the configuration is invalid."""

  # ConfigParser hates #-comments after values
  trailingcomments_re = regexp.compile(r'#.*')
  cfg.read_string(trailingcomments_re.sub('', defcfg))
  need_defcfg = True

  def readconfig(pathname, mandatory=True):
    # Read one config file, or (if pathname is a directory) every
    # plausibly-named entry in it.  A missing path is an error only
    # if mandatory.
    def log(m, p=pathname):
      if not DBG.CONFIG in debug_set: return
      log_debug_config('%s: %s' % (m, p))

    try:
      files = os.listdir(pathname)

    except FileNotFoundError:
      if mandatory: raise
      log('skipped')
      return

    except NotADirectoryError:
      cfg.read(pathname)
      log('read file')
      return

    # is a directory
    log('directory')
    # only read entries whose names are plain [-A-Za-z0-9_]
    re = regexp.compile('[^-A-Za-z0-9_]')
    for f in files:   # reuse the listing from above rather than relisting
      if re.search(f): continue
      subpath = pathname + '/' + f
      try:
        os.stat(subpath)
      except FileNotFoundError:
        log('entry skipped', subpath)
        continue
      cfg.read(subpath)
      log('entry read', subpath)

  def oc_config(od,os, value, op):
    # --config: use only explicitly specified configuration
    nonlocal need_defcfg
    need_defcfg = False
    readconfig(value)

  def oc_extra_config(od,os, value, op):
    # --extra-config: read in addition to the default configuration
    readconfig(value)

  def read_defconfig():
    readconfig('/etc/hippotat/config.d', False)
    readconfig('/etc/hippotat/passwords.d', False)
    readconfig('/etc/hippotat/master.cfg', False)

  def oc_defconfig(od,os, value, op):
    nonlocal need_defcfg
    need_defcfg = False
    # bugfix: read_defconfig takes no arguments; this used to pass
    # `value' (None for this nargs-less option) and raise TypeError
    read_defconfig()

  def dfs_less_detailed(dl):
    # all debug flags declared at or before dl ("less detailed" ones)
    return [df for df in DBG.iterconstants() if df <= dl]

  def ds_default(od,os,dl,op):
    # -D/--debug: reset to the default set of debug flags
    global debug_set
    # bugfix: this said `debug_set.clear' without calling it, so old
    # --debug-select flags were never actually discarded
    debug_set.clear()
    debug_set |= set(dfs_less_detailed(debug_def_detail))

  def ds_select(od,os, spec, op):
    # --debug-select: apply each comma-separated [-]DFLAG[+] item
    for it in spec.split(','):

      if it.startswith('-'):
        mutator = debug_set.discard
        it = it[1:]
      else:
        mutator = debug_set.add

      if it == '+':
        dfs = DBG.iterconstants()

      else:
        if it.endswith('+'):
          mapper = dfs_less_detailed
          it = it[0:len(it)-1]
        else:
          mapper = lambda x: [x]

        try:
          dfspec = DBG.lookupByName(it)
        except ValueError:
          optparser.error('unknown debug flag %s in --debug-select' % it)

        dfs = mapper(dfspec)

      for df in dfs:
        mutator(df)

  optparser.add_option('-D', '--debug',
                       nargs=0,
                       action='callback',
                       help='enable default debug (to stdout)',
                       callback= ds_default)

  optparser.add_option('--debug-select',
                       nargs=1,
                       type='string',
                       metavar='[-]DFLAG[+]|[-]+,...',
                       help=
'''enable (`-': disable) each specified DFLAG;
`+': do same for all "more interesting" DFLAGSs;
just `+': all DFLAGs.
DFLAGS: ''' + ' '.join([df.name for df in DBG.iterconstants()]),
                       action='callback',
                       callback= ds_select)

  optparser.add_option('-c', '--config',
                       nargs=1,
                       type='string',
                       metavar='CONFIGFILE',
                       dest='configfile',
                       action='callback',
                       callback= oc_config)

  optparser.add_option('--extra-config',
                       nargs=1,
                       type='string',
                       metavar='CONFIGFILE',
                       dest='configfile',
                       action='callback',
                       callback= oc_extra_config)

  optparser.add_option('--default-config',
                       action='callback',
                       callback= oc_defconfig)

  (opts, args) = optparser.parse_args()
  if len(args): optparser.error('no non-option arguments please')

  if need_defcfg:
    read_defconfig()

  try:
    (pss, pcs) = _cfg_process_putatives()
    process_cfg(opts, pss, pcs)
  except (configparser.Error, ValueError):
    traceback.print_exc(file=sys.stderr)
    print('\nInvalid configuration, giving up.', file=sys.stderr)
    sys.exit(12)

  # route log output: errors and worse to stderr, the rest to stdout,
  # with twisted's own info chatter filtered by LogNotBoringTwisted
  log_formatter = twisted.logger.formatEventAsClassicLogText
  stdout_obs = twisted.logger.FileLogObserver(sys.stdout, log_formatter)
  stderr_obs = twisted.logger.FileLogObserver(sys.stderr, log_formatter)
  pred = twisted.logger.LogLevelFilterPredicate(LogLevel.error)
  stdsomething_obs = twisted.logger.FilteringLogObserver(
    stderr_obs, [pred], stdout_obs
  )
  global file_log_observer
  file_log_observer = twisted.logger.FilteringLogObserver(
    stdsomething_obs, [LogNotBoringTwisted()]
  )
  twisted.logger.globalLogBeginner.beginLoggingTo(
    [ file_log_observer, crash_on_critical ]
  )
| 708 | |
def common_run():
  # Run the reactor (unless a crash already happened during startup).
  # The reactor returning at all is abnormal, hence the nonzero exit.
  log_debug(DBG.INIT, 'entering reactor')
  if not _crashing:
    reactor.run()
  print('ENDED', file=sys.stderr)
  sys.exit(16)