X-Git-Url: https://git.distorted.org.uk/~mdw/autoys/blobdiff_plain/3bf73acf248b6c874719bdbaf26e0ccea2a8665f..487d44e5b8c92ac02e6ab4e967fddd898cf2d1c9:/gremlin/gremlin.in

diff --git a/gremlin/gremlin.in b/gremlin/gremlin.in
index f3a870a..c0383ed 100644
--- a/gremlin/gremlin.in
+++ b/gremlin/gremlin.in
@@ -783,7 +783,7 @@ class GStreamerProgressEyecandy (ProgressEyecandy):
       return
 
     ## Update regularly. The pipeline runs asynchronously.
-    me._id = G.timeout_add(200, me._update)
+    me._id = G.timeout_add(100, me._update)
 
   def __exit__(me, ty, val, tb):
     "Leave context: remove display and report completion or failure."
@@ -819,29 +819,36 @@ class AudioIdentifier (object):
   demand.
   """
 
-  def __init__(me, file, mime):
-    "Initialize the object suitably for identifying FILE."
-
-    ## Make some initial GStreamer objects. We'll want the pipeline later if
-    ## we need to analyse a poorly tagged MP3 stream, so save it away.
-    me._pipe = GS.Pipeline()
-    me._file = file
-    bus = me._pipe.get_bus()
-    loop = G.MainLoop()
+  def _prepare_pipeline(me):
+    pipe = GS.Pipeline()
+    bus = pipe.get_bus()
 
     ## The basic recognition kit is based around `decodebin'. We must keep
     ## it happy by giving it sinks for the streams it's found, which it
     ## announces asynchronously.
-    source = make_element('filesrc', 'file', location = file)
+    source = make_element('filesrc', 'file', location = me._file)
     decoder = make_element('decodebin', 'decode')
     sink = make_element('fakesink')
     def decoder_pad_arrived(elt, pad):
       if pad.get_current_caps()[0].get_name().startswith('audio/'):
         elt.link_pads(pad.get_name(), sink, 'sink')
-    dpaid = decoder.connect('pad-added', decoder_pad_arrived)
-    for i in [source, decoder, sink]: me._pipe.add(i)
+    decoder.connect('pad-added', decoder_pad_arrived)
+    for i in [source, decoder, sink]: pipe.add(i)
     link_elements([source, decoder])
 
+    ## Done.
+    return pipe, bus, decoder, sink
+
+  def __init__(me, file, mime):
+    "Initialize the object suitably for identifying FILE."
+
+    me._file = file
+    pipe, bus, decoder, sink = me._prepare_pipeline()
+
+    ## Make some initial GStreamer objects. We'll want the pipeline later if
+    ## we need to analyse a poorly tagged MP3 stream, so save it away.
+    loop = G.MainLoop()
+
     ## Arrange to collect tags from the pipeline's bus as they're reported.
     tags = {}
     fail = []
@@ -852,7 +859,7 @@ class AudioIdentifier (object):
         loop.quit()
       elif ty == GS.MessageType.STATE_CHANGED:
         if s['new-state'] == GS.State.PAUSED and \
-           msg.src == me._pipe:
+           msg.src == pipe:
           loop.quit()
       elif ty == GS.MessageType.TAG:
         tt = s['taglist']
@@ -882,13 +889,12 @@ class AudioIdentifier (object):
     ## Crank up most of the heavy machinery. The message handler will stop
     ## the loop when things seem to be sufficiently well underway.
     bus.add_signal_watch()
-    me._pipe.set_state(GS.State.PAUSED)
+    pipe.set_state(GS.State.PAUSED)
     loop.run()
     bus.disconnect(bmid)
-    decoder.disconnect(dpaid)
     bus.remove_signal_watch()
     if fail:
-      me._pipe.set_state(GS.State.NULL)
+      pipe.set_state(GS.State.NULL)
       raise fail[0], fail[1], fail[2]
 
     ## Store the collected tags.
@@ -907,17 +913,11 @@ class AudioIdentifier (object):
     elif 'bitrate' in tags and tags['bitrate'] >= 80000:
       me._bitrate = tags['bitrate']/1000
     else:
-      me._bitrate = None
-
-    ## The bitrate computation wants the file size. Ideally we'd want the
-    ## total size of the frames' contents, but that seems hard to dredge
-    ## out. If the framing overhead is small, this should be close enough
-    ## for our purposes.
-    me._bytes = OS.stat(file).st_size
-
-  def __del__(me):
-    "Close the pipeline down so we don't leak file descriptors."
-    me._pipe.set_state(GS.State.NULL)
+      ok, n = pipe.query_duration(GS.Format.BYTES)
+      if ok: ok, t = pipe.query_duration(GS.Format.TIME)
+      if ok: me._bitrate = int((8e6*n)/t)
+      else: me._bitrate = None
+    pipe.set_state(GS.State.NULL)
 
   @property
   def bitrate(me):
@@ -931,7 +931,8 @@ class AudioIdentifier (object):
     if me._bitrate is not None:
       return me._bitrate
 
-    ## Make up a new main loop.
+    ## Make up a new pipeline and main loop.
+    pipe, bus, _, _ = me._prepare_pipeline()
     loop = G.MainLoop()
 
     ## Watch for bus messages. We'll stop when we reach the end of the
@@ -944,27 +945,34 @@
         loop.quit()
       elif ty == GS.MessageType.EOS:
         loop.quit()
-    bus = me._pipe.get_bus()
+    bus = pipe.get_bus()
     bmid = bus.connect('message', bus_message)
 
     ## Get everything moving, and keep the user amused while we work.
     bus.add_signal_watch()
-    me._pipe.set_state(GS.State.PLAYING)
-    with GStreamerProgressEyecandy(filestatus(file, 'measure bitrate') %
-                                   me._pipe,
-                                   silentp = True):
+    pipe.set_state(GS.State.PLAYING)
+    with GStreamerProgressEyecandy(filestatus(me._file, 'measure bitrate'),
+                                   pipe, silentp = True):
       loop.run()
     bus.remove_signal_watch()
     bus.disconnect(bmid)
     if fail:
-      me._pipe.set_state(GS.State.NULL)
+      pipe.set_state(GS.State.NULL)
      raise fail[0], fail[1], fail[2]
+    STATUS.clear()
+
+    ## The bitrate computation wants the file size. Ideally we'd want the
+    ## total size of the frames' contents, but that seems hard to dredge
+    ## out. If the framing overhead is small, this should be close enough
+    ## for our purposes.
+    bytes = OS.stat(me._file).st_size
 
     ## Now we should be able to find out our position accurately and work out
     ## a bitrate. Cache it in case anybody asks again.
     ok, t = pipe.query_position(GS.Format.TIME)
     assert ok, 'failed to discover bitrate'
-    me._bitrate = int(8*me._bytes*1e6/t)
+    me._bitrate = int(8*bytes*1e6/t)
+    pipe.set_state(GS.State.NULL)
 
     ## Done.
     return me._bitrate
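
Both bitrate fallbacks added above (query_duration in __init__, and the file size
plus query_position in the bitrate property) rest on the same unit conversion.
The sketch below is not part of the commit; it only illustrates the arithmetic,
assuming the time value is a GS.Format.TIME quantity, which GStreamer reports in
nanoseconds. The helper name is made up for illustration.

    ## Sketch only: 8*nbytes bits divided by (t_ns/1e6) milliseconds gives
    ## bits per millisecond, i.e. kilobits per second, matching both
    ## int((8e6*n)/t) and int(8*bytes*1e6/t) in the new code.
    def kbps(nbytes, t_ns):
      return int(8*nbytes*1e6/t_ns)

    ## Example: a 4000000-byte stream lasting 240 seconds comes out at
    ## kbps(4000000, 240 * 10**9) == 133, i.e. roughly 133 kbit/s.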