Mirror of https://github.com/bspeice/Melodia (synced 2024-11-16 04:58:20 -05:00)

Add in the audiotools resources

commit 48906a5513 (parent 51a861abe1)

Melodia/resources/__init__.py (new file, 6 lines)
@@ -0,0 +1,6 @@
import os, sys

def get_resource_dir():
    return os.path.dirname(
        os.path.abspath(__file__)
    )
Melodia/resources/audiotools/__aiff__.py (new file, 751 lines)
@@ -0,0 +1,751 @@
#!/usr/bin/python

#Audio Tools, a module and set of tools for manipulating audio data
#Copyright (C) 2007-2011 Brian Langenberger

#This program is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 2 of the License, or
#(at your option) any later version.

#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.

#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

from audiotools import (AudioFile, InvalidFile, Con, PCMReader,
                        __capped_stream_reader__, PCMReaderError,
                        transfer_data, DecodingError, EncodingError,
                        ID3v22Comment, BUFFER_SIZE, ChannelMask,
                        ReorderedPCMReader, pcm,
                        cStringIO, os, AiffContainer, to_pcm_progress)

import gettext

gettext.install("audiotools", unicode=True)

_HUGE_VAL = 1.79769313486231e+308

class IEEE_Extended(Con.Adapter):
|
||||
"""A construct for handling 80-bit IEEE-extended values."""
|
||||
|
||||
def __init__(self, name):
|
||||
Con.Adapter.__init__(
|
||||
self,
|
||||
Con.Struct(name,
|
||||
Con.Embed(Con.BitStruct(None,
|
||||
Con.Flag("signed"),
|
||||
Con.Bits("exponent", 15))),
|
||||
Con.UBInt64("mantissa")))
|
||||
|
||||
def _encode(self, value, context):
|
||||
import math
|
||||
|
||||
if (value < 0):
|
||||
signed = True
|
||||
value *= -1
|
||||
else:
|
||||
signed = False
|
||||
|
||||
(fmant, exponent) = math.frexp(value)
|
||||
if ((exponent > 16384) or (fmant >= 1)):
|
||||
exponent = 0x7FFF
|
||||
mantissa = 0
|
||||
else:
|
||||
exponent += 16382
|
||||
mantissa = fmant * (2 ** 64)
|
||||
|
||||
return Con.Container(signed=signed,
|
||||
exponent=exponent,
|
||||
mantissa=mantissa)
|
||||
|
||||
def _decode(self, obj, context):
|
||||
if ((obj.exponent == 0) and (obj.mantissa == 0)):
|
||||
return 0
|
||||
else:
|
||||
if (obj.exponent == 0x7FFF):
|
||||
return _HUGE_VAL
|
||||
else:
|
||||
f = obj.mantissa * (2.0 ** (obj.exponent - 16383 - 63))
|
||||
return f if not obj.signed else -f
|
||||
|
||||
#######################
|
||||
#AIFF
|
||||
#######################
|
||||
|
||||
|
||||
class AiffReader(PCMReader):
|
||||
"""A subclass of PCMReader for reading AIFF file contents."""
|
||||
|
||||
def __init__(self, aiff_file,
|
||||
sample_rate, channels, channel_mask, bits_per_sample,
|
||||
chunk_length, process=None):
|
||||
"""aiff_file should be rewound to the start of the SSND chunk."""
|
||||
|
||||
alignment = AiffAudio.SSND_ALIGN.parse_stream(aiff_file)
|
||||
PCMReader.__init__(self,
|
||||
file=__capped_stream_reader__(
|
||||
aiff_file,
|
||||
chunk_length - AiffAudio.SSND_ALIGN.sizeof()),
|
||||
sample_rate=sample_rate,
|
||||
channels=channels,
|
||||
channel_mask=channel_mask,
|
||||
bits_per_sample=bits_per_sample,
|
||||
process=process,
|
||||
signed=True,
|
||||
big_endian=True)
|
||||
self.ssnd_chunk_length = chunk_length - 8
|
||||
standard_channel_mask = ChannelMask(self.channel_mask)
|
||||
aiff_channel_mask = AIFFChannelMask(standard_channel_mask)
|
||||
if (channels in (3, 4, 6)):
|
||||
self.channel_order = [aiff_channel_mask.channels().index(channel)
|
||||
for channel in
|
||||
standard_channel_mask.channels()]
|
||||
else:
|
||||
self.channel_order = None
|
||||
|
||||
def read(self, bytes):
|
||||
"""Try to read a pcm.FrameList of size "bytes"."""
|
||||
|
||||
#align bytes downward if an odd number is read in
|
||||
bytes -= (bytes % (self.channels * self.bits_per_sample / 8))
|
||||
pcm_data = self.file.read(
|
||||
max(bytes, self.channels * self.bits_per_sample / 8))
|
||||
if ((len(pcm_data) == 0) and (self.ssnd_chunk_length > 0)):
|
||||
raise IOError("ssnd chunk ends prematurely")
|
||||
else:
|
||||
self.ssnd_chunk_length -= len(pcm_data)
|
||||
|
||||
try:
|
||||
framelist = pcm.FrameList(pcm_data,
|
||||
self.channels,
|
||||
self.bits_per_sample,
|
||||
True, True)
|
||||
if (self.channel_order is not None):
|
||||
return pcm.from_channels([framelist.channel(channel)
|
||||
for channel in self.channel_order])
|
||||
else:
|
||||
return framelist
|
||||
except ValueError:
|
||||
raise IOError("ssnd chunk ends prematurely")
|
||||
|
||||
|
||||
class InvalidAIFF(InvalidFile):
|
||||
"""Raised if some problem occurs parsing AIFF chunks."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class AiffAudio(AiffContainer):
|
||||
"""An AIFF audio file."""
|
||||
|
||||
SUFFIX = "aiff"
|
||||
NAME = SUFFIX
|
||||
|
||||
AIFF_HEADER = Con.Struct("aiff_header",
|
||||
Con.Const(Con.Bytes("aiff_id", 4), "FORM"),
|
||||
Con.UBInt32("aiff_size"),
|
||||
Con.Const(Con.Bytes("aiff_type", 4), "AIFF"))
|
||||
|
||||
CHUNK_HEADER = Con.Struct("chunk_header",
|
||||
Con.Bytes("chunk_id", 4),
|
||||
Con.UBInt32("chunk_length"))
|
||||
|
||||
COMM_CHUNK = Con.Struct("comm",
|
||||
Con.UBInt16("channels"),
|
||||
Con.UBInt32("total_sample_frames"),
|
||||
Con.UBInt16("sample_size"),
|
||||
IEEE_Extended("sample_rate"))
|
||||
|
||||
SSND_ALIGN = Con.Struct("ssnd",
|
||||
Con.UBInt32("offset"),
|
||||
Con.UBInt32("blocksize"))
|
||||
|
||||
PRINTABLE_ASCII = set([chr(i) for i in xrange(0x20, 0x7E + 1)])
|
||||
|
||||
def __init__(self, filename):
|
||||
"""filename is a plain string."""
|
||||
|
||||
self.filename = filename
|
||||
|
||||
comm_found = False
|
||||
ssnd_found = False
|
||||
try:
|
||||
f = open(self.filename, 'rb')
|
||||
for (chunk_id, chunk_length, chunk_offset) in self.chunks():
|
||||
if (chunk_id == 'COMM'):
|
||||
f.seek(chunk_offset, 0)
|
||||
comm = self.COMM_CHUNK.parse(f.read(chunk_length))
|
||||
self.__channels__ = comm.channels
|
||||
self.__total_sample_frames__ = comm.total_sample_frames
|
||||
self.__sample_size__ = comm.sample_size
|
||||
self.__sample_rate__ = int(comm.sample_rate)
|
||||
comm_found = True
|
||||
elif (chunk_id == 'SSND'):
|
||||
f.seek(chunk_offset, 0)
|
||||
ssnd = self.SSND_ALIGN.parse_stream(f)
|
||||
ssnd_found = True
|
||||
elif (not set(chunk_id).issubset(self.PRINTABLE_ASCII)):
|
||||
raise InvalidAIFF(_("chunk header not ASCII"))
|
||||
|
||||
if (not comm_found):
|
||||
raise InvalidAIFF(_("no COMM chunk found"))
|
||||
if (not ssnd_found):
|
||||
raise InvalidAIFF(_("no SSND chunk found"))
|
||||
f.close()
|
||||
except IOError, msg:
|
||||
raise InvalidAIFF(str(msg))
|
||||
except Con.FieldError:
|
||||
raise InvalidAIFF(_("invalid COMM or SSND chunk"))
|
||||
|
||||
def bits_per_sample(self):
|
||||
"""Returns an integer number of bits-per-sample this track contains."""
|
||||
|
||||
return self.__sample_size__
|
||||
|
||||
def channels(self):
|
||||
"""Returns an integer number of channels this track contains."""
|
||||
|
||||
return self.__channels__
|
||||
|
||||
def channel_mask(self):
|
||||
"""Returns a ChannelMask object of this track's channel layout."""
|
||||
|
||||
#this unusual arrangement is taken from the AIFF specification
|
||||
if (self.channels() <= 2):
|
||||
return ChannelMask.from_channels(self.channels())
|
||||
elif (self.channels() == 3):
|
||||
return ChannelMask.from_fields(
|
||||
front_left=True, front_right=True, front_center=True)
|
||||
elif (self.channels() == 4):
|
||||
return ChannelMask.from_fields(
|
||||
front_left=True, front_right=True,
|
||||
back_left=True, back_right=True)
|
||||
elif (self.channels() == 6):
|
||||
return ChannelMask.from_fields(
|
||||
front_left=True, side_left=True,
|
||||
front_center=True, front_right=True,
|
||||
side_right=True, back_center=True)
|
||||
else:
|
||||
return ChannelMask(0)
|
||||
|
||||
def lossless(self):
|
||||
"""Returns True."""
|
||||
|
||||
return True
|
||||
|
||||
def total_frames(self):
|
||||
"""Returns the total PCM frames of the track as an integer."""
|
||||
|
||||
return self.__total_sample_frames__
|
||||
|
||||
def sample_rate(self):
|
||||
"""Returns the rate of the track's audio as an integer number of Hz."""
|
||||
|
||||
return self.__sample_rate__
|
||||
|
||||
@classmethod
|
||||
def is_type(cls, file):
|
||||
"""Returns True if the given file object describes this format.
|
||||
|
||||
Takes a seekable file pointer rewound to the start of the file."""
|
||||
|
||||
header = file.read(12)
|
||||
|
||||
return ((header[0:4] == 'FORM') and
|
||||
(header[8:12] == 'AIFF'))
|
||||
|
||||
def chunks(self):
|
||||
"""Yields a (chunk_id, length, offset) per AIFF chunk."""
|
||||
|
||||
f = open(self.filename, 'rb')
|
||||
try:
|
||||
aiff_header = self.AIFF_HEADER.parse_stream(f)
|
||||
except Con.ConstError:
|
||||
raise InvalidAIFF(_(u"Not an AIFF file"))
|
||||
except Con.core.FieldError:
|
||||
raise InvalidAIFF(_(u"Invalid AIFF file"))
|
||||
|
||||
total_size = aiff_header.aiff_size - 4
|
||||
while (total_size > 0):
|
||||
chunk_header = self.CHUNK_HEADER.parse_stream(f)
|
||||
total_size -= 8
|
||||
yield (chunk_header.chunk_id,
|
||||
chunk_header.chunk_length,
|
||||
f.tell())
|
||||
f.seek(chunk_header.chunk_length, 1)
|
||||
total_size -= chunk_header.chunk_length
|
||||
f.close()
|
||||
|
||||
def comm_chunk(self):
|
||||
"""Returns (channels, pcm_frames, bits_per_sample, sample_rate) ."""
|
||||
|
||||
try:
|
||||
for (chunk_id, chunk_length, chunk_offset) in self.chunks():
|
||||
if (chunk_id == 'COMM'):
|
||||
f = open(self.filename, 'rb')
|
||||
f.seek(chunk_offset, 0)
|
||||
comm = self.COMM_CHUNK.parse(f.read(chunk_length))
|
||||
f.close()
|
||||
return (comm.channels,
|
||||
comm.total_sample_frames,
|
||||
comm.sample_size,
|
||||
int(comm.sample_rate))
|
||||
else:
|
||||
raise InvalidAIFF(_(u"COMM chunk not found"))
|
||||
except IOError, msg:
|
||||
raise InvalidAIFF(str(msg))
|
||||
except Con.FieldError:
|
||||
raise InvalidAIFF(_(u"invalid COMM chunk"))
|
||||
|
||||
def chunk_files(self):
|
||||
"""Yields a (chunk_id,length,file) per AIFF chunk.
|
||||
|
||||
The file object is capped to read only its chunk data."""
|
||||
|
||||
f = open(self.filename, 'rb')
|
||||
try:
|
||||
aiff_header = self.AIFF_HEADER.parse_stream(f)
|
||||
except Con.ConstError:
|
||||
raise InvalidAIFF(_(u"Not an AIFF file"))
|
||||
except Con.core.FieldError:
|
||||
raise InvalidAIFF(_(u"Invalid AIFF file"))
|
||||
|
||||
total_size = aiff_header.aiff_size - 4
|
||||
while (total_size > 0):
|
||||
chunk_header = self.CHUNK_HEADER.parse_stream(f)
|
||||
total_size -= 8
|
||||
yield (chunk_header.chunk_id,
|
||||
chunk_header.chunk_length,
|
||||
__capped_stream_reader__(f, chunk_header.chunk_length))
|
||||
total_size -= chunk_header.chunk_length
|
||||
f.close()
|
||||
|
||||
def get_metadata(self):
|
||||
"""Returns a MetaData object, or None.
|
||||
|
||||
Raises IOError if unable to read the file."""
|
||||
|
||||
for (chunk_id, chunk_length, chunk_offset) in self.chunks():
|
||||
if (chunk_id == 'ID3 '):
|
||||
f = open(self.filename, 'rb')
|
||||
f.seek(chunk_offset, 0)
|
||||
id3 = ID3v22Comment.parse(f)
|
||||
f.close()
|
||||
return id3
|
||||
else:
|
||||
return None
|
||||
|
||||
def set_metadata(self, metadata):
|
||||
"""Takes a MetaData object and sets this track's metadata.
|
||||
|
||||
This metadata includes track name, album name, and so on.
|
||||
Raises IOError if unable to write the file."""
|
||||
|
||||
if (metadata is None):
|
||||
return
|
||||
|
||||
import tempfile
|
||||
|
||||
id3_chunk = ID3v22Comment.converted(metadata).build()
|
||||
|
||||
new_aiff = tempfile.TemporaryFile()
|
||||
new_aiff.seek(12, 0)
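#leave 12 bytes of space for the FORM<size>AIFF header,
#which is rewritten below once the total size is known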
|
||||
|
||||
id3_found = False
|
||||
for (chunk_id, chunk_length, chunk_file) in self.chunk_files():
|
||||
if (chunk_id != 'ID3 '):
|
||||
new_aiff.write(self.CHUNK_HEADER.build(
|
||||
Con.Container(chunk_id=chunk_id,
|
||||
chunk_length=chunk_length)))
|
||||
transfer_data(chunk_file.read, new_aiff.write)
|
||||
else:
|
||||
new_aiff.write(self.CHUNK_HEADER.build(
|
||||
Con.Container(chunk_id='ID3 ',
|
||||
chunk_length=len(id3_chunk))))
|
||||
new_aiff.write(id3_chunk)
|
||||
id3_found = True
|
||||
|
||||
if (not id3_found):
|
||||
new_aiff.write(self.CHUNK_HEADER.build(
|
||||
Con.Container(chunk_id='ID3 ',
|
||||
chunk_length=len(id3_chunk))))
|
||||
new_aiff.write(id3_chunk)
|
||||
|
||||
header = Con.Container(
|
||||
aiff_id='FORM',
|
||||
aiff_size=new_aiff.tell() - 8,
|
||||
aiff_type='AIFF')
|
||||
new_aiff.seek(0, 0)
|
||||
new_aiff.write(self.AIFF_HEADER.build(header))
|
||||
new_aiff.seek(0, 0)
|
||||
f = open(self.filename, 'wb')
|
||||
transfer_data(new_aiff.read, f.write)
|
||||
new_aiff.close()
|
||||
f.close()
|
||||
|
||||
def delete_metadata(self):
|
||||
"""Deletes the track's MetaData.
|
||||
|
||||
This removes or unsets tags as necessary in order to remove all data.
|
||||
Raises IOError if unable to write the file."""
|
||||
|
||||
import tempfile
|
||||
|
||||
new_aiff = tempfile.TemporaryFile()
|
||||
new_aiff.seek(12, 0)
|
||||
|
||||
for (chunk_id, chunk_length, chunk_file) in self.chunk_files():
|
||||
if (chunk_id != 'ID3 '):
|
||||
new_aiff.write(self.CHUNK_HEADER.build(
|
||||
Con.Container(chunk_id=chunk_id,
|
||||
chunk_length=chunk_length)))
|
||||
transfer_data(chunk_file.read, new_aiff.write)
|
||||
|
||||
header = Con.Container(
|
||||
aiff_id='FORM',
|
||||
aiff_size=new_aiff.tell() - 8,
|
||||
aiff_type='AIFF')
|
||||
new_aiff.seek(0, 0)
|
||||
new_aiff.write(self.AIFF_HEADER.build(header))
|
||||
new_aiff.seek(0, 0)
|
||||
f = open(self.filename, 'wb')
|
||||
transfer_data(new_aiff.read, f.write)
|
||||
new_aiff.close()
|
||||
f.close()
|
||||
|
||||
def to_pcm(self):
|
||||
"""Returns a PCMReader object containing the track's PCM data."""
|
||||
|
||||
for (chunk_id, chunk_length, chunk_offset) in self.chunks():
|
||||
if (chunk_id == 'SSND'):
|
||||
f = open(self.filename, 'rb')
|
||||
f.seek(chunk_offset, 0)
|
||||
return AiffReader(f,
|
||||
self.sample_rate(),
|
||||
self.channels(),
|
||||
int(self.channel_mask()),
|
||||
self.bits_per_sample(),
|
||||
chunk_length)
|
||||
else:
|
||||
return PCMReaderError(u"no SSND chunk found",
|
||||
self.sample_rate(),
|
||||
self.channels(),
|
||||
int(self.channel_mask()),
|
||||
self.bits_per_sample())
|
||||
|
||||
@classmethod
|
||||
def from_pcm(cls, filename, pcmreader, compression=None):
|
||||
"""Encodes a new file from PCM data.
|
||||
|
||||
Takes a filename string, PCMReader object
|
||||
and optional compression level string.
|
||||
Encodes a new audio file from pcmreader's data
|
||||
at the given filename with the specified compression level
|
||||
and returns a new AiffAudio object."""
|
||||
|
||||
try:
|
||||
f = open(filename, 'wb')
|
||||
except IOError, msg:
|
||||
raise EncodingError(str(msg))
|
||||
|
||||
if (int(pcmreader.channel_mask) in
|
||||
(0x4, # FC
|
||||
0x3, # FL, FR
|
||||
0x7, # FL, FR, FC
|
||||
0x33, # FL, FR, BL, BR
|
||||
0x707)): # FL, SL, FC, FR, SR, BC
|
||||
standard_channel_mask = ChannelMask(pcmreader.channel_mask)
|
||||
aiff_channel_mask = AIFFChannelMask(standard_channel_mask)
|
||||
pcmreader = ReorderedPCMReader(
|
||||
pcmreader,
|
||||
[standard_channel_mask.channels().index(channel)
|
||||
for channel in aiff_channel_mask.channels()])
|
||||
|
||||
try:
|
||||
aiff_header = Con.Container(aiff_id='FORM',
|
||||
aiff_size=4,
|
||||
aiff_type='AIFF')
|
||||
|
||||
comm_chunk = Con.Container(
|
||||
channels=pcmreader.channels,
|
||||
total_sample_frames=0,
|
||||
sample_size=pcmreader.bits_per_sample,
|
||||
sample_rate=float(pcmreader.sample_rate))
|
||||
|
||||
ssnd_header = Con.Container(chunk_id='SSND',
|
||||
chunk_length=0)
|
||||
ssnd_alignment = Con.Container(offset=0,
|
||||
blocksize=0)
|
||||
|
||||
#skip ahead to the start of the SSND chunk
|
||||
f.seek(cls.AIFF_HEADER.sizeof() +
|
||||
cls.CHUNK_HEADER.sizeof() +
|
||||
cls.COMM_CHUNK.sizeof() +
|
||||
cls.CHUNK_HEADER.sizeof(), 0)
|
||||
|
||||
#write the SSND alignment info
|
||||
f.write(cls.SSND_ALIGN.build(ssnd_alignment))
|
||||
|
||||
#write big-endian samples to SSND chunk from pcmreader
|
||||
try:
|
||||
framelist = pcmreader.read(BUFFER_SIZE)
|
||||
except (ValueError, IOError), err:
|
||||
cls.__unlink__(filename)
|
||||
raise EncodingError(str(err))
|
||||
except Exception, err:
|
||||
cls.__unlink__(filename)
|
||||
raise err
|
||||
total_pcm_frames = 0
|
||||
while (len(framelist) > 0):
|
||||
f.write(framelist.to_bytes(True, True))
|
||||
total_pcm_frames += framelist.frames
|
||||
try:
|
||||
framelist = pcmreader.read(BUFFER_SIZE)
|
||||
except (ValueError, IOError), err:
|
||||
cls.__unlink__(filename)
|
||||
raise EncodingError(str(err))
|
||||
except Exception, err:
|
||||
cls.__unlink__(filename)
|
||||
raise err
|
||||
total_size = f.tell()
|
||||
|
||||
#return to the start of the file
|
||||
f.seek(0, 0)
|
||||
|
||||
#write AIFF header
|
||||
aiff_header.aiff_size = total_size - 8
|
||||
f.write(cls.AIFF_HEADER.build(aiff_header))
|
||||
|
||||
#write COMM chunk
|
||||
comm_chunk.total_sample_frames = total_pcm_frames
|
||||
comm_chunk = cls.COMM_CHUNK.build(comm_chunk)
|
||||
f.write(cls.CHUNK_HEADER.build(Con.Container(
|
||||
chunk_id='COMM',
|
||||
chunk_length=len(comm_chunk))))
|
||||
f.write(comm_chunk)
|
||||
|
||||
#write SSND chunk header
|
||||
f.write(cls.CHUNK_HEADER.build(Con.Container(
|
||||
chunk_id='SSND',
|
||||
chunk_length=(total_pcm_frames *
|
||||
(pcmreader.bits_per_sample / 8) *
|
||||
pcmreader.channels) +
|
||||
cls.SSND_ALIGN.sizeof())))
|
||||
try:
|
||||
pcmreader.close()
|
||||
except DecodingError, err:
|
||||
raise EncodingError(err.error_message)
|
||||
finally:
|
||||
f.close()
|
||||
|
||||
return cls(filename)
|
||||
|
||||
def to_aiff(self, aiff_filename, progress=None):
|
||||
"""Writes the contents of this file to the given .aiff filename string.
|
||||
|
||||
Raises EncodingError if some error occurs during decoding."""
|
||||
|
||||
try:
|
||||
self.verify()
|
||||
except InvalidAIFF, err:
|
||||
raise EncodingError(str(err))
|
||||
|
||||
try:
|
||||
output = file(aiff_filename, 'wb')
|
||||
input = file(self.filename, 'rb')
|
||||
except IOError, msg:
|
||||
raise EncodingError(str(msg))
|
||||
try:
|
||||
transfer_data(input.read, output.write)
|
||||
finally:
|
||||
input.close()
|
||||
output.close()
|
||||
|
||||
@classmethod
|
||||
def from_aiff(cls, filename, aiff_filename, compression=None,
|
||||
progress=None):
|
||||
try:
|
||||
cls(aiff_filename).verify()
|
||||
except InvalidAIFF, err:
|
||||
raise EncodingError(unicode(err))
|
||||
|
||||
try:
|
||||
input = file(aiff_filename, 'rb')
|
||||
output = file(filename, 'wb')
|
||||
except IOError, err:
|
||||
raise EncodingError(str(err))
|
||||
try:
|
||||
total_bytes = os.path.getsize(aiff_filename)
|
||||
current_bytes = 0
|
||||
s = input.read(4096)
|
||||
while (len(s) > 0):
|
||||
current_bytes += len(s)
|
||||
output.write(s)
|
||||
if (progress is not None):
|
||||
progress(current_bytes, total_bytes)
|
||||
s = input.read(4096)
|
||||
output.flush()
|
||||
try:
|
||||
return AiffAudio(filename)
|
||||
except InvalidFile:
|
||||
cls.__unlink__(filename)
|
||||
raise EncodingError(u"invalid AIFF source file")
|
||||
finally:
|
||||
input.close()
|
||||
output.close()
|
||||
|
||||
def convert(self, target_path, target_class, compression=None,
|
||||
progress=None):
|
||||
"""Encodes a new AudioFile from existing AudioFile.
|
||||
|
||||
Take a filename string, target class and optional compression string.
|
||||
Encodes a new AudioFile in the target class and returns
|
||||
the resulting object.
|
||||
May raise EncodingError if some problem occurs during encoding."""
|
||||
|
||||
if (hasattr(target_class, "from_aiff")):
|
||||
return target_class.from_aiff(target_path,
|
||||
self.filename,
|
||||
compression=compression,
|
||||
progress=progress)
|
||||
else:
|
||||
return target_class.from_pcm(target_path,
|
||||
to_pcm_progress(self, progress),
|
||||
compression)
|
||||
|
||||
def pcm_split(self):
|
||||
"""Returns a pair of data strings before and after PCM data.
|
||||
|
||||
The first contains all data before the PCM content of the data chunk.
|
||||
The second contains all data after the PCM content of the data chunk.
|
||||
"""
|
||||
|
||||
head = cStringIO.StringIO()
|
||||
tail = cStringIO.StringIO()
|
||||
current_block = head
|
||||
|
||||
aiff_file = open(self.filename, 'rb')
|
||||
try:
|
||||
try:
|
||||
#transfer the 12-byte FORM<size>AIFF header
|
||||
header = AiffAudio.AIFF_HEADER.parse(aiff_file.read(12))
|
||||
total_size = header.aiff_size - 4
|
||||
current_block.write(AiffAudio.AIFF_HEADER.build(header))
|
||||
except Con.ConstError:
|
||||
raise InvalidAIFF(_(u"Not an AIFF file"))
|
||||
except Con.core.FieldError:
|
||||
raise InvalidAIFF(_(u"Invalid AIFF file"))
|
||||
|
||||
while (total_size > 0):
|
||||
try:
|
||||
#transfer each chunk header
|
||||
chunk_header = AiffAudio.CHUNK_HEADER.parse(
|
||||
aiff_file.read(8))
|
||||
current_block.write(AiffAudio.CHUNK_HEADER.build(
|
||||
chunk_header))
|
||||
total_size -= 8
|
||||
except Con.core.FieldError:
|
||||
raise InvalidAIFF(_(u"Invalid AIFF file"))
|
||||
|
||||
#and transfer the full content of non-ssnd chunks
|
||||
if (chunk_header.chunk_id != "SSND"):
|
||||
current_block.write(
|
||||
aiff_file.read(chunk_header.chunk_length))
|
||||
else:
|
||||
#or, the top 8 align bytes of the ssnd chunk
|
||||
try:
|
||||
align = AiffAudio.SSND_ALIGN.parse(
|
||||
aiff_file.read(8))
|
||||
current_block.write(AiffAudio.SSND_ALIGN.build(
|
||||
align))
|
||||
aiff_file.seek(chunk_header.chunk_length - 8,
|
||||
os.SEEK_CUR)
|
||||
current_block = tail
|
||||
except Con.core.FieldError:
|
||||
raise InvalidAIFF(_(u"Invalid AIFF file"))
|
||||
|
||||
total_size -= chunk_header.chunk_length
|
||||
|
||||
return (head.getvalue(), tail.getvalue())
|
||||
finally:
|
||||
aiff_file.close()
|
||||
|
||||
@classmethod
|
||||
def aiff_from_chunks(cls, filename, chunk_iter):
|
||||
"""Builds a new AIFF file from a chunk data iterator.
|
||||
|
||||
filename is the path to the AIFF file to build.
|
||||
chunk_iter should yield (chunk_id, chunk_data) tuples.
|
||||
"""
|
||||
|
||||
f = file(filename, 'wb')
|
||||
|
||||
header = Con.Container()
|
||||
header.aiff_id = 'FORM'
|
||||
header.aiff_type = 'AIFF'
|
||||
header.aiff_size = 4
|
||||
|
||||
#write an unfinished header with an invalid size (for now)
|
||||
f.write(cls.AIFF_HEADER.build(header))
|
||||
|
||||
for (chunk_id, chunk_data) in chunk_iter:
|
||||
|
||||
#not sure if I need to fix chunk sizes
|
||||
#to fall on 16-bit boundaries
|
||||
|
||||
chunk_header = cls.CHUNK_HEADER.build(
|
||||
Con.Container(chunk_id=chunk_id,
|
||||
chunk_length=len(chunk_data)))
|
||||
f.write(chunk_header)
|
||||
header.aiff_size += len(chunk_header)
|
||||
|
||||
f.write(chunk_data)
|
||||
header.aiff_size += len(chunk_data)
|
||||
|
||||
#now that the chunks are done, go back and re-write the header
|
||||
f.seek(0, 0)
|
||||
f.write(cls.AIFF_HEADER.build(header))
|
||||
f.close()
|
||||
|
||||
def has_foreign_aiff_chunks(self):
|
||||
return (set(['COMM', 'SSND']) !=
|
||||
set([chunk[0] for chunk in self.chunks()]))
|
||||
|
||||
|
||||
class AIFFChannelMask(ChannelMask):
|
||||
"""The AIFF-specific channel mapping."""
|
||||
|
||||
def __repr__(self):
|
||||
return "AIFFChannelMask(%s)" % \
|
||||
",".join(["%s=%s" % (field, getattr(self, field))
|
||||
for field in self.SPEAKER_TO_MASK.keys()
|
||||
if (getattr(self, field))])
|
||||
|
||||
def channels(self):
|
||||
"""Returns a list of speaker strings this mask contains.
|
||||
|
||||
Returned in the order in which they should appear
|
||||
in the PCM stream.
|
||||
"""
|
||||
|
||||
count = len(self)
|
||||
if (count == 1):
|
||||
return ["front_center"]
|
||||
elif (count == 2):
|
||||
return ["front_left", "front_right"]
|
||||
elif (count == 3):
|
||||
return ["front_left", "front_right", "front_center"]
|
||||
elif (count == 4):
|
||||
return ["front_left", "front_right",
|
||||
"back_left", "back_right"]
|
||||
elif (count == 6):
|
||||
return ["front_left", "side_left", "front_center",
|
||||
"front_right", "side_right", "back_center"]
|
||||
else:
|
||||
return []
Melodia/resources/audiotools/__ape__.py (new file, 808 lines)
@@ -0,0 +1,808 @@
#!/usr/bin/python

#Audio Tools, a module and set of tools for manipulating audio data
#Copyright (C) 2007-2011 Brian Langenberger

#This program is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 2 of the License, or
#(at your option) any later version.

#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.

#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA


from audiotools import (AudioFile, WaveAudio, InvalidFile, PCMReader,
                        Con, transfer_data, subprocess, BIN, MetaData,
                        os, re, TempWaveReader, Image, cStringIO)
import gettext

gettext.install("audiotools", unicode=True)

#takes a pair of integers for the current and total values
|
||||
#returns a unicode string of their combined pair
|
||||
#for example, __number_pair__(2,3) returns u"2/3"
|
||||
#whereas __number_pair__(4,0) returns u"4"
|
||||
def __number_pair__(current, total):
|
||||
if (total == 0):
|
||||
return u"%d" % (current)
|
||||
else:
|
||||
return u"%d/%d" % (current, total)
|
||||
|
||||
|
||||
#######################
|
||||
#MONKEY'S AUDIO
|
||||
#######################
|
||||
|
||||
|
||||
class ApeTagItem:
|
||||
"""A container for APEv2 tag items."""
|
||||
|
||||
APEv2_FLAGS = Con.BitStruct("APEv2_FLAGS",
|
||||
Con.Bits("undefined1", 5),
|
||||
Con.Flag("read_only"),
|
||||
Con.Bits("encoding", 2),
|
||||
Con.Bits("undefined2", 16),
|
||||
Con.Flag("contains_header"),
|
||||
Con.Flag("contains_no_footer"),
|
||||
Con.Flag("is_header"),
|
||||
Con.Bits("undefined3", 5))
|
||||
|
||||
APEv2_TAG = Con.Struct("APEv2_TAG",
|
||||
Con.ULInt32("length"),
|
||||
Con.Embed(APEv2_FLAGS),
|
||||
Con.CString("key"),
|
||||
Con.MetaField("value",
|
||||
lambda ctx: ctx["length"]))
|
||||
|
||||
def __init__(self, item_type, read_only, key, data):
|
||||
"""Fields are as follows:
|
||||
|
||||
item_type is 0 = UTF-8, 1 = binary, 2 = external, 3 = reserved.
|
||||
read_only is True if the item is read only.
|
||||
key is an ASCII string.
|
||||
data is a binary string of the data itself.
|
||||
"""
|
||||
|
||||
self.type = item_type
|
||||
self.read_only = read_only
|
||||
self.key = key
|
||||
self.data = data
|
||||
|
||||
def __repr__(self):
|
||||
return "ApeTagItem(%s,%s,%s,%s)" % \
|
||||
(repr(self.type),
|
||||
repr(self.read_only),
|
||||
repr(self.key),
|
||||
repr(self.data))
|
||||
|
||||
def __str__(self):
|
||||
return self.data
|
||||
|
||||
def __unicode__(self):
|
||||
return self.data.rstrip(chr(0)).decode('utf-8', 'replace')
|
||||
|
||||
def build(self):
|
||||
"""Returns this tag as a binary string of data."""
|
||||
|
||||
return self.APEv2_TAG.build(
|
||||
Con.Container(key=self.key,
|
||||
value=self.data,
|
||||
length=len(self.data),
|
||||
encoding=self.type,
|
||||
undefined1=0,
|
||||
undefined2=0,
|
||||
undefined3=0,
|
||||
read_only=self.read_only,
|
||||
contains_header=False,
|
||||
contains_no_footer=False,
|
||||
is_header=False))
|
||||
|
||||
@classmethod
|
||||
def binary(cls, key, data):
|
||||
"""Returns an ApeTagItem of binary data.
|
||||
|
||||
key is an ASCII string, data is a binary string."""
|
||||
|
||||
return cls(1, False, key, data)
|
||||
|
||||
@classmethod
|
||||
def external(cls, key, data):
|
||||
"""Returns an ApeTagItem of external data.
|
||||
|
||||
key is an ASCII string, data is a binary string."""
|
||||
|
||||
return cls(2, False, key, data)
|
||||
|
||||
@classmethod
|
||||
def string(cls, key, data):
|
||||
"""Returns an ApeTagItem of text data.
|
||||
|
||||
key is an ASCII string, data is a UTF-8 binary string."""
|
||||
|
||||
return cls(0, False, key, data.encode('utf-8', 'replace'))
|
||||
|
||||
|
||||
class ApeTag(MetaData):
|
||||
"""A complete APEv2 tag."""
|
||||
|
||||
ITEM = ApeTagItem
|
||||
|
||||
APEv2_FLAGS = Con.BitStruct("APEv2_FLAGS",
|
||||
Con.Bits("undefined1", 5),
|
||||
Con.Flag("read_only"),
|
||||
Con.Bits("encoding", 2),
|
||||
Con.Bits("undefined2", 16),
|
||||
Con.Flag("contains_header"),
|
||||
Con.Flag("contains_no_footer"),
|
||||
Con.Flag("is_header"),
|
||||
Con.Bits("undefined3", 5))
|
||||
|
||||
APEv2_FOOTER = Con.Struct("APEv2",
|
||||
Con.String("preamble", 8),
|
||||
Con.ULInt32("version_number"),
|
||||
Con.ULInt32("tag_size"),
|
||||
Con.ULInt32("item_count"),
|
||||
Con.Embed(APEv2_FLAGS),
|
||||
Con.ULInt64("reserved"))
|
||||
|
||||
APEv2_HEADER = APEv2_FOOTER
|
||||
|
||||
APEv2_TAG = ApeTagItem.APEv2_TAG
|
||||
|
||||
ATTRIBUTE_MAP = {'track_name': 'Title',
|
||||
'track_number': 'Track',
|
||||
'track_total': 'Track',
|
||||
'album_number': 'Media',
|
||||
'album_total': 'Media',
|
||||
'album_name': 'Album',
|
||||
'artist_name': 'Artist',
|
||||
#"Performer" is not a defined APEv2 key
|
||||
#it would be nice to have, yet would not be standard
|
||||
'performer_name': 'Performer',
|
||||
'composer_name': 'Composer',
|
||||
'conductor_name': 'Conductor',
|
||||
'ISRC': 'ISRC',
|
||||
'catalog': 'Catalog',
|
||||
'copyright': 'Copyright',
|
||||
'publisher': 'Publisher',
|
||||
'year': 'Year',
|
||||
'date': 'Record Date',
|
||||
'comment': 'Comment'}
|
||||
|
||||
INTEGER_ITEMS = ('Track', 'Media')
|
||||
|
||||
def __init__(self, tags, tag_length=None):
|
||||
"""Constructs an ApeTag from a list of ApeTagItem objects.
|
||||
|
||||
tag_length is an optional total length integer."""
|
||||
|
||||
for tag in tags:
|
||||
if (not isinstance(tag, ApeTagItem)):
|
||||
raise ValueError("%s is not ApeTag" % (repr(tag)))
|
||||
self.__dict__["tags"] = tags
|
||||
self.__dict__["tag_length"] = tag_length
|
||||
|
||||
def __eq__(self, metadata):
|
||||
if (isinstance(metadata, ApeTag)):
|
||||
if (set(self.keys()) != set(metadata.keys())):
|
||||
return False
|
||||
|
||||
for tag in self.tags:
|
||||
try:
|
||||
if (tag.data != metadata[tag.key].data):
|
||||
return False
|
||||
except KeyError:
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
elif (isinstance(metadata, MetaData)):
|
||||
return MetaData.__eq__(self, metadata)
|
||||
else:
|
||||
return False
|
||||
|
||||
def keys(self):
|
||||
return [tag.key for tag in self.tags]
|
||||
|
||||
def __getitem__(self, key):
|
||||
for tag in self.tags:
|
||||
if (tag.key == key):
|
||||
return tag
|
||||
else:
|
||||
raise KeyError(key)
|
||||
|
||||
def get(self, key, default):
|
||||
try:
|
||||
return self[key]
|
||||
except KeyError:
|
||||
return default
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
for i in xrange(len(self.tags)):
|
||||
if (self.tags[i].key == key):
|
||||
self.tags[i] = value
|
||||
return
|
||||
else:
|
||||
self.tags.append(value)
|
||||
|
||||
def index(self, key):
|
||||
for (i, tag) in enumerate(self.tags):
|
||||
if (tag.key == key):
|
||||
return i
|
||||
else:
|
||||
raise ValueError(key)
|
||||
|
||||
def __delitem__(self, key):
|
||||
for i in xrange(len(self.tags)):
|
||||
if (self.tags[i].key == key):
|
||||
del(self.tags[i])
|
||||
return
|
||||
|
||||
#if an attribute is updated (e.g. self.track_name)
|
||||
#make sure to update the corresponding dict pair
|
||||
def __setattr__(self, key, value):
|
||||
if (key in self.ATTRIBUTE_MAP):
|
||||
if (key == 'track_number'):
|
||||
self['Track'] = self.ITEM.string(
|
||||
'Track', __number_pair__(value, self.track_total))
|
||||
elif (key == 'track_total'):
|
||||
self['Track'] = self.ITEM.string(
|
||||
'Track', __number_pair__(self.track_number, value))
|
||||
elif (key == 'album_number'):
|
||||
self['Media'] = self.ITEM.string(
|
||||
'Media', __number_pair__(value, self.album_total))
|
||||
elif (key == 'album_total'):
|
||||
self['Media'] = self.ITEM.string(
|
||||
'Media', __number_pair__(self.album_number, value))
|
||||
else:
|
||||
self[self.ATTRIBUTE_MAP[key]] = self.ITEM.string(
|
||||
self.ATTRIBUTE_MAP[key], value)
|
||||
else:
|
||||
self.__dict__[key] = value
|
||||
|
||||
def __getattr__(self, key):
|
||||
if (key == 'track_number'):
|
||||
try:
|
||||
return int(re.findall('\d+',
|
||||
unicode(self.get("Track", u"0")))[0])
|
||||
except IndexError:
|
||||
return 0
|
||||
elif (key == 'track_total'):
|
||||
try:
|
||||
return int(re.findall('\d+/(\d+)',
|
||||
unicode(self.get("Track", u"0")))[0])
|
||||
except IndexError:
|
||||
return 0
|
||||
elif (key == 'album_number'):
|
||||
try:
|
||||
return int(re.findall('\d+',
|
||||
unicode(self.get("Media", u"0")))[0])
|
||||
except IndexError:
|
||||
return 0
|
||||
elif (key == 'album_total'):
|
||||
try:
|
||||
return int(re.findall('\d+/(\d+)',
|
||||
unicode(self.get("Media", u"0")))[0])
|
||||
except IndexError:
|
||||
return 0
|
||||
elif (key in self.ATTRIBUTE_MAP):
|
||||
return unicode(self.get(self.ATTRIBUTE_MAP[key], u''))
|
||||
elif (key in MetaData.__FIELDS__):
|
||||
return u''
|
||||
else:
|
||||
try:
|
||||
return self.__dict__[key]
|
||||
except KeyError:
|
||||
raise AttributeError(key)
|
||||
|
||||
def __delattr__(self, key):
|
||||
if (key == 'track_number'):
|
||||
setattr(self, 'track_number', 0)
|
||||
if ((self.track_number == 0) and (self.track_total == 0)):
|
||||
del(self['Track'])
|
||||
elif (key == 'track_total'):
|
||||
setattr(self, 'track_total', 0)
|
||||
if ((self.track_number == 0) and (self.track_total == 0)):
|
||||
del(self['Track'])
|
||||
elif (key == 'album_number'):
|
||||
setattr(self, 'album_number', 0)
|
||||
if ((self.album_number == 0) and (self.album_total == 0)):
|
||||
del(self['Media'])
|
||||
elif (key == 'album_total'):
|
||||
setattr(self, 'album_total', 0)
|
||||
if ((self.album_number == 0) and (self.album_total == 0)):
|
||||
del(self['Media'])
|
||||
elif (key in self.ATTRIBUTE_MAP):
|
||||
try:
|
||||
del(self[self.ATTRIBUTE_MAP[key]])
|
||||
except ValueError:
|
||||
pass
|
||||
elif (key in MetaData.__FIELDS__):
|
||||
pass
|
||||
else:
|
||||
try:
|
||||
del(self.__dict__[key])
|
||||
except KeyError:
|
||||
raise AttributeError(key)
|
||||
|
||||
@classmethod
|
||||
def converted(cls, metadata):
|
||||
"""Converts a MetaData object to an ApeTag object."""
|
||||
|
||||
if ((metadata is None) or (isinstance(metadata, ApeTag))):
|
||||
return metadata
|
||||
else:
|
||||
tags = cls([])
|
||||
for (field, key) in cls.ATTRIBUTE_MAP.items():
|
||||
if (field not in cls.__INTEGER_FIELDS__):
|
||||
field = unicode(getattr(metadata, field))
|
||||
if (len(field) > 0):
|
||||
tags[key] = cls.ITEM.string(key, field)
|
||||
|
||||
if ((metadata.track_number != 0) or
|
||||
(metadata.track_total != 0)):
|
||||
tags["Track"] = cls.ITEM.string(
|
||||
"Track", __number_pair__(metadata.track_number,
|
||||
metadata.track_total))
|
||||
|
||||
if ((metadata.album_number != 0) or
|
||||
(metadata.album_total != 0)):
|
||||
tags["Media"] = cls.ITEM.string(
|
||||
"Media", __number_pair__(metadata.album_number,
|
||||
metadata.album_total))
|
||||
|
||||
for image in metadata.images():
|
||||
tags.add_image(image)
|
||||
|
||||
return tags
|
||||
|
||||
def merge(self, metadata):
|
||||
"""Updates any currently empty entries from metadata's values."""
|
||||
|
||||
metadata = self.__class__.converted(metadata)
|
||||
if (metadata is None):
|
||||
return
|
||||
|
||||
for tag in metadata.tags:
|
||||
if ((tag.key not in ('Track', 'Media')) and
|
||||
(len(str(tag)) > 0) and
|
||||
(len(str(self.get(tag.key, ""))) == 0)):
|
||||
self[tag.key] = tag
|
||||
for attr in ("track_number", "track_total",
|
||||
"album_number", "album_total"):
|
||||
if ((getattr(self, attr) == 0) and
|
||||
(getattr(metadata, attr) != 0)):
|
||||
setattr(self, attr, getattr(metadata, attr))
|
||||
|
||||
def __comment_name__(self):
|
||||
return u'APEv2'
|
||||
|
||||
#takes two (key,value) apetag pairs
|
||||
#returns cmp on the weighted set of them
|
||||
#(title first, then artist, album, tracknumber)
|
||||
@classmethod
|
||||
def __by_pair__(cls, pair1, pair2):
|
||||
KEY_MAP = {"Title": 1,
|
||||
"Album": 2,
|
||||
"Track": 3,
|
||||
"Media": 4,
|
||||
"Artist": 5,
|
||||
"Performer": 6,
|
||||
"Composer": 7,
|
||||
"Conductor": 8,
|
||||
"Catalog": 9,
|
||||
"Publisher": 10,
|
||||
"ISRC": 11,
|
||||
#"Media": 12,
|
||||
"Year": 13,
|
||||
"Record Date": 14,
|
||||
"Copyright": 15}
|
||||
|
||||
return cmp((KEY_MAP.get(pair1[0], 16), pair1[0], pair1[1]),
|
||||
(KEY_MAP.get(pair2[0], 16), pair2[0], pair2[1]))
|
||||
|
||||
def __comment_pairs__(self):
|
||||
items = []
|
||||
|
||||
for tag in self.tags:
|
||||
if (tag.key in ('Cover Art (front)', 'Cover Art (back)')):
|
||||
pass
|
||||
elif (tag.type == 0):
|
||||
items.append((tag.key, unicode(tag)))
|
||||
else:
|
||||
if (len(str(tag)) <= 20):
|
||||
items.append((tag.key, str(tag).encode('hex')))
|
||||
else:
|
||||
items.append((tag.key,
|
||||
str(tag).encode('hex')[0:39].upper() +
|
||||
u"\u2026"))
|
||||
|
||||
return sorted(items, ApeTag.__by_pair__)
|
||||
|
||||
@classmethod
|
||||
def supports_images(cls):
|
||||
"""Returns True."""
|
||||
|
||||
return True
|
||||
|
||||
def __parse_image__(self, key, type):
|
||||
data = cStringIO.StringIO(str(self[key]))
|
||||
description = Con.CString(None).parse_stream(data).decode('utf-8',
|
||||
'replace')
|
||||
data = data.read()
|
||||
return Image.new(data, description, type)
|
||||
|
||||
def add_image(self, image):
|
||||
"""Embeds an Image object in this metadata."""
|
||||
|
||||
if (image.type == 0):
|
||||
self['Cover Art (front)'] = self.ITEM.external(
|
||||
'Cover Art (front)',
|
||||
Con.CString(None).build(image.description.encode(
|
||||
'utf-8', 'replace')) + image.data)
|
||||
elif (image.type == 1):
|
||||
self['Cover Art (back)'] = self.ITEM.binary(
|
||||
'Cover Art (back)',
|
||||
Con.CString(None).build(image.description.encode(
|
||||
'utf-8', 'replace')) + image.data)
|
||||
|
||||
def delete_image(self, image):
|
||||
"""Deletes an Image object from this metadata."""
|
||||
|
||||
if ((image.type == 0) and 'Cover Art (front)' in self.keys()):
|
||||
del(self['Cover Art (front)'])
|
||||
elif ((image.type == 1) and 'Cover Art (back)' in self.keys()):
|
||||
del(self['Cover Art (back)'])
|
||||
|
||||
def images(self):
|
||||
"""Returns a list of embedded Image objects."""
|
||||
|
||||
#APEv2 supports only one value per key
|
||||
#so a single front and back cover are all that is possible
|
||||
img = []
|
||||
if ('Cover Art (front)' in self.keys()):
|
||||
img.append(self.__parse_image__('Cover Art (front)', 0))
|
||||
if ('Cover Art (back)' in self.keys()):
|
||||
img.append(self.__parse_image__('Cover Art (back)', 1))
|
||||
return img
|
||||
|
||||
@classmethod
|
||||
def read(cls, apefile):
|
||||
"""Returns an ApeTag object from an APEv2 tagged file object.
|
||||
|
||||
May return None if the file object has no tag."""
|
||||
|
||||
apefile.seek(-32, 2)
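#an APEv2 footer, if present, occupies the last 32 bytes of the file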
|
||||
footer = cls.APEv2_FOOTER.parse(apefile.read(32))
|
||||
|
||||
if (footer.preamble != 'APETAGEX'):
|
||||
return None
|
||||
|
||||
apefile.seek(-(footer.tag_size), 2)
|
||||
|
||||
return cls([ApeTagItem(item_type=tag.encoding,
|
||||
read_only=tag.read_only,
|
||||
key=tag.key,
|
||||
data=tag.value)
|
||||
for tag in Con.StrictRepeater(
|
||||
footer.item_count,
|
||||
cls.APEv2_TAG).parse(apefile.read())],
|
||||
tag_length=footer.tag_size + ApeTag.APEv2_FOOTER.sizeof()
|
||||
if footer.contains_header else
|
||||
footer.tag_size)
|
||||
|
||||
def build(self):
|
||||
"""Returns an APEv2 tag as a binary string."""
|
||||
|
||||
header = Con.Container(preamble='APETAGEX',
|
||||
version_number=2000,
|
||||
tag_size=0,
|
||||
item_count=len(self.tags),
|
||||
undefined1=0,
|
||||
undefined2=0,
|
||||
undefined3=0,
|
||||
read_only=False,
|
||||
encoding=0,
|
||||
contains_header=True,
|
||||
contains_no_footer=False,
|
||||
is_header=True,
|
||||
reserved=0l)
|
||||
|
||||
footer = Con.Container(preamble=header.preamble,
|
||||
version_number=header.version_number,
|
||||
tag_size=0,
|
||||
item_count=len(self.tags),
|
||||
undefined1=0,
|
||||
undefined2=0,
|
||||
undefined3=0,
|
||||
read_only=False,
|
||||
encoding=0,
|
||||
contains_header=True,
|
||||
contains_no_footer=False,
|
||||
is_header=False,
|
||||
reserved=0l)
|
||||
|
||||
tags = "".join([tag.build() for tag in self.tags])
|
||||
|
||||
footer.tag_size = header.tag_size = \
|
||||
len(tags) + len(ApeTag.APEv2_FOOTER.build(footer))
|
||||
|
||||
return ApeTag.APEv2_FOOTER.build(header) + \
|
||||
tags + \
|
||||
ApeTag.APEv2_FOOTER.build(footer)
|
||||
|
||||
|
||||
class ApeTaggedAudio:
|
||||
"""A class for handling audio formats with APEv2 tags.
|
||||
|
||||
This class presumes there will be a filename attribute which
|
||||
can be opened and checked for tags, or written if necessary."""
|
||||
|
||||
APE_TAG_CLASS = ApeTag
|
||||
|
||||
def get_metadata(self):
|
||||
"""Returns an ApeTag object, or None.
|
||||
|
||||
Raises IOError if unable to read the file."""
|
||||
|
||||
f = file(self.filename, 'rb')
|
||||
try:
|
||||
return self.APE_TAG_CLASS.read(f)
|
||||
finally:
|
||||
f.close()
|
||||
|
||||
def set_metadata(self, metadata):
|
||||
"""Takes a MetaData object and sets this track's metadata.
|
||||
|
||||
Raises IOError if unable to write the file."""
|
||||
|
||||
apetag = self.APE_TAG_CLASS.converted(metadata)
|
||||
|
||||
if (apetag is None):
|
||||
return
|
||||
|
||||
current_metadata = self.get_metadata()
|
||||
if (current_metadata is not None): # there's existing tags to delete
|
||||
f = file(self.filename, "rb")
|
||||
untagged_data = f.read()[0:-current_metadata.tag_length]
|
||||
f.close()
|
||||
f = file(self.filename, "wb")
|
||||
f.write(untagged_data)
|
||||
f.write(apetag.build())
|
||||
f.close()
|
||||
else: # no existing tags
|
||||
f = file(self.filename, "ab")
|
||||
f.write(apetag.build())
|
||||
f.close()
|
||||
|
||||
def delete_metadata(self):
|
||||
"""Deletes the track's MetaData.
|
||||
|
||||
Raises IOError if unable to write the file."""
|
||||
|
||||
current_metadata = self.get_metadata()
|
||||
if (current_metadata is not None): # there's existing tags to delete
|
||||
f = file(self.filename, "rb")
|
||||
untagged_data = f.read()[0:-current_metadata.tag_length]
|
||||
f.close()
|
||||
f = file(self.filename, "wb")
|
||||
f.write(untagged_data)
|
||||
f.close()
|
||||
|
||||
|
||||
class ApeAudio(ApeTaggedAudio, AudioFile):
|
||||
"""A Monkey's Audio file."""
|
||||
|
||||
SUFFIX = "ape"
|
||||
NAME = SUFFIX
|
||||
DEFAULT_COMPRESSION = "5000"
|
||||
COMPRESSION_MODES = tuple([str(x * 1000) for x in range(1, 6)])
|
||||
BINARIES = ("mac",)
|
||||
|
||||
FILE_HEAD = Con.Struct("ape_head",
|
||||
Con.String('id', 4),
|
||||
Con.ULInt16('version'))
|
||||
|
||||
#version >= 3.98
|
||||
APE_DESCRIPTOR = Con.Struct("ape_descriptor",
|
||||
Con.ULInt16('padding'),
|
||||
Con.ULInt32('descriptor_bytes'),
|
||||
Con.ULInt32('header_bytes'),
|
||||
Con.ULInt32('seektable_bytes'),
|
||||
Con.ULInt32('header_data_bytes'),
|
||||
Con.ULInt32('frame_data_bytes'),
|
||||
Con.ULInt32('frame_data_bytes_high'),
|
||||
Con.ULInt32('terminating_data_bytes'),
|
||||
Con.String('md5', 16))
|
||||
|
||||
APE_HEADER = Con.Struct("ape_header",
|
||||
Con.ULInt16('compression_level'),
|
||||
Con.ULInt16('format_flags'),
|
||||
Con.ULInt32('blocks_per_frame'),
|
||||
Con.ULInt32('final_frame_blocks'),
|
||||
Con.ULInt32('total_frames'),
|
||||
Con.ULInt16('bits_per_sample'),
|
||||
Con.ULInt16('number_of_channels'),
|
||||
Con.ULInt32('sample_rate'))
|
||||
|
||||
#version <= 3.97
|
||||
APE_HEADER_OLD = Con.Struct("ape_header_old",
|
||||
Con.ULInt16('compression_level'),
|
||||
Con.ULInt16('format_flags'),
|
||||
Con.ULInt16('number_of_channels'),
|
||||
Con.ULInt32('sample_rate'),
|
||||
Con.ULInt32('header_bytes'),
|
||||
Con.ULInt32('terminating_bytes'),
|
||||
Con.ULInt32('total_frames'),
|
||||
Con.ULInt32('final_frame_blocks'))
|
||||
|
||||
def __init__(self, filename):
|
||||
"""filename is a plain string."""
|
||||
|
||||
AudioFile.__init__(self, filename)
|
||||
|
||||
(self.__samplespersec__,
|
||||
self.__channels__,
|
||||
self.__bitspersample__,
|
||||
self.__totalsamples__) = ApeAudio.__ape_info__(filename)
|
||||
|
||||
@classmethod
|
||||
def is_type(cls, file):
|
||||
"""Returns True if the given file object describes this format.
|
||||
|
||||
Takes a seekable file pointer rewound to the start of the file."""
|
||||
|
||||
return file.read(4) == "MAC "
|
||||
|
||||
def lossless(self):
|
||||
"""Returns True."""
|
||||
|
||||
return True
|
||||
|
||||
@classmethod
|
||||
def supports_foreign_riff_chunks(cls):
|
||||
"""Returns True."""
|
||||
|
||||
return True
|
||||
|
||||
def has_foreign_riff_chunks(self):
|
||||
"""Returns True."""
|
||||
|
||||
#FIXME - this isn't strictly true
|
||||
#I'll need a way to detect foreign chunks in APE's stream
|
||||
#without decoding it first,
|
||||
#but since I'm not supporting APE anyway, I'll take the lazy way out
|
||||
return True
|
||||
|
||||
def bits_per_sample(self):
|
||||
"""Returns an integer number of bits-per-sample this track contains."""
|
||||
|
||||
return self.__bitspersample__
|
||||
|
||||
def channels(self):
|
||||
"""Returns an integer number of channels this track contains."""
|
||||
|
||||
return self.__channels__
|
||||
|
||||
def total_frames(self):
|
||||
"""Returns the total PCM frames of the track as an integer."""
|
||||
|
||||
return self.__totalsamples__
|
||||
|
||||
def sample_rate(self):
|
||||
"""Returns the rate of the track's audio as an integer number of Hz."""
|
||||
|
||||
return self.__samplespersec__
|
||||
|
||||
@classmethod
|
||||
def __ape_info__(cls, filename):
|
||||
f = file(filename, 'rb')
|
||||
try:
|
||||
file_head = cls.FILE_HEAD.parse_stream(f)
|
||||
|
||||
if (file_head.id != 'MAC '):
|
||||
raise InvalidFile(_(u"Invalid Monkey's Audio header"))
|
||||
|
||||
if (file_head.version >= 3980): # the latest APE file type
|
||||
descriptor = cls.APE_DESCRIPTOR.parse_stream(f)
|
||||
header = cls.APE_HEADER.parse_stream(f)
|
||||
|
||||
return (header.sample_rate,
|
||||
header.number_of_channels,
|
||||
header.bits_per_sample,
|
||||
((header.total_frames - 1) * \
|
||||
header.blocks_per_frame) + \
|
||||
header.final_frame_blocks)
|
||||
else: # old-style APE file (obsolete)
|
||||
header = cls.APE_HEADER_OLD.parse_stream(f)
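#pre-3.98 APE files don't store blocks-per-frame directly,
#so it's derived from the file version and compression level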
|
||||
|
||||
if (file_head.version >= 3950):
|
||||
blocks_per_frame = 0x48000
|
||||
elif ((file_head.version >= 3900) or
|
||||
((file_head.version >= 3800) and
|
||||
(header.compression_level == 4000))):
|
||||
blocks_per_frame = 0x12000
|
||||
else:
|
||||
blocks_per_frame = 0x2400
|
||||
|
||||
if (header.format_flags & 0x01):
|
||||
bits_per_sample = 8
|
||||
elif (header.format_flags & 0x08):
|
||||
bits_per_sample = 24
|
||||
else:
|
||||
bits_per_sample = 16
|
||||
|
||||
return (header.sample_rate,
|
||||
header.number_of_channels,
|
||||
bits_per_sample,
|
||||
((header.total_frames - 1) * \
|
||||
blocks_per_frame) + \
|
||||
header.final_frame_blocks)
|
||||
|
||||
finally:
|
||||
f.close()
|
||||
|
||||
def to_wave(self, wave_filename):
|
||||
"""Writes the contents of this file to the given .wav filename string.
|
||||
|
||||
Raises EncodingError if some error occurs during decoding."""
|
||||
|
||||
if (self.filename.endswith(".ape")):
|
||||
devnull = file(os.devnull, "wb")
|
||||
sub = subprocess.Popen([BIN['mac'],
|
||||
self.filename,
|
||||
wave_filename,
|
||||
'-d'],
|
||||
stdout=devnull,
|
||||
stderr=devnull)
|
||||
sub.wait()
|
||||
devnull.close()
|
||||
else:
|
||||
devnull = file(os.devnull, 'ab')
|
||||
import tempfile
|
||||
ape = tempfile.NamedTemporaryFile(suffix='.ape')
|
||||
f = file(self.filename, 'rb')
|
||||
transfer_data(f.read, ape.write)
|
||||
f.close()
|
||||
ape.flush()
|
||||
sub = subprocess.Popen([BIN['mac'],
|
||||
ape.name,
|
||||
wave_filename,
|
||||
'-d'],
|
||||
stdout=devnull,
|
||||
stderr=devnull)
|
||||
sub.wait()
|
||||
ape.close()
|
||||
devnull.close()
|
||||
|
||||
@classmethod
|
||||
def from_wave(cls, filename, wave_filename, compression=None):
|
||||
"""Encodes a new AudioFile from an existing .wav file.
|
||||
|
||||
Takes a filename string, wave_filename string
|
||||
of an existing WaveAudio file
|
||||
and an optional compression level string.
|
||||
Encodes a new audio file from the wave's data
|
||||
at the given filename with the specified compression level
|
||||
and returns a new ApeAudio object."""
|
||||
|
||||
if (str(compression) not in cls.COMPRESSION_MODES):
|
||||
compression = cls.DEFAULT_COMPRESSION
|
||||
|
||||
devnull = file(os.devnull, "wb")
|
||||
sub = subprocess.Popen([BIN['mac'],
|
||||
wave_filename,
|
||||
filename,
|
||||
"-c%s" % (compression)],
|
||||
stdout=devnull,
|
||||
stderr=devnull)
|
||||
sub.wait()
|
||||
devnull.close()
|
||||
return ApeAudio(filename)
Melodia/resources/audiotools/__au__.py (new file, 256 lines)
@@ -0,0 +1,256 @@
#!/usr/bin/python

#Audio Tools, a module and set of tools for manipulating audio data
#Copyright (C) 2007-2011 Brian Langenberger

#This program is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 2 of the License, or
#(at your option) any later version.

#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.

#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA


from audiotools import (AudioFile, InvalidFile, PCMReader, Con,
                        transfer_data, InvalidFormat,
                        __capped_stream_reader__, BUFFER_SIZE,
                        FILENAME_FORMAT, EncodingError, DecodingError,
                        ChannelMask)
import audiotools.pcm
import gettext

gettext.install("audiotools", unicode=True)


class InvalidAU(InvalidFile):
    pass

#######################
|
||||
#Sun AU
|
||||
#######################
|
||||
|
||||
|
||||
class AuReader(PCMReader):
|
||||
"""A subclass of PCMReader for reading Sun AU file contents."""
|
||||
|
||||
def __init__(self, au_file, data_size,
|
||||
sample_rate, channels, channel_mask, bits_per_sample):
|
||||
"""au_file is a file, data_size is an integer byte count.
|
||||
|
||||
sample_rate, channels, channel_mask and bits_per_sample are ints.
|
||||
"""
|
||||
|
||||
PCMReader.__init__(self,
|
||||
file=au_file,
|
||||
sample_rate=sample_rate,
|
||||
channels=channels,
|
||||
channel_mask=channel_mask,
|
||||
bits_per_sample=bits_per_sample)
|
||||
self.data_size = data_size
|
||||
|
||||
def read(self, bytes):
|
||||
"""Try to read a pcm.FrameList of size "bytes"."""
|
||||
|
||||
#align bytes downward if an odd number is read in
|
||||
bytes -= (bytes % (self.channels * self.bits_per_sample / 8))
|
||||
bytes = max(bytes, self.channels * self.bits_per_sample / 8)
|
||||
pcm_data = self.file.read(bytes)
|
||||
if ((len(pcm_data) == 0) and (self.data_size > 0)):
|
||||
raise IOError("data ends prematurely")
|
||||
else:
|
||||
self.data_size -= len(pcm_data)
|
||||
|
||||
try:
|
||||
return audiotools.pcm.FrameList(pcm_data,
|
||||
self.channels,
|
||||
self.bits_per_sample,
|
||||
True,
|
||||
True)
|
||||
except ValueError:
|
||||
raise IOError("data ends prematurely")
|
||||
|
||||
|
||||
class AuAudio(AudioFile):
|
||||
"""A Sun AU audio file."""
|
||||
|
||||
SUFFIX = "au"
|
||||
NAME = SUFFIX
|
||||
|
||||
AU_HEADER = Con.Struct('header',
|
||||
Con.Const(Con.String('magic_number', 4), '.snd'),
|
||||
Con.UBInt32('data_offset'),
|
||||
Con.UBInt32('data_size'),
|
||||
Con.UBInt32('encoding_format'),
|
||||
Con.UBInt32('sample_rate'),
|
||||
Con.UBInt32('channels'))
|
||||
|
||||
def __init__(self, filename):
|
||||
AudioFile.__init__(self, filename)
|
||||
|
||||
try:
|
||||
f = file(filename, 'rb')
|
||||
except IOError, msg:
|
||||
raise InvalidAU(str(msg))
|
||||
try:
|
||||
header = AuAudio.AU_HEADER.parse_stream(f)
|
||||
|
||||
if (header.encoding_format not in (2, 3, 4)):
|
||||
raise InvalidFile(_(u"Unsupported Sun AU encoding format"))
|
||||
|
||||
self.__bits_per_sample__ = {2: 8, 3: 16, 4: 24}[
|
||||
header.encoding_format]
|
||||
self.__channels__ = header.channels
|
||||
self.__sample_rate__ = header.sample_rate
|
||||
self.__total_frames__ = header.data_size / \
|
||||
(self.__bits_per_sample__ / 8) / \
|
||||
self.__channels__
|
||||
self.__data_offset__ = header.data_offset
|
||||
self.__data_size__ = header.data_size
|
||||
except Con.ConstError:
|
||||
raise InvalidFile(_(u"Invalid Sun AU header"))
|
||||
except Con.FieldError:
|
||||
raise InvalidAU(_(u"Invalid Sun AU header"))
|
||||
|
||||
@classmethod
|
||||
def is_type(cls, file):
|
||||
"""Returns True if the given file object describes this format.
|
||||
|
||||
Takes a seekable file pointer rewound to the start of the file."""
|
||||
|
||||
return file.read(4) == ".snd"
|
||||
|
||||
def lossless(self):
|
||||
"""Returns True."""
|
||||
|
||||
return True
|
||||
|
||||
def bits_per_sample(self):
|
||||
"""Returns an integer number of bits-per-sample this track contains."""
|
||||
|
||||
return self.__bits_per_sample__
|
||||
|
||||
def channels(self):
|
||||
"""Returns an integer number of channels this track contains."""
|
||||
|
||||
return self.__channels__
|
||||
|
||||
def channel_mask(self):
|
||||
"""Returns a ChannelMask object of this track's channel layout."""
|
||||
|
||||
if (self.channels() <= 2):
|
||||
return ChannelMask.from_channels(self.channels())
|
||||
else:
|
||||
return ChannelMask(0)
|
||||
|
||||
def sample_rate(self):
|
||||
"""Returns the rate of the track's audio as an integer number of Hz."""
|
||||
|
||||
return self.__sample_rate__
|
||||
|
||||
def total_frames(self):
|
||||
"""Returns the total PCM frames of the track as an integer."""
|
||||
|
||||
return self.__total_frames__
|
||||
|
||||
def to_pcm(self):
|
||||
"""Returns a PCMReader object containing the track's PCM data."""
|
||||
|
||||
f = file(self.filename, 'rb')
|
||||
f.seek(self.__data_offset__, 0)
|
||||
|
||||
return AuReader(au_file=f,
|
||||
data_size=self.__data_size__,
|
||||
sample_rate=self.sample_rate(),
|
||||
channels=self.channels(),
|
||||
channel_mask=int(self.channel_mask()),
|
||||
bits_per_sample=self.bits_per_sample())
|
||||
|
||||
@classmethod
|
||||
def from_pcm(cls, filename, pcmreader, compression=None):
|
||||
"""Encodes a new file from PCM data.
|
||||
|
||||
Takes a filename string, PCMReader object
|
||||
and optional compression level string.
|
||||
Encodes a new audio file from pcmreader's data
|
||||
at the given filename with the specified compression level
|
||||
and returns a new AuAudio object."""
|
||||
|
||||
if (pcmreader.bits_per_sample not in (8, 16, 24)):
|
||||
raise InvalidFormat(
|
||||
_(u"Unsupported bits per sample %s") % (
|
||||
pcmreader.bits_per_sample))
|
||||
|
||||
bytes_per_sample = pcmreader.bits_per_sample / 8
|
||||
|
||||
header = Con.Container(magic_number='.snd',
|
||||
data_offset=0,
|
||||
data_size=0,
|
||||
encoding_format={8: 2, 16: 3, 24: 4}[
|
||||
pcmreader.bits_per_sample],
|
||||
sample_rate=pcmreader.sample_rate,
|
||||
channels=pcmreader.channels)
|
||||
|
||||
try:
|
||||
f = file(filename, 'wb')
|
||||
except IOError, err:
|
||||
raise EncodingError(str(err))
|
||||
try:
|
||||
#send out a dummy header
|
||||
f.write(AuAudio.AU_HEADER.build(header))
|
||||
header.data_offset = f.tell()
|
||||
|
||||
#send our big-endian PCM data
|
||||
#d will be a list of ints, so we can't use transfer_data
|
||||
try:
|
||||
framelist = pcmreader.read(BUFFER_SIZE)
|
||||
while (len(framelist) > 0):
|
||||
bytes = framelist.to_bytes(True, True)
|
||||
f.write(bytes)
|
||||
header.data_size += len(bytes)
|
||||
framelist = pcmreader.read(BUFFER_SIZE)
|
||||
except (IOError, ValueError), err:
|
||||
cls.__unlink__(filename)
|
||||
raise EncodingError(str(err))
|
||||
except Exception, err:
|
||||
cls.__unlink__(filename)
|
||||
raise err
|
||||
|
||||
#send out a complete header
|
||||
f.seek(0, 0)
|
||||
f.write(AuAudio.AU_HEADER.build(header))
|
||||
finally:
|
||||
f.close()
|
||||
|
||||
try:
|
||||
pcmreader.close()
|
||||
except DecodingError, err:
|
||||
raise EncodingError(err.error_message)
|
||||
|
||||
return AuAudio(filename)
|
||||
|
||||
@classmethod
|
||||
def track_name(cls, file_path, track_metadata=None, format=None,
|
||||
suffix=None):
|
||||
"""Constructs a new filename string.
|
||||
|
||||
Given a plain string to an existing path,
|
||||
a MetaData-compatible object (or None),
|
||||
a UTF-8-encoded Python format string
|
||||
and an ASCII-encoded suffix string (such as "mp3")
|
||||
returns a plain string of a new filename with format's
|
||||
fields filled-in and encoded as FS_ENCODING.
|
||||
Raises UnsupportedTracknameField if the format string
|
||||
contains invalid template fields."""
|
||||
|
||||
if (format is None):
|
||||
format = "track%(track_number)2.2d.au"
|
||||
return AudioFile.track_name(file_path, track_metadata, format,
|
||||
suffix=cls.SUFFIX)
|
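#The block below is an illustrative usage sketch added for clarity and is
#not part of the upstream module.  It assumes "input.au" names an existing
#Sun AU file on disk; the ".copy.au" output path is likewise made up.
def __au_usage_example__(path="input.au"):
    track = AuAudio(path)                      # parses the ".snd" header
    print "%dHz, %d channel(s), %d bits" % (track.sample_rate(),
                                            track.channels(),
                                            track.bits_per_sample())
    #re-encode the same PCM data to a second AU file
    return AuAudio.from_pcm(path + ".copy.au", track.to_pcm())
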
687
Melodia/resources/audiotools/__dvda__.py
Normal file
@ -0,0 +1,687 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
#Audio Tools, a module and set of tools for manipulating audio data
|
||||
#Copyright (C) 2007-2011 Brian Langenberger
|
||||
|
||||
#This program is free software; you can redistribute it and/or modify
|
||||
#it under the terms of the GNU General Public License as published by
|
||||
#the Free Software Foundation; either version 2 of the License, or
|
||||
#(at your option) any later version.
|
||||
|
||||
#This program is distributed in the hope that it will be useful,
|
||||
#but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
#GNU General Public License for more details.
|
||||
|
||||
#You should have received a copy of the GNU General Public License
|
||||
#along with this program; if not, write to the Free Software
|
||||
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
|
||||
|
||||
from audiotools import Con, re, os, pcm, cStringIO, struct
|
||||
|
||||
|
||||
class DVDAudio:
|
||||
"""An object representing an entire DVD-Audio disc.
|
||||
|
||||
A DVDAudio object contains one or more DVDATitle objects
|
||||
(accessible via the .titlesets attribute).
|
||||
Typically, only the first DVDATitle is interesting.
|
||||
Each DVDATitle then contains one or more DVDATrack objects.
|
||||
"""
|
||||
|
||||
SECTOR_SIZE = 2048
|
||||
PTS_PER_SECOND = 90000
|
||||
|
||||
AUDIO_TS_IFO = Con.Struct(
|
||||
"AUDIO_TS_IFO",
|
||||
Con.Const(Con.Bytes("identifier", 12), "DVDAUDIO-AMG"),
|
||||
Con.UBInt32("AMG_start_sector"),
|
||||
Con.Padding(12),
|
||||
Con.UBInt32("AMGI_end_sector"),
|
||||
Con.UBInt16("DVD_version"),
|
||||
Con.Padding(4),
|
||||
Con.UBInt16("volume_count"),
|
||||
Con.UBInt16("volume_number"),
|
||||
Con.UBInt8("disc_side"),
|
||||
Con.Padding(4),
|
||||
Con.UBInt8("autoplay"),
|
||||
Con.UBInt32("ts_to_sv"),
|
||||
Con.Padding(10),
|
||||
Con.UBInt8("video_titlesets"),
|
||||
Con.UBInt8("audio_titlesets"),
|
||||
Con.Bytes("provider_identifier", 40))
|
||||
|
||||
ATS_XX_S1 = Con.Struct(
|
||||
"ATS_XX",
|
||||
Con.Const(Con.String("identifier", 12), "DVDAUDIO-ATS"),
|
||||
Con.UBInt32("ATS_end_sector"),
|
||||
Con.Padding(12),
|
||||
Con.UBInt32("ATSI_end_sector"),
|
||||
Con.UBInt16("DVD_specification_version"),
|
||||
Con.UBInt32("VTS_category"),
|
||||
Con.Padding(90),
|
||||
Con.UBInt32("ATSI_MAT_end_sector"),
|
||||
Con.Padding(60),
|
||||
Con.UBInt32("VTSM_VOBS_start_sector"),
|
||||
Con.UBInt32("ATST_AOBS_start_sector"),
|
||||
Con.UBInt32("VTS_PTT_SRPT_start_sector"),
|
||||
Con.UBInt32("ATS_PGCI_UT_start_sector"),
|
||||
Con.UBInt32("VTSM_PGCI_UT_start_sector"),
|
||||
Con.UBInt32("VTS_TMAPT_start_sector"),
|
||||
Con.UBInt32("VTSM_C_ADT_start_sector"),
|
||||
Con.UBInt32("VTSM_VOBU_ADMA_start_sector"),
|
||||
Con.UBInt32("VTS_C_ADT_start_sector"),
|
||||
Con.UBInt32("VTS_VOBU_ADMAP_start_sector"),
|
||||
Con.Padding(24))
|
||||
|
||||
ATS_XX_S2 = Con.Struct(
|
||||
"ATS_XX2",
|
||||
Con.UBInt16("title_count"),
|
||||
Con.Padding(2),
|
||||
Con.UBInt32("last_byte_address"),
|
||||
Con.StrictRepeater(
|
||||
lambda ctx: ctx['title_count'],
|
||||
Con.Struct('titles',
|
||||
Con.UBInt16("unknown1"),
|
||||
Con.UBInt16("unknown2"),
|
||||
Con.UBInt32("byte_offset"))))
|
||||
|
||||
ATS_TITLE = Con.Struct(
|
||||
"ATS_title",
|
||||
Con.Bytes("unknown1", 2),
|
||||
Con.UBInt8("tracks"),
|
||||
Con.UBInt8("indexes"),
|
||||
Con.UBInt32("track_length"),
|
||||
Con.Bytes("unknown2", 4),
|
||||
Con.UBInt16("sector_pointers_table"),
|
||||
Con.Bytes("unknown3", 2),
|
||||
Con.StrictRepeater(
|
||||
lambda ctx: ctx["tracks"],
|
||||
Con.Struct("timestamps",
|
||||
Con.Bytes("unknown1", 2),
|
||||
Con.Bytes("unknown2", 2),
|
||||
Con.UBInt8("index_number"),
|
||||
Con.Bytes("unknown3", 1),
|
||||
Con.UBInt32("first_pts"),
|
||||
Con.UBInt32("pts_length"),
|
||||
Con.Padding(6))))
|
||||
|
||||
ATS_SECTOR_POINTER = Con.Struct(
|
||||
"sector_pointer",
|
||||
Con.Const(Con.Bytes("unknown", 4),
|
||||
'\x01\x00\x00\x00'),
|
||||
Con.UBInt32("first_sector"),
|
||||
Con.UBInt32("last_sector"))
|
||||
|
||||
PACK_HEADER = Con.Struct(
|
||||
"pack_header",
|
||||
Con.Const(Con.UBInt32("sync_bytes"), 0x1BA),
|
||||
Con.Embed(Con.BitStruct(
|
||||
"markers",
|
||||
Con.Const(Con.Bits("marker1", 2), 1),
|
||||
Con.Bits("system_clock_high", 3),
|
||||
Con.Const(Con.Bits("marker2", 1), 1),
|
||||
Con.Bits("system_clock_mid", 15),
|
||||
Con.Const(Con.Bits("marker3", 1), 1),
|
||||
Con.Bits("system_clock_low", 15),
|
||||
Con.Const(Con.Bits("marker4", 1), 1),
|
||||
Con.Bits("scr_extension", 9),
|
||||
Con.Const(Con.Bits("marker5", 1), 1),
|
||||
Con.Bits("bit_rate", 22),
|
||||
Con.Const(Con.Bits("marker6", 2), 3),
|
||||
Con.Bits("reserved", 5),
|
||||
Con.Bits("stuffing_length", 3))),
|
||||
Con.StrictRepeater(lambda ctx: ctx["stuffing_length"],
|
||||
Con.UBInt8("stuffing")))
|
||||
|
||||
PES_HEADER = Con.Struct(
|
||||
"pes_header",
|
||||
Con.Const(Con.Bytes("start_code", 3), "\x00\x00\x01"),
|
||||
Con.UBInt8("stream_id"),
|
||||
Con.UBInt16("packet_length"))
|
||||
|
||||
PACKET_HEADER = Con.Struct(
|
||||
"packet_header",
|
||||
Con.UBInt16("unknown1"),
|
||||
Con.Byte("pad1_size"),
|
||||
Con.StrictRepeater(lambda ctx: ctx["pad1_size"],
|
||||
Con.Byte("pad1")),
|
||||
Con.Byte("stream_id"),
|
||||
Con.Byte("crc"),
|
||||
Con.Byte("padding"),
|
||||
Con.Switch("info",
|
||||
lambda ctx: ctx["stream_id"],
|
||||
{0xA0: Con.Struct( # PCM info
|
||||
"pcm",
|
||||
Con.Byte("pad2_size"),
|
||||
Con.UBInt16("first_audio_frame"),
|
||||
Con.UBInt8("padding2"),
|
||||
Con.Embed(Con.BitStruct(
|
||||
"flags",
|
||||
Con.Bits("group1_bps", 4),
|
||||
Con.Bits("group2_bps", 4),
|
||||
Con.Bits("group1_sample_rate", 4),
|
||||
Con.Bits("group2_sample_rate", 4))),
|
||||
Con.UBInt8("padding3"),
|
||||
Con.UBInt8("channel_assignment")),
|
||||
|
||||
0xA1: Con.Struct( # MLP info
|
||||
"mlp",
|
||||
Con.Byte("pad2_size"),
|
||||
Con.StrictRepeater(lambda ctx: ctx["pad2_size"],
|
||||
Con.Byte("pad2")),
|
||||
Con.Bytes("mlp_size", 4),
|
||||
Con.Const(Con.Bytes("sync_words", 3), "\xF8\x72\x6F"),
|
||||
Con.Const(Con.UBInt8("stream_type"), 0xBB),
|
||||
Con.Embed(Con.BitStruct(
|
||||
"flags",
|
||||
Con.Bits("group1_bps", 4),
|
||||
Con.Bits("group2_bps", 4),
|
||||
Con.Bits("group1_sample_rate", 4),
|
||||
Con.Bits("group2_sample_rate", 4),
|
||||
Con.Bits("unknown1", 11),
|
||||
Con.Bits("channel_assignment", 5),
|
||||
Con.Bits("unknown2", 48))))}))
|
||||
|
||||
def __init__(self, audio_ts_path, cdrom_device=None):
|
||||
"""A DVD-A which contains PCMReader-compatible track objects."""
|
||||
|
||||
#an inventory of AUDIO_TS files converted to uppercase keys
|
||||
self.files = dict([(name.upper(),
|
||||
os.path.join(audio_ts_path, name))
|
||||
for name in os.listdir(audio_ts_path)])
|
||||
|
||||
titleset_numbers = list(self.__titlesets__())
|
||||
|
||||
#for each titleset, read an ATS_XX_0.IFO file
|
||||
#each titleset contains one or more DVDATitle objects
|
||||
#and each DVDATitle object contains one or more DVDATrack objects
|
||||
self.titlesets = [self.__titles__(titleset) for titleset in
|
||||
titleset_numbers]
|
||||
|
||||
#for each titleset, calculate the lengths of the corresponding AOBs
|
||||
#in terms of 2048 byte sectors
|
||||
self.aob_sectors = []
|
||||
for titleset in titleset_numbers:
|
||||
aob_re = re.compile("ATS_%2.2d_\\d\\.AOB" % (titleset))
|
||||
titleset_aobs = dict([(key, value) for (key, value) in
|
||||
self.files.items()
|
||||
if (aob_re.match(key))])
|
||||
for aob_length in [os.path.getsize(titleset_aobs[key]) /
|
||||
DVDAudio.SECTOR_SIZE
|
||||
for key in sorted(titleset_aobs.keys())]:
|
||||
if (len(self.aob_sectors) == 0):
|
||||
self.aob_sectors.append(
|
||||
(0, aob_length))
|
||||
else:
|
||||
self.aob_sectors.append(
|
||||
(self.aob_sectors[-1][1],
|
||||
self.aob_sectors[-1][1] + aob_length))
|
||||
|
||||
try:
|
||||
if ((cdrom_device is not None) and
|
||||
('DVDAUDIO.MKB' in self.files.keys())):
|
||||
|
||||
from audiotools.prot import CPPMDecoder
|
||||
|
||||
self.unprotector = CPPMDecoder(
|
||||
cdrom_device, self.files['DVDAUDIO.MKB']).decode
|
||||
else:
|
||||
self.unprotector = lambda sector: sector
|
||||
except ImportError:
|
||||
self.unprotector = lambda sector: sector
|
||||
|
||||
def __getitem__(self, key):
|
||||
return self.titlesets[key]
|
||||
|
||||
def __len__(self):
|
||||
return len(self.titlesets)
|
||||
|
||||
def __titlesets__(self):
|
||||
"""return valid audio titleset integers from AUDIO_TS.IFO"""
|
||||
|
||||
try:
|
||||
f = open(self.files['AUDIO_TS.IFO'], 'rb')
|
||||
except (KeyError, IOError):
|
||||
raise InvalidDVDA(_(u"unable to open AUDIO_TS.IFO"))
|
||||
try:
|
||||
try:
|
||||
for titleset in xrange(
|
||||
1,
|
||||
DVDAudio.AUDIO_TS_IFO.parse_stream(f).audio_titlesets + 1):
|
||||
#ensure there are IFO files and AOBs
|
||||
#for each valid titleset
|
||||
if (("ATS_%2.2d_0.IFO" % (titleset) in
|
||||
self.files.keys()) and
|
||||
("ATS_%2.2d_1.AOB" % (titleset) in
|
||||
self.files.keys())):
|
||||
yield titleset
|
||||
|
||||
except Con.ConstError:
|
||||
raise InvalidDVDA(_(u"invalid AUDIO_TS.IFO"))
|
||||
finally:
|
||||
f.close()
|
||||
|
||||
def __titles__(self, titleset):
|
||||
"""returns a list of DVDATitle objects for the given titleset"""
|
||||
|
||||
try:
|
||||
f = open(self.files['ATS_%2.2d_0.IFO' % (titleset)], 'rb')
|
||||
except (KeyError, IOError):
|
||||
raise InvalidDVDA(
|
||||
_(u"unable to open ATS_%2.2d_0.IFO") % (titleset))
|
||||
try:
|
||||
try:
|
||||
#the first sector contains little of interest
|
||||
#but we'll read it to check the identifier string
|
||||
DVDAudio.ATS_XX_S1.parse_stream(f)
|
||||
except Con.ConstError:
|
||||
raise InvalidDVDA(_(u"invalid ATS_%2.2d_0.IFO") % (titleset))
|
||||
|
||||
#then move to the second sector and continue parsing
|
||||
f.seek(DVDAudio.SECTOR_SIZE, os.SEEK_SET)
|
||||
|
||||
#may contain one or more titles
|
||||
title_records = DVDAudio.ATS_XX_S2.parse_stream(f)
|
||||
|
||||
titles = []
|
||||
|
||||
for (title_number,
|
||||
title_offset) in enumerate(title_records.titles):
|
||||
f.seek(DVDAudio.SECTOR_SIZE +
|
||||
title_offset.byte_offset,
|
||||
os.SEEK_SET)
|
||||
title = DVDAudio.ATS_TITLE.parse_stream(f)
|
||||
|
||||
f.seek(DVDAudio.SECTOR_SIZE +
|
||||
title_offset.byte_offset +
|
||||
title.sector_pointers_table,
|
||||
os.SEEK_SET)
|
||||
sector_pointers = ([None] +
|
||||
[DVDAudio.ATS_SECTOR_POINTER.parse_stream(f)
|
||||
for i in xrange(title.indexes)])
|
||||
|
||||
dvda_title = DVDATitle(dvdaudio=self,
|
||||
titleset=titleset,
|
||||
title=title_number + 1,
|
||||
pts_length=title.track_length,
|
||||
tracks=[])
|
||||
|
||||
#for each track, determine its first and last sector
|
||||
#based on the sector pointers between the track's
|
||||
#initial index and the next track's initial index
|
||||
for (track_number,
|
||||
(timestamp, next_timestamp)) in enumerate(zip(
|
||||
title.timestamps, title.timestamps[1:])):
|
||||
dvda_title.tracks.append(
|
||||
DVDATrack(
|
||||
dvdaudio=self,
|
||||
titleset=titleset,
|
||||
title=dvda_title,
|
||||
track=track_number + 1,
|
||||
first_pts=timestamp.first_pts,
|
||||
pts_length=timestamp.pts_length,
|
||||
first_sector=sector_pointers[
|
||||
timestamp.index_number].first_sector,
|
||||
last_sector=sector_pointers[
|
||||
next_timestamp.index_number - 1].last_sector))
|
||||
|
||||
#for the last track, its sector pointers
|
||||
#simply consume what remains on the list
|
||||
timestamp = title.timestamps[-1]
|
||||
dvda_title.tracks.append(
|
||||
DVDATrack(
|
||||
dvdaudio=self,
|
||||
titleset=titleset,
|
||||
title=dvda_title,
|
||||
track=len(title.timestamps),
|
||||
first_pts=timestamp.first_pts,
|
||||
pts_length=timestamp.pts_length,
|
||||
first_sector=sector_pointers[
|
||||
timestamp.index_number].first_sector,
|
||||
last_sector=sector_pointers[-1].last_sector))
|
||||
|
||||
titles.append(dvda_title)
|
||||
|
||||
return titles
|
||||
finally:
|
||||
f.close()
|
||||
|
||||
def sector_reader(self, aob_filename):
|
||||
if (self.unprotector is None):
|
||||
return SectorReader(aob_filename)
|
||||
else:
|
||||
return UnprotectionSectorReader(aob_filename,
|
||||
self.unprotector)
|
||||
|
||||
|
||||
class InvalidDVDA(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class DVDATitle:
|
||||
"""An object representing a DVD-Audio title.
|
||||
|
||||
Contains one or more DVDATrack objects
|
||||
which are accessible via __getitem__
|
||||
"""
|
||||
|
||||
def __init__(self, dvdaudio, titleset, title, pts_length, tracks):
|
||||
"""length is in PTS ticks, tracks is a list of DVDATrack objects"""
|
||||
|
||||
self.dvdaudio = dvdaudio
|
||||
self.titleset = titleset
|
||||
self.title = title
|
||||
self.pts_length = pts_length
|
||||
self.tracks = tracks
|
||||
|
||||
def __len__(self):
|
||||
return len(self.tracks)
|
||||
|
||||
def __getitem__(self, index):
|
||||
return self.tracks[index]
|
||||
|
||||
def __repr__(self):
|
||||
return "DVDATitle(%s)" % \
|
||||
(",".join(["%s=%s" % (key, getattr(self, key))
|
||||
for key in ["titleset", "title", "pts_length",
|
||||
"tracks"]]))
|
||||
|
||||
def info(self):
|
||||
"""returns a (sample_rate, channels, channel_mask, bps, type) tuple"""
|
||||
|
||||
#find the AOB file of the title's first track
|
||||
track_sector = self[0].first_sector
|
||||
titleset = re.compile("ATS_%2.2d_\\d\\.AOB" % (self.titleset))
|
||||
for aob_path in sorted([self.dvdaudio.files[key] for key in
|
||||
self.dvdaudio.files.keys()
|
||||
if (titleset.match(key))]):
|
||||
aob_sectors = os.path.getsize(aob_path) / DVDAudio.SECTOR_SIZE
|
||||
if (track_sector > aob_sectors):
|
||||
track_sector -= aob_sectors
|
||||
else:
|
||||
break
|
||||
else:
|
||||
raise ValueError(_(u"unable to find track sector in AOB files"))
|
||||
|
||||
#open that AOB file and seek to that track's first sector
|
||||
aob_file = open(aob_path, 'rb')
|
||||
try:
|
||||
aob_file.seek(track_sector * DVDAudio.SECTOR_SIZE)
|
||||
|
||||
#read the pack header
|
||||
DVDAudio.PACK_HEADER.parse_stream(aob_file)
|
||||
|
||||
#skip packets until the stream ID 0xBD is found
|
||||
pes_header = DVDAudio.PES_HEADER.parse_stream(aob_file)
|
||||
while (pes_header.stream_id != 0xBD):
|
||||
aob_file.read(pes_header.packet_length)
|
||||
pes_header = DVDAudio.PES_HEADER.parse_stream(aob_file)
|
||||
|
||||
#parse the PCM/MLP header
|
||||
header = DVDAudio.PACKET_HEADER.parse_stream(aob_file)
|
||||
|
||||
#return the values indicated by the header
|
||||
return (DVDATrack.SAMPLE_RATE[
|
||||
header.info.group1_sample_rate],
|
||||
DVDATrack.CHANNELS[
|
||||
header.info.channel_assignment],
|
||||
DVDATrack.CHANNEL_MASK[
|
||||
header.info.channel_assignment],
|
||||
DVDATrack.BITS_PER_SAMPLE[
|
||||
header.info.group1_bps],
|
||||
header.stream_id)
|
||||
|
||||
finally:
|
||||
aob_file.close()
|
||||
|
||||
def stream(self):
|
||||
titleset = re.compile("ATS_%2.2d_\\d\\.AOB" % (self.titleset))
|
||||
|
||||
return AOBStream(
|
||||
aob_files=sorted([self.dvdaudio.files[key]
|
||||
for key in self.dvdaudio.files.keys()
|
||||
if (titleset.match(key))]),
|
||||
first_sector=self[0].first_sector,
|
||||
last_sector=self[-1].last_sector,
|
||||
unprotector=self.dvdaudio.unprotector)
|
||||
|
||||
def to_pcm(self):
|
||||
(sample_rate,
|
||||
channels,
|
||||
channel_mask,
|
||||
bits_per_sample,
|
||||
stream_type) = self.info()
|
||||
|
||||
if (stream_type == 0xA1):
|
||||
from audiotools.decoders import MLPDecoder
|
||||
|
||||
return MLPDecoder(IterReader(self.stream().packet_payloads()),
|
||||
(self.pts_length * sample_rate) /
|
||||
DVDAudio.PTS_PER_SECOND)
|
||||
elif (stream_type == 0xA0):
|
||||
from audiotools.decoders import AOBPCMDecoder
|
||||
|
||||
return AOBPCMDecoder(IterReader(self.stream().packet_payloads()),
|
||||
sample_rate,
|
||||
channels,
|
||||
channel_mask,
|
||||
bits_per_sample)
|
||||
else:
|
||||
raise ValueError(_(u"unsupported DVD-Audio stream type"))
|
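#Illustrative sketch, not part of the upstream module: opens a DVD-Audio
#disc, takes the first title of the first titleset and decodes it.  The
#AUDIO_TS path is an assumption supplied by the caller.
def __dvda_title_example__(audio_ts_path):
    dvda = DVDAudio(audio_ts_path)
    title = dvda[0][0]                 # first titleset -> first DVDATitle
    (sample_rate, channels, channel_mask,
     bits_per_sample, stream_type) = title.info()
    print "%d track(s), %.1f seconds, %dHz/%dch/%d-bit" % (
        len(title),
        float(title.pts_length) / DVDAudio.PTS_PER_SECOND,
        sample_rate, channels, bits_per_sample)
    return title.to_pcm()              # MLP or PCM decoder, per stream_type
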
||||
|
||||
|
||||
class DVDATrack:
|
||||
"""An object representing an individual DVD-Audio track."""
|
||||
|
||||
SAMPLE_RATE = [48000, 96000, 192000, 0, 0, 0, 0, 0,
|
||||
44100, 88200, 176400, 0, 0, 0, 0, 0]
|
||||
CHANNELS = [1, 2, 3, 4, 3, 4, 5, 3, 4, 5, 4, 5, 6, 4, 5, 4, 5, 6, 5, 5, 6]
|
||||
CHANNEL_MASK = [0x4, 0x3, 0x103, 0x33, 0xB, 0x10B, 0x3B, 0x7,
|
||||
0x107, 0x37, 0xF, 0x10F, 0x3F, 0x107, 0x37, 0xF,
|
||||
0x10F, 0x3F, 0x3B, 0x37, 0x3F]
|
||||
BITS_PER_SAMPLE = [16, 20, 24] + [0] * 13
|
||||
|
||||
def __init__(self, dvdaudio,
|
||||
titleset, title, track,
|
||||
first_pts, pts_length,
|
||||
first_sector, last_sector):
|
||||
self.dvdaudio = dvdaudio
|
||||
self.titleset = titleset
|
||||
self.title = title
|
||||
self.track = track
|
||||
self.first_pts = first_pts
|
||||
self.pts_length = pts_length
|
||||
self.first_sector = first_sector
|
||||
self.last_sector = last_sector
|
||||
|
||||
def __repr__(self):
|
||||
return "DVDATrack(%s)" % \
|
||||
(", ".join(["%s=%s" % (attr, getattr(self, attr))
|
||||
for attr in ["titleset",
|
||||
"title",
|
||||
"track",
|
||||
"first_pts",
|
||||
"pts_length",
|
||||
"first_sector",
|
||||
"last_sector"]]))
|
||||
|
||||
def sectors(self):
|
||||
"""iterates (aob_file, start_sector, end_sector)
|
||||
|
||||
for each AOB file necessary to extract the track's data
|
||||
in the order in which they should be read."""
|
||||
|
||||
track_sectors = Rangeset(self.first_sector,
|
||||
self.last_sector + 1)
|
||||
|
||||
for (i, (start_sector,
|
||||
end_sector)) in enumerate(self.dvdaudio.aob_sectors):
|
||||
aob_sectors = Rangeset(start_sector, end_sector)
|
||||
intersection = aob_sectors & track_sectors
|
||||
if (len(intersection)):
|
||||
yield (self.dvdaudio.files["ATS_%2.2d_%d.AOB" % \
|
||||
(self.titleset, i + 1)],
|
||||
intersection.start - start_sector,
|
||||
intersection.end - start_sector)
|
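#Illustrative sketch, not part of the upstream module: walks a single
#DVDATrack (assumed to come from a DVDAudio object built elsewhere) and
#prints which portion of which AOB file holds its data, as described in
#the sectors() docstring above.
def __track_sectors_example__(track):
    total = 0
    for (aob_path, start_sector, end_sector) in track.sectors():
        print "%s: sectors %d-%d" % (aob_path, start_sector, end_sector)
        total += (end_sector - start_sector)
    print "%d sectors (%d bytes) in total" % (total,
                                              total * DVDAudio.SECTOR_SIZE)
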
||||
|
||||
|
||||
class Rangeset:
|
||||
"""An optimized combination of range() and set()"""
|
||||
|
||||
#The purpose of this class is for finding the subset of
|
||||
#two Rangesets, such as with:
|
||||
#
|
||||
# >>> Rangeset(1, 10) & Rangeset(5, 15)
|
||||
# Rangeset(5, 10)
|
||||
#
|
||||
#which returns another Rangeset object.
|
||||
#This is preferable to performing:
|
||||
#
|
||||
# >>> set(range(1, 10)) & set(range(5, 15))
|
||||
# set([8, 9, 5, 6, 7])
|
||||
#
|
||||
#which allocates lots of unnecessary values
|
||||
#when all we're interested in is the min and max.
|
||||
|
||||
def __init__(self, start, end):
|
||||
self.start = start
|
||||
self.end = end
|
||||
|
||||
def __repr__(self):
|
||||
return "Rangeset(%s, %s)" % (repr(self.start), repr(self.end))
|
||||
|
||||
def __len__(self):
|
||||
return self.end - self.start
|
||||
|
||||
def __getitem__(self, i):
|
||||
if (i >= 0):
|
||||
if (i < len(self)):
|
||||
return self.start + i
|
||||
else:
|
||||
raise IndexError(i)
|
||||
else:
|
||||
if (-i - 1 < len(self)):
|
||||
return self.end + i
|
||||
else:
|
||||
raise IndexError(i)
|
||||
|
||||
def __and__(self, rangeset):
|
||||
min_point = max(self.start, rangeset.start)
|
||||
max_point = min(self.end, rangeset.end)
|
||||
|
||||
if (min_point <= max_point):
|
||||
return Rangeset(min_point, max_point)
|
||||
else:
|
||||
return Rangeset(0, 0)
|
||||
|
||||
|
||||
class AOBSectorReader:
|
||||
def __init__(self, aob_files):
|
||||
self.aob_files = list(aob_files)
|
||||
self.aob_files.sort()
|
||||
|
||||
self.current_file_index = 0
|
||||
self.current_file = open(self.aob_files[self.current_file_index], 'rb')
|
||||
|
||||
def read(self, *args):
|
||||
s = self.current_file.read(DVDAudio.SECTOR_SIZE)
|
||||
if (len(s) == DVDAudio.SECTOR_SIZE):
|
||||
return s
|
||||
else:
|
||||
try:
|
||||
#if we can increment to the next file,
|
||||
#close the current one and do so
|
||||
self.current_file.close()
|
||||
self.current_file_index += 1
|
||||
self.current_file = open(
|
||||
self.aob_files[self.current_file_index], 'rb')
|
||||
return self.read()
|
||||
except IndexError:
|
||||
#otherwise, we've reached the end of all the files
|
||||
return ""
|
||||
|
||||
def seek(self, sector):
|
||||
for self.current_file_index in xrange(len(self.aob_files)):
|
||||
aob_size = os.path.getsize(
|
||||
self.aob_files[self.current_file_index]) / DVDAudio.SECTOR_SIZE
|
||||
if (sector <= aob_size):
|
||||
self.current_file = open(
|
||||
self.aob_files[self.current_file_index], 'rb')
|
||||
if (sector > 0):
|
||||
self.current_file.seek(sector * DVDAudio.SECTOR_SIZE)
|
||||
return
|
||||
else:
|
||||
sector -= aob_size
|
||||
|
||||
def close(self):
|
||||
self.current_file.close()
|
||||
del(self.aob_files)
|
||||
del(self.current_file_index)
|
||||
del(self.current_file)
|
||||
|
||||
|
||||
class AOBStream:
|
||||
def __init__(self, aob_files, first_sector, last_sector,
|
||||
unprotector=lambda sector: sector):
|
||||
self.aob_files = aob_files
|
||||
self.first_sector = first_sector
|
||||
self.last_sector = last_sector
|
||||
self.unprotector = unprotector
|
||||
|
||||
def sectors(self):
|
||||
first_sector = self.first_sector
|
||||
last_sector = self.last_sector
|
||||
|
||||
reader = AOBSectorReader(self.aob_files)
|
||||
reader.seek(first_sector)
|
||||
last_sector -= first_sector
|
||||
for i in xrange(last_sector + 1):
|
||||
yield self.unprotector(reader.read())
|
||||
reader.close()
|
||||
|
||||
def packets(self):
|
||||
packet_header_size = struct.calcsize(">3sBH")
|
||||
|
||||
for sector in self.sectors():
|
||||
assert(sector[0:4] == '\x00\x00\x01\xBA')
|
||||
stuffing_count = ord(sector[13]) & 0x7
|
||||
sector_bytes = 2048 - (14 + stuffing_count)
|
||||
sector = cStringIO.StringIO(sector[-sector_bytes:])
|
||||
while (sector_bytes > 0):
|
||||
(start_code,
|
||||
stream_id,
|
||||
packet_length) = struct.unpack(
|
||||
">3sBH", sector.read(packet_header_size))
|
||||
sector_bytes -= packet_header_size
|
||||
|
||||
assert(start_code == '\x00\x00\x01')
|
||||
if (stream_id == 0xBD):
|
||||
yield sector.read(packet_length)
|
||||
else:
|
||||
sector.read(packet_length)
|
||||
sector_bytes -= packet_length
|
||||
|
||||
def packet_payloads(self):
|
||||
def payload(packet):
|
||||
pad1_len = ord(packet[2])
|
||||
pad2_len = ord(packet[3 + pad1_len + 3])
|
||||
return packet[3 + pad1_len + 4 + pad2_len:]
|
||||
|
||||
for packet in self.packets():
|
||||
yield payload(packet)
|
||||
|
||||
|
||||
class IterReader:
|
||||
def __init__(self, iterator):
|
||||
self.iterator = iterator
|
||||
|
||||
def read(self, bytes):
|
||||
try:
|
||||
return self.iterator.next()
|
||||
except StopIteration:
|
||||
return ""
|
||||
|
||||
def close(self):
|
||||
pass
|
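#Illustrative sketch, not part of the upstream module: shows how IterReader
#adapts the packet_payloads() generator of an AOBStream (assumed to be
#supplied by the caller) into the read()-style object that DVDATitle.to_pcm()
#hands to its decoder.
def __iter_reader_example__(aob_stream):
    reader = IterReader(aob_stream.packet_payloads())
    payload = reader.read(None)        # the byte-count argument is ignored
    print "%d bytes in the first packet payload" % (len(payload))
    reader.close()
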
2150
Melodia/resources/audiotools/__flac__.py
Normal file
File diff suppressed because it is too large
724
Melodia/resources/audiotools/__freedb__.py
Normal file
@ -0,0 +1,724 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
#Audio Tools, a module and set of tools for manipulating audio data
|
||||
#Copyright (C) 2007-2011 Brian Langenberger
|
||||
|
||||
#This program is free software; you can redistribute it and/or modify
|
||||
#it under the terms of the GNU General Public License as published by
|
||||
#the Free Software Foundation; either version 2 of the License, or
|
||||
#(at your option) any later version.
|
||||
|
||||
#This program is distributed in the hope that it will be useful,
|
||||
#but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
#GNU General Public License for more details.
|
||||
|
||||
#You should have received a copy of the GNU General Public License
|
||||
#along with this program; if not, write to the Free Software
|
||||
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
|
||||
|
||||
from audiotools import (VERSION, Con, cStringIO, sys, re, MetaData,
|
||||
AlbumMetaData, AlbumMetaDataFile, __most_numerous__,
|
||||
DummyAudioFile, MetaDataFileException)
|
||||
import StringIO
|
||||
import gettext
|
||||
|
||||
gettext.install("audiotools", unicode=True)
|
||||
|
||||
#######################
|
||||
#XMCD
|
||||
#######################
|
||||
|
||||
|
||||
class XMCDException(MetaDataFileException):
|
||||
"""Raised if some error occurs parsing an XMCD file."""
|
||||
|
||||
def __unicode__(self):
|
||||
return _(u"Invalid XMCD file")
|
||||
|
||||
|
||||
class XMCD(AlbumMetaDataFile):
|
||||
LINE_LENGTH = 78
|
||||
|
||||
def __init__(self, fields, comments):
|
||||
"""fields a dict of key->values. comment is a list of comments.
|
||||
|
||||
keys are plain strings. values and comments are unicode."""
|
||||
|
||||
self.fields = fields
|
||||
self.comments = comments
|
||||
|
||||
def __getattr__(self, key):
|
||||
if (key == 'album_name'):
|
||||
dtitle = self.fields.get('DTITLE', u"")
|
||||
if (u" / " in dtitle):
|
||||
return dtitle.split(u" / ", 1)[1]
|
||||
else:
|
||||
return dtitle
|
||||
elif (key == 'artist_name'):
|
||||
dtitle = self.fields.get('DTITLE', u"")
|
||||
if (u" / " in dtitle):
|
||||
return dtitle.split(u" / ", 1)[0]
|
||||
else:
|
||||
return u""
|
||||
elif (key == 'year'):
|
||||
return self.fields.get('DYEAR', u"")
|
||||
elif (key == 'catalog'):
|
||||
return u""
|
||||
elif (key == 'extra'):
|
||||
return self.fields.get('EXTD', u"")
|
||||
else:
|
||||
try:
|
||||
return self.__dict__[key]
|
||||
except KeyError:
|
||||
raise AttributeError(key)
|
||||
|
||||
def __setattr__(self, key, value):
|
||||
if (key == 'album_name'):
|
||||
dtitle = self.fields.get('DTITLE', u"")
|
||||
if (u" / " in dtitle):
|
||||
artist = dtitle.split(u" / ", 1)[0]
|
||||
self.fields['DTITLE'] = u"%s / %s" % (artist, value)
|
||||
else:
|
||||
self.fields['DTITLE'] = value
|
||||
elif (key == 'artist_name'):
|
||||
dtitle = self.fields.get('DTITLE', u"")
|
||||
if (u" / " in dtitle):
|
||||
album = dtitle.split(u" / ", 1)[1]
|
||||
else:
|
||||
album = dtitle
|
||||
self.fields['DTITLE'] = u"%s / %s" % (value, album)
|
||||
elif (key == 'year'):
|
||||
self.fields['DYEAR'] = value
|
||||
elif (key == 'catalog'):
|
||||
pass
|
||||
elif (key == 'extra'):
|
||||
self.fields['EXTD'] = value
|
||||
else:
|
||||
self.__dict__[key] = value
|
||||
|
||||
def __len__(self):
|
||||
track_field = re.compile(r'(TTITLE|EXTT)(\d+)')
|
||||
|
||||
return max(set([int(m.group(2)) for m in
|
||||
[track_field.match(key) for key in self.fields.keys()]
|
||||
if m is not None])) + 1
|
||||
|
||||
def to_string(self):
|
||||
def write_field(f, key, value):
|
||||
chars = list(value)
|
||||
encoded_value = "%s=" % (key)
|
||||
|
||||
while ((len(chars) > 0) and
|
||||
(len(encoded_value +
|
||||
chars[0].encode('utf-8', 'replace')) <
|
||||
XMCD.LINE_LENGTH)):
|
||||
encoded_value += chars.pop(0).encode('utf-8', 'replace')
|
||||
|
||||
f.write("%s\r\n" % (encoded_value))
|
||||
if (len(chars) > 0):
|
||||
write_field(f, key, u"".join(chars))
|
||||
|
||||
output = cStringIO.StringIO()
|
||||
|
||||
for comment in self.comments:
|
||||
output.write(comment.encode('utf-8'))
|
||||
output.write('\r\n')
|
||||
|
||||
fields = set(self.fields.keys())
|
||||
for field in ['DISCID', 'DTITLE', 'DYEAR', 'DGENRE']:
|
||||
if (field in fields):
|
||||
write_field(output, field, self.fields[field])
|
||||
fields.remove(field)
|
||||
|
||||
for i in xrange(len(self)):
|
||||
field = 'TTITLE%d' % (i)
|
||||
if (field in fields):
|
||||
write_field(output, field, self.fields[field])
|
||||
fields.remove(field)
|
||||
|
||||
if ('EXTD' in fields):
|
||||
write_field(output, 'EXTD', self.fields['EXTD'])
|
||||
fields.remove('EXTD')
|
||||
|
||||
for i in xrange(len(self)):
|
||||
field = 'EXTT%d' % (i)
|
||||
if (field in fields):
|
||||
write_field(output, field, self.fields[field])
|
||||
fields.remove(field)
|
||||
|
||||
for field in fields:
|
||||
write_field(output, field, self.fields[field])
|
||||
|
||||
return output.getvalue()
|
||||
|
||||
@classmethod
|
||||
def from_string(cls, string):
|
||||
# try:
|
||||
# data = string.decode('latin-1')
|
||||
# except UnicodeDecodeError:
|
||||
# data = string.decode('utf-8','replace')
|
||||
#FIXME - handle latin-1 files?
|
||||
data = string.decode('utf-8', 'replace')
|
||||
|
||||
if (not data.startswith(u"# xmcd")):
|
||||
raise XMCDException()
|
||||
|
||||
fields = {}
|
||||
comments = []
|
||||
field_line = re.compile(r'([A-Z0-9]+?)=(.*)')
|
||||
|
||||
for line in StringIO.StringIO(data):
|
||||
if (line.startswith(u'#')):
|
||||
comments.append(line.rstrip('\r\n'))
|
||||
else:
|
||||
match = field_line.match(line.rstrip('\r\n'))
|
||||
if (match is not None):
|
||||
key = match.group(1).encode('ascii')
|
||||
value = match.group(2)
|
||||
if (key in fields):
|
||||
fields[key] += value
|
||||
else:
|
||||
fields[key] = value
|
||||
|
||||
return cls(fields, comments)
|
||||
|
||||
def get_track(self, index):
|
||||
try:
|
||||
ttitle = self.fields['TTITLE%d' % (index)]
|
||||
track_extra = self.fields['EXTT%d' % (index)]
|
||||
except KeyError:
|
||||
return (u"", u"", u"")
|
||||
|
||||
if (u' / ' in ttitle):
|
||||
(track_artist, track_title) = ttitle.split(u' / ', 1)
|
||||
else:
|
||||
track_title = ttitle
|
||||
track_artist = u""
|
||||
|
||||
return (track_title, track_artist, track_extra)
|
||||
|
||||
def set_track(self, index, name, artist, extra):
|
||||
if ((index < 0) or (index >= len(self))):
|
||||
raise IndexError(index)
|
||||
|
||||
if (len(artist) > 0):
|
||||
self.fields["TTITLE%d" % (index)] = u"%s / %s" % (artist, name)
|
||||
else:
|
||||
self.fields["TTITLE%d" % (index)] = name
|
||||
|
||||
if (len(extra) > 0):
|
||||
self.fields["EXTT%d" % (index)] = extra
|
||||
|
||||
@classmethod
|
||||
def from_tracks(cls, tracks):
|
||||
def track_string(track, album_artist, metadata):
|
||||
if (track.track_number() in metadata.keys()):
|
||||
metadata = metadata[track.track_number()]
|
||||
if (metadata.artist_name == album_artist):
|
||||
return metadata.track_name
|
||||
else:
|
||||
return u"%s / %s" % (metadata.artist_name,
|
||||
metadata.track_name)
|
||||
else:
|
||||
return u""
|
||||
|
||||
audiofiles = [f for f in tracks if f.track_number() != 0]
|
||||
audiofiles.sort(lambda t1, t2: cmp(t1.track_number(),
|
||||
t2.track_number()))
|
||||
|
||||
discid = DiscID([track.cd_frames() for track in audiofiles])
|
||||
|
||||
metadata = dict([(t.track_number(), t.get_metadata())
|
||||
for t in audiofiles
|
||||
if (t.get_metadata() is not None)])
|
||||
|
||||
artist_names = [m.artist_name for m in metadata.values()]
|
||||
if (len(artist_names) == 0):
|
||||
album_artist = u""
|
||||
elif ((len(artist_names) > 1) and
|
||||
(len(set(artist_names)) == len(artist_names))):
|
||||
#if all track artists are different, don't pick one
|
||||
album_artist = u"Various"
|
||||
else:
|
||||
album_artist = __most_numerous__(artist_names)
|
||||
|
||||
return cls(dict([("DISCID", str(discid).decode('ascii')),
|
||||
("DTITLE", u"%s / %s" % \
|
||||
(album_artist,
|
||||
__most_numerous__([m.album_name for m in
|
||||
metadata.values()]))),
|
||||
("DYEAR", __most_numerous__([m.year for m in
|
||||
metadata.values()])),
|
||||
("EXTDD", u""),
|
||||
("PLAYORDER", u"")] + \
|
||||
[("TTITLE%d" % (track.track_number() - 1),
|
||||
track_string(track, album_artist, metadata))
|
||||
for track in audiofiles] + \
|
||||
[("EXTT%d" % (track.track_number() - 1),
|
||||
u"")
|
||||
for track in audiofiles]),
|
||||
[u"# xmcd",
|
||||
u"#",
|
||||
u"# Track frame offsets:"] +
|
||||
[u"#\t%d" % (offset) for offset in discid.offsets()] +
|
||||
[u"#",
|
||||
u"# Disc length: %d seconds" % (
|
||||
(discid.length() / 75) + 2),
|
||||
u"#"])
|
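#Illustrative sketch, not part of the upstream module: the field values are
#made up, but they show the "artist / album" DTITLE convention handled by
#__getattr__ and __setattr__ above.
def __xmcd_example__():
    xmcd = XMCD({"DTITLE": u"Some Artist / Some Album",
                 "DYEAR": u"2011",
                 "TTITLE0": u"Opening Track",
                 "EXTT0": u""},
                [u"# xmcd"])
    assert(xmcd.artist_name == u"Some Artist")
    assert(xmcd.album_name == u"Some Album")
    assert(len(xmcd) == 1)             # one TTITLE/EXTT pair
    return xmcd.to_string()            # serialized with \r\n line endings
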
||||
|
||||
|
||||
#######################
|
||||
#FREEDB
|
||||
#######################
|
||||
|
||||
class DiscID:
|
||||
"""An object representing a 32 bit FreeDB disc ID value."""
|
||||
|
||||
DISCID = Con.Struct('discid',
|
||||
Con.UBInt8('digit_sum'),
|
||||
Con.UBInt16('length'),
|
||||
Con.UBInt8('track_count'))
|
||||
|
||||
def __init__(self, tracks=[], offsets=None, length=None, lead_in=150):
|
||||
"""Fields are as follows:
|
||||
|
||||
tracks - a list of track lengths in CD frames
|
||||
offsets - a list of track offsets in CD frames
|
||||
length - the length of the entire disc in CD frames
|
||||
lead_in - the location of the first track on the CD, in frames
|
||||
|
||||
These fields are all optional.
|
||||
One will presumably fill them with data later in that event.
|
||||
"""
|
||||
|
||||
self.tracks = tracks
|
||||
self.__offsets__ = offsets
|
||||
self.__length__ = length
|
||||
self.__lead_in__ = lead_in
|
||||
|
||||
@classmethod
|
||||
def from_cdda(cls, cdda):
|
||||
"""Given a CDDA object, returns a populated DiscID.
|
||||
|
||||
May raise ValueError if there are no audio tracks on the CD."""
|
||||
|
||||
tracks = list(cdda)
|
||||
if (len(tracks) < 1):
|
||||
raise ValueError(_(u"no audio tracks in CDDA object"))
|
||||
|
||||
return cls(tracks=[t.length() for t in tracks],
|
||||
offsets=[t.offset() for t in tracks],
|
||||
length=cdda.last_sector(),
|
||||
lead_in=tracks[0].offset())
|
||||
|
||||
def add(self, track):
|
||||
"""Adds a new track length, in CD frames."""
|
||||
|
||||
self.tracks.append(track)
|
||||
|
||||
def offsets(self):
|
||||
"""Returns a list of calculated offset integers, from track lengths."""
|
||||
|
||||
if (self.__offsets__ is None):
|
||||
offsets = [self.__lead_in__]
|
||||
|
||||
for track in self.tracks[0:-1]:
|
||||
offsets.append(track + offsets[-1])
|
||||
|
||||
return offsets
|
||||
else:
|
||||
return self.__offsets__
|
||||
|
||||
def length(self):
|
||||
"""Returns the total length of the disc, in seconds."""
|
||||
|
||||
if (self.__length__ is None):
|
||||
return sum(self.tracks)
|
||||
else:
|
||||
return self.__length__
|
||||
|
||||
def idsuffix(self):
|
||||
"""Returns a FreeDB disc ID suffix string.
|
||||
|
||||
This is for making server queries."""
|
||||
|
||||
return str(len(self.tracks)) + " " + \
|
||||
" ".join([str(offset) for offset in self.offsets()]) + \
|
||||
" " + str((self.length() + self.__lead_in__) / 75)
|
||||
|
||||
def __str__(self):
|
||||
def __count_digits__(i):
|
||||
if (i == 0):
|
||||
return 0
|
||||
else:
|
||||
return (i % 10) + __count_digits__(i / 10)
|
||||
|
||||
disc_id = Con.Container()
|
||||
|
||||
disc_id.track_count = len(self.tracks)
|
||||
disc_id.length = self.length() / 75
|
||||
disc_id.digit_sum = sum([__count_digits__(o / 75)
|
||||
for o in self.offsets()]) % 0xFF
|
||||
|
||||
return DiscID.DISCID.build(disc_id).encode('hex')
|
||||
|
||||
def freedb_id(self):
|
||||
"""Returns the entire FreeDB disc ID, including suffix."""
|
||||
|
||||
return str(self) + " " + self.idsuffix()
|
||||
|
||||
def toxmcd(self, output):
|
||||
"""Writes a newly created XMCD file to output.
|
||||
|
||||
Its values are populated from this DiscID's fields."""
|
||||
|
||||
output.write(XMCD.from_tracks(
|
||||
[DummyAudioFile(length, None, i + 1)
|
||||
for (i, length) in enumerate(self.tracks)]).to_string())
|
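#Illustrative sketch, not part of the upstream module: the three track
#lengths (in CD frames) are arbitrary, but they show the offset and
#disc ID arithmetic implemented by offsets() and __str__ above.
def __disc_id_example__():
    disc_id = DiscID(tracks=[13000, 20000, 27000])
    print disc_id.offsets()            # [150, 13150, 33150] with default lead-in
    print str(disc_id)                 # 8 hex digits: digit sum, length, count
    print disc_id.freedb_id()          # the full ID plus the query suffix
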
||||
|
||||
|
||||
class FreeDBException(Exception):
|
||||
"""Raised if some problem occurs during FreeDB querying."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class FreeDB:
|
||||
"""A class for performing queries on a FreeDB or compatible server.
|
||||
|
||||
This operates using the original FreeDB client-server protocol."""
|
||||
|
||||
LINE = re.compile(r'\d\d\d\s.+')
|
||||
|
||||
def __init__(self, server, port, messenger):
|
||||
"""server is a string, port is an int, messenger is a Messenger.
|
||||
|
||||
Queries are sent to the server, and output to the messenger."""
|
||||
|
||||
self.server = server
|
||||
self.port = port
|
||||
self.socket = None
|
||||
self.r = None
|
||||
self.w = None
|
||||
self.messenger = messenger
|
||||
|
||||
def connect(self):
|
||||
"""Performs the initial connection."""
|
||||
|
||||
import socket
|
||||
|
||||
try:
|
||||
self.messenger.info(_(u"Connecting to \"%s\"") % (self.server))
|
||||
|
||||
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||
self.socket.connect((self.server, self.port))
|
||||
|
||||
self.r = self.socket.makefile("rb")
|
||||
self.w = self.socket.makefile("wb")
|
||||
|
||||
(code, msg) = self.read() # the welcome message
|
||||
if (code == 201):
|
||||
self.messenger.info(_(u"Connected ... attempting to login"))
|
||||
else:
|
||||
self.r.close()
|
||||
self.w.close()
|
||||
self.socket.close()
|
||||
raise FreeDBException(_(u"Invalid hello message"))
|
||||
|
||||
self.write("cddb hello user %s %s %s" % \
|
||||
(socket.getfqdn(), "audiotools", VERSION))
|
||||
|
||||
(code, msg) = self.read() # the handshake successful message
|
||||
if (code != 200):
|
||||
self.r.close()
|
||||
self.w.close()
|
||||
self.socket.close()
|
||||
raise FreeDBException(_(u"Handshake unsuccessful"))
|
||||
|
||||
self.write("proto 6")
|
||||
|
||||
(code, msg) = self.read() # the protocol successful message
|
||||
if ((code != 200) and (code != 201)):
|
||||
self.r.close()
|
||||
self.w.close()
|
||||
self.socket.close()
|
||||
raise FreeDBException(_(u"Protocol change unsuccessful"))
|
||||
|
||||
except socket.error, err:
|
||||
raise FreeDBException(err[1])
|
||||
|
||||
def close(self):
|
||||
"""Closes an open connection."""
|
||||
|
||||
self.messenger.info(_(u"Closing connection"))
|
||||
|
||||
self.write("quit")
|
||||
(code, msg) = self.read() # the quit successful message
|
||||
|
||||
self.r.close()
|
||||
self.w.close()
|
||||
self.socket.close()
|
||||
|
||||
def write(self, line):
|
||||
"""Writes a single command line to the server."""
|
||||
|
||||
if (self.socket is not None):
|
||||
self.w.write(line)
|
||||
self.w.write("\r\n")
|
||||
self.w.flush()
|
||||
|
||||
def read(self):
|
||||
"""Reads a result line from the server."""
|
||||
|
||||
line = self.r.readline()
|
||||
if (FreeDB.LINE.match(line)):
|
||||
return (int(line[0:3]), line[4:].rstrip("\r\n"))
|
||||
else:
|
||||
return (None, line.rstrip("\r\n"))
|
||||
|
||||
def query(self, disc_id):
|
||||
"""Given a DiscID, performs an album query and returns matches.
|
||||
|
||||
Each match is a (category, id) pair, which the user may
|
||||
need to decide between."""
|
||||
|
||||
matches = []
|
||||
|
||||
self.messenger.info(
|
||||
_(u"Sending Disc ID \"%(disc_id)s\" to server \"%(server)s\"") % \
|
||||
{"disc_id": str(disc_id).decode('ascii'),
|
||||
"server": self.server.decode('ascii', 'replace')})
|
||||
|
||||
self.write("cddb query " + disc_id.freedb_id())
|
||||
(code, msg) = self.read()
|
||||
if (code == 200):
|
||||
matches.append(msg)
|
||||
elif ((code == 211) or (code == 210)):
|
||||
while (msg != "."):
|
||||
(code, msg) = self.read()
|
||||
if (msg != "."):
|
||||
matches.append(msg)
|
||||
|
||||
if (len(matches) == 1):
|
||||
self.messenger.info(_(u"1 match found"))
|
||||
else:
|
||||
self.messenger.info(_(u"%s matches found") % (len(matches)))
|
||||
|
||||
return map(lambda m: m.split(" ", 2), matches)
|
||||
|
||||
def read_data(self, category, id, output):
|
||||
"""Reads the FreeDB entry matching category and id to output.
|
||||
|
||||
category and id are raw strings, as returned by query().
|
||||
output is an open file object.
|
||||
"""
|
||||
|
||||
self.write("cddb read " + category + " " + id)
|
||||
(code, msg) = self.read()
|
||||
if (code == 210):
|
||||
line = self.r.readline()
|
||||
while (line.strip() != "."):
|
||||
output.write(line)
|
||||
line = self.r.readline()
|
||||
else:
|
||||
print >> sys.stderr, (code, msg)
|
||||
|
||||
|
||||
class FreeDBWeb(FreeDB):
|
||||
"""A class for performing queries on a FreeDB or compatible server.
|
||||
|
||||
This operates using the FreeDB web-based protocol."""
|
||||
|
||||
def __init__(self, server, port, messenger):
|
||||
"""server is a string, port is an int, messenger is a Messenger.
|
||||
|
||||
Queries are sent to the server, and output to the messenger."""
|
||||
|
||||
self.server = server
|
||||
self.port = port
|
||||
self.connection = None
|
||||
self.messenger = messenger
|
||||
|
||||
def connect(self):
|
||||
"""Performs the initial connection."""
|
||||
|
||||
import httplib
|
||||
|
||||
self.connection = httplib.HTTPConnection(self.server, self.port,
|
||||
timeout=10)
|
||||
|
||||
def close(self):
|
||||
"""Closes an open connection."""
|
||||
|
||||
if (self.connection is not None):
|
||||
self.connection.close()
|
||||
|
||||
def write(self, line):
|
||||
"""Writes a single command line to the server."""
|
||||
|
||||
import urllib
|
||||
import socket
|
||||
|
||||
u = urllib.urlencode({"hello": "user %s %s %s" % \
|
||||
(socket.getfqdn(),
|
||||
"audiotools",
|
||||
VERSION),
|
||||
"proto": str(6),
|
||||
"cmd": line})
|
||||
|
||||
try:
|
||||
self.connection.request(
|
||||
"POST",
|
||||
"/~cddb/cddb.cgi",
|
||||
u,
|
||||
{"Content-type": "application/x-www-form-urlencoded",
|
||||
"Accept": "text/plain"})
|
||||
except socket.error, msg:
|
||||
raise FreeDBException(str(msg))
|
||||
|
||||
def read(self):
|
||||
"""Reads a result line from the server."""
|
||||
|
||||
response = self.connection.getresponse()
|
||||
return response.read()
|
||||
|
||||
def __parse_line__(self, line):
|
||||
if (FreeDB.LINE.match(line)):
|
||||
return (int(line[0:3]), line[4:].rstrip("\r\n"))
|
||||
else:
|
||||
return (None, line.rstrip("\r\n"))
|
||||
|
||||
def query(self, disc_id):
|
||||
"""Given a DiscID, performs an album query and returns matches.
|
||||
|
||||
Each match is a (category, id) pair, which the user may
|
||||
need to decide between."""
|
||||
|
||||
matches = []
|
||||
|
||||
self.messenger.info(
|
||||
_(u"Sending Disc ID \"%(disc_id)s\" to server \"%(server)s\"") % \
|
||||
{"disc_id": str(disc_id).decode('ascii'),
|
||||
"server": self.server.decode('ascii', 'replace')})
|
||||
|
||||
self.write("cddb query " + disc_id.freedb_id())
|
||||
data = cStringIO.StringIO(self.read())
|
||||
(code, msg) = self.__parse_line__(data.readline())
|
||||
if (code == 200):
|
||||
matches.append(msg)
|
||||
elif ((code == 211) or (code == 210)):
|
||||
while (msg != "."):
|
||||
(code, msg) = self.__parse_line__(data.readline())
|
||||
if (msg != "."):
|
||||
matches.append(msg)
|
||||
|
||||
if (len(matches) == 1):
|
||||
self.messenger.info(_(u"1 match found"))
|
||||
else:
|
||||
self.messenger.info(_(u"%s matches found") % (len(matches)))
|
||||
|
||||
return map(lambda m: m.split(" ", 2), matches)
|
||||
|
||||
def read_data(self, category, id, output):
|
||||
"""Reads the FreeDB entry matching category and id to output.
|
||||
|
||||
category and id are raw strings, as returned by query().
|
||||
output is an open file object.
|
||||
"""
|
||||
|
||||
self.write("cddb read " + category + " " + id)
|
||||
data = cStringIO.StringIO(self.read())
|
||||
(code, msg) = self.__parse_line__(data.readline())
|
||||
if (code == 210):
|
||||
line = data.readline()
|
||||
while (line.strip() != "."):
|
||||
output.write(line)
|
||||
line = data.readline()
|
||||
else:
|
||||
print >> sys.stderr, (code, msg)
|
||||
|
||||
|
||||
#matches is a list of (category,disc_id,title) tuples returned from
|
||||
#FreeDB.query(). If the length of that list is 1, return the first
|
||||
#item. If the length is greater than one, present the user a list of
|
||||
#choices and force him/her to pick the closest match for the CD.
|
||||
#That data can then be sent to FreeDB.read_data()
|
||||
def __select_match__(matches, messenger):
|
||||
if (len(matches) == 1):
|
||||
return matches[0]
|
||||
elif (len(matches) < 1):
|
||||
return None
|
||||
else:
|
||||
messenger.info(_(u"Please Select the Closest Match:"))
|
||||
selected = 0
|
||||
while ((selected < 1) or (selected > len(matches))):
|
||||
for i in range(len(matches)):
|
||||
messenger.info(_(u"%(choice)s) [%(genre)s] %(name)s") % \
|
||||
{"choice": i + 1,
|
||||
"genre": matches[i][0],
|
||||
"name": matches[i][2].decode('utf-8',
|
||||
'replace')})
|
||||
try:
|
||||
messenger.partial_info(_(u"Your Selection [1-%s]:") % \
|
||||
(len(matches)))
|
||||
selected = int(sys.stdin.readline().strip())
|
||||
except ValueError:
|
||||
selected = 0
|
||||
|
||||
return matches[selected - 1]
|
||||
|
||||
|
||||
def __select_default_match__(matches, selection):
|
||||
if (len(matches) < 1):
|
||||
return None
|
||||
else:
|
||||
try:
|
||||
return matches[selection]
|
||||
except IndexError:
|
||||
return matches[0]
|
||||
|
||||
|
||||
def get_xmcd(disc_id, output, freedb_server, freedb_server_port,
|
||||
messenger, default_selection=None):
|
||||
"""Runs through the entire FreeDB querying sequence.
|
||||
|
||||
Fields are as follows:
|
||||
disc_id - a DiscID object
|
||||
output - an open file object for writing
|
||||
freedb_server - a server name string
|
||||
freedb_port - a server port int
|
||||
messenger - a Messenger object
|
||||
default_selection - if given, the default match to choose
|
||||
"""
|
||||
|
||||
try:
|
||||
freedb = FreeDBWeb(freedb_server, freedb_server_port, messenger)
|
||||
freedb.connect()
|
||||
except FreeDBException, msg:
|
||||
#if an exception occurs during the opening,
|
||||
#freedb will auto-close its sockets
|
||||
raise IOError(str(msg))
|
||||
|
||||
try:
|
||||
matches = freedb.query(disc_id)
|
||||
#HANDLE MULTIPLE MATCHES, or NO MATCHES
|
||||
if (len(matches) > 0):
|
||||
if (default_selection is None):
|
||||
(category, idstring, title) = __select_match__(
|
||||
matches, messenger)
|
||||
else:
|
||||
(category, idstring, title) = __select_default_match__(
|
||||
matches, default_selection)
|
||||
|
||||
freedb.read_data(category, idstring, output)
|
||||
output.flush()
|
||||
|
||||
freedb.close()
|
||||
except FreeDBException, msg:
|
||||
#otherwise, close the sockets manually
|
||||
freedb.close()
|
||||
raise IOError(str(msg))
|
||||
|
||||
return len(matches)
|
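#Illustrative sketch, not part of the upstream module: the server name,
#port and output path are assumptions, and the Messenger object must be
#supplied by the caller (it is not defined in this file).
def __get_xmcd_example__(disc_id, messenger, xmcd_path="album.xmcd"):
    output = open(xmcd_path, "wb")
    try:
        return get_xmcd(disc_id, output,
                        "freedb.freedb.org", 80,
                        messenger)
    finally:
        output.close()
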
1765
Melodia/resources/audiotools/__id3__.py
Normal file
File diff suppressed because it is too large
190
Melodia/resources/audiotools/__id3v1__.py
Normal file
@ -0,0 +1,190 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
#Audio Tools, a module and set of tools for manipulating audio data
|
||||
#Copyright (C) 2007-2011 Brian Langenberger
|
||||
|
||||
#This program is free software; you can redistribute it and/or modify
|
||||
#it under the terms of the GNU General Public License as published by
|
||||
#the Free Software Foundation; either version 2 of the License, or
|
||||
#(at your option) any later version.
|
||||
|
||||
#This program is distributed in the hope that it will be useful,
|
||||
#but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
#GNU General Public License for more details.
|
||||
|
||||
#You should have received a copy of the GNU General Public License
|
||||
#along with this program; if not, write to the Free Software
|
||||
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
|
||||
from audiotools import MetaData, Con, os
|
||||
|
||||
|
||||
class ID3v1Comment(MetaData, list):
|
||||
"""A complete ID3v1 tag."""
|
||||
|
||||
ID3v1 = Con.Struct("id3v1",
|
||||
Con.Const(Con.String("identifier", 3), 'TAG'),
|
||||
Con.String("song_title", 30),
|
||||
Con.String("artist", 30),
|
||||
Con.String("album", 30),
|
||||
Con.String("year", 4),
|
||||
Con.String("comment", 28),
|
||||
Con.Padding(1),
|
||||
Con.Byte("track_number"),
|
||||
Con.Byte("genre"))
|
||||
|
||||
ID3v1_NO_TRACKNUMBER = Con.Struct("id3v1_notracknumber",
|
||||
Con.Const(Con.String("identifier", 3), 'TAG'),
|
||||
Con.String("song_title", 30),
|
||||
Con.String("artist", 30),
|
||||
Con.String("album", 30),
|
||||
Con.String("year", 4),
|
||||
Con.String("comment", 30),
|
||||
Con.Byte("genre"))
|
||||
|
||||
ATTRIBUTES = ['track_name',
|
||||
'artist_name',
|
||||
'album_name',
|
||||
'year',
|
||||
'comment',
|
||||
'track_number']
|
||||
|
||||
@classmethod
|
||||
def read_id3v1_comment(cls, mp3filename):
|
||||
"""Reads a ID3v1Comment data from an MP3 filename.
|
||||
|
||||
Returns a (song title, artist, album, year, comment, track number)
|
||||
tuple.
|
||||
If no ID3v1 tag is present, returns a tuple with those fields blank.
|
||||
All text is in unicode.
|
||||
If track number is -1, the id3v1 comment could not be found.
|
||||
"""
|
||||
|
||||
mp3file = file(mp3filename, "rb")
|
||||
try:
|
||||
mp3file.seek(-128, 2)
|
||||
try:
|
||||
id3v1 = ID3v1Comment.ID3v1.parse(mp3file.read())
|
||||
except Con.adapters.PaddingError:
|
||||
mp3file.seek(-128, 2)
|
||||
id3v1 = ID3v1Comment.ID3v1_NO_TRACKNUMBER.parse(mp3file.read())
|
||||
id3v1.track_number = 0
|
||||
except Con.ConstError:
|
||||
return tuple([u""] * 5 + [-1])
|
||||
|
||||
field_list = (id3v1.song_title,
|
||||
id3v1.artist,
|
||||
id3v1.album,
|
||||
id3v1.year,
|
||||
id3v1.comment)
|
||||
|
||||
return tuple(map(lambda t:
|
||||
t.rstrip('\x00').decode('ascii', 'replace'),
|
||||
field_list) + [id3v1.track_number])
|
||||
finally:
|
||||
mp3file.close()
|
||||
|
||||
@classmethod
|
||||
def build_id3v1(cls, song_title, artist, album, year, comment,
|
||||
track_number):
|
||||
"""Turns fields into a complete ID3v1 binary tag string.
|
||||
|
||||
All fields are unicode except for track_number, an int."""
|
||||
|
||||
def __s_pad__(s, length):
|
||||
if (len(s) < length):
|
||||
return s + chr(0) * (length - len(s))
|
||||
else:
|
||||
s = s[0:length].rstrip()
|
||||
return s + chr(0) * (length - len(s))
|
||||
|
||||
c = Con.Container()
|
||||
c.identifier = 'TAG'
|
||||
c.song_title = __s_pad__(song_title.encode('ascii', 'replace'), 30)
|
||||
c.artist = __s_pad__(artist.encode('ascii', 'replace'), 30)
|
||||
c.album = __s_pad__(album.encode('ascii', 'replace'), 30)
|
||||
c.year = __s_pad__(year.encode('ascii', 'replace'), 4)
|
||||
c.comment = __s_pad__(comment.encode('ascii', 'replace'), 28)
|
||||
c.track_number = int(track_number)
|
||||
c.genre = 0
|
||||
|
||||
return ID3v1Comment.ID3v1.build(c)
|
||||
|
||||
def __init__(self, metadata):
|
||||
"""Initialized with a read_id3v1_comment tuple.
|
||||
|
||||
Fields are (title,artist,album,year,comment,tracknum)"""
|
||||
|
||||
list.__init__(self, metadata)
|
||||
|
||||
@classmethod
|
||||
def supports_images(cls):
|
||||
"""Returns False."""
|
||||
|
||||
return False
|
||||
|
||||
#if an attribute is updated (e.g. self.track_name)
|
||||
#make sure to update the corresponding list item
|
||||
def __setattr__(self, key, value):
|
||||
if (key in self.ATTRIBUTES):
|
||||
if (key != 'track_number'):
|
||||
self[self.ATTRIBUTES.index(key)] = value
|
||||
else:
|
||||
self[self.ATTRIBUTES.index(key)] = int(value)
|
||||
elif (key in MetaData.__FIELDS__):
|
||||
pass
|
||||
else:
|
||||
self.__dict__[key] = value
|
||||
|
||||
def __delattr__(self, key):
|
||||
if (key == 'track_number'):
|
||||
setattr(self, key, 0)
|
||||
elif (key in self.ATTRIBUTES):
|
||||
setattr(self, key, u"")
|
||||
|
||||
def __getattr__(self, key):
|
||||
if (key in self.ATTRIBUTES):
|
||||
return self[self.ATTRIBUTES.index(key)]
|
||||
elif (key in MetaData.__INTEGER_FIELDS__):
|
||||
return 0
|
||||
elif (key in MetaData.__FIELDS__):
|
||||
return u""
|
||||
else:
|
||||
raise AttributeError(key)
|
||||
|
||||
@classmethod
|
||||
def converted(cls, metadata):
|
||||
"""Converts a MetaData object to an ID3v1Comment object."""
|
||||
|
||||
if ((metadata is None) or (isinstance(metadata, ID3v1Comment))):
|
||||
return metadata
|
||||
|
||||
return ID3v1Comment((metadata.track_name,
|
||||
metadata.artist_name,
|
||||
metadata.album_name,
|
||||
metadata.year,
|
||||
metadata.comment,
|
||||
int(metadata.track_number)))
|
||||
|
||||
def __comment_name__(self):
|
||||
return u'ID3v1'
|
||||
|
||||
def __comment_pairs__(self):
|
||||
return zip(('Title', 'Artist', 'Album', 'Year', 'Comment', 'Tracknum'),
|
||||
self)
|
||||
|
||||
def build_tag(self):
|
||||
"""Returns a binary string of this tag's data."""
|
||||
|
||||
return self.build_id3v1(self.track_name,
|
||||
self.artist_name,
|
||||
self.album_name,
|
||||
self.year,
|
||||
self.comment,
|
||||
self.track_number)
|
||||
|
||||
def images(self):
|
||||
"""Returns an empty list of Image objects."""
|
||||
|
||||
return []
|
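#Illustrative sketch, not part of the upstream module: the field values are
#made up, but they show the (title, artist, album, year, comment, tracknum)
#tuple layout and the fixed 128 byte tag produced by build_tag().
def __id3v1_example__():
    comment = ID3v1Comment((u"Track Title", u"Some Artist", u"Some Album",
                            u"2011", u"", 1))
    tag = comment.build_tag()
    assert(len(tag) == 128)
    assert(tag.startswith("TAG"))
    return tag
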
538
Melodia/resources/audiotools/__image__.py
Normal file
@ -0,0 +1,538 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
#Audio Tools, a module and set of tools for manipulating audio data
|
||||
#Copyright (C) 2007-2011 Brian Langenberger
|
||||
|
||||
#This program is free software; you can redistribute it and/or modify
|
||||
#it under the terms of the GNU General Public License as published by
|
||||
#the Free Software Foundation; either version 2 of the License, or
|
||||
#(at your option) any later version.
|
||||
|
||||
#This program is distributed in the hope that it will be useful,
|
||||
#but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
#GNU General Public License for more details.
|
||||
|
||||
#You should have received a copy of the GNU General Public License
|
||||
#along with this program; if not, write to the Free Software
|
||||
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
|
||||
from audiotools import Con
|
||||
import imghdr
|
||||
import cStringIO
|
||||
import gettext
|
||||
|
||||
gettext.install("audiotools", unicode=True)
|
||||
|
||||
|
||||
def __jpeg__(h, f):
|
||||
if (h[0:3] == "FFD8FF".decode('hex')):
|
||||
return 'jpeg'
|
||||
else:
|
||||
return None
|
||||
|
||||
|
||||
imghdr.tests.append(__jpeg__)
|
||||
|
||||
|
||||
def image_metrics(file_data):
|
||||
"""Returns an ImageMetrics subclass from a string of file data.
|
||||
|
||||
Raises InvalidImage if there is an error parsing the file
|
||||
or its type is unknown."""
|
||||
|
||||
header = imghdr.what(None, file_data)
|
||||
|
||||
file = cStringIO.StringIO(file_data)
|
||||
try:
|
||||
if (header == 'jpeg'):
|
||||
return __JPEG__.parse(file)
|
||||
elif (header == 'png'):
|
||||
return __PNG__.parse(file)
|
||||
elif (header == 'gif'):
|
||||
return __GIF__.parse(file)
|
||||
elif (header == 'bmp'):
|
||||
return __BMP__.parse(file)
|
||||
elif (header == 'tiff'):
|
||||
return __TIFF__.parse(file)
|
||||
else:
|
||||
raise InvalidImage(_(u'Unknown image type'))
|
||||
finally:
|
||||
file.close()
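#Hedged usage sketch (not part of the original audiotools source);
#'path' is any caller-supplied image filename:
def __example_image_metrics__(path):
    """Illustration only: returns (width, height, mime_type) for an image."""

    metrics = image_metrics(open(path, 'rb').read())
    return (metrics.width, metrics.height, metrics.mime_type)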
|
||||
|
||||
|
||||
#######################
|
||||
#JPEG
|
||||
#######################
|
||||
|
||||
|
||||
class ImageMetrics:
|
||||
"""A container for image data."""
|
||||
|
||||
def __init__(self, width, height, bits_per_pixel, color_count, mime_type):
|
||||
"""Fields are as follows:
|
||||
|
||||
width - image width as an integer number of pixels
|
||||
height - image height as an integer number of pixels
|
||||
bits_per_pixel - the number of bits per pixel as an integer
|
||||
color_count - for palette-based images, the total number of colors
|
||||
mime_type - the image's MIME type, as a string
|
||||
|
||||
All of the ImageMetrics subclasses implement these fields.
|
||||
In addition, they all implement a parse() classmethod
|
||||
used to parse binary string data and return something
|
||||
ImageMetrics compatible.
|
||||
"""
|
||||
|
||||
self.width = width
|
||||
self.height = height
|
||||
self.bits_per_pixel = bits_per_pixel
|
||||
self.color_count = color_count
|
||||
self.mime_type = mime_type
|
||||
|
||||
def __repr__(self):
|
||||
return "ImageMetrics(%s,%s,%s,%s,%s)" % \
|
||||
(repr(self.width),
|
||||
repr(self.height),
|
||||
repr(self.bits_per_pixel),
|
||||
repr(self.color_count),
|
||||
repr(self.mime_type))
|
||||
|
||||
|
||||
class InvalidImage(Exception):
|
||||
"""Raised if an image cannot be parsed correctly."""
|
||||
|
||||
def __init__(self, err):
|
||||
self.err = unicode(err)
|
||||
|
||||
def __unicode__(self):
|
||||
return self.err
|
||||
|
||||
|
||||
class InvalidJPEG(InvalidImage):
|
||||
"""Raised if a JPEG cannot be parsed correctly."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class __JPEG__(ImageMetrics):
|
||||
SEGMENT_HEADER = Con.Struct('segment_header',
|
||||
Con.Const(Con.Byte('header'), 0xFF),
|
||||
Con.Byte('type'),
|
||||
Con.If(
|
||||
lambda ctx: ctx['type'] not in (0xD8, 0xD9),
|
||||
Con.UBInt16('length')))
|
||||
|
||||
APP0 = Con.Struct('JFIF_segment_marker',
|
||||
Con.String('identifier', 5),
|
||||
Con.Byte('major_version'),
|
||||
Con.Byte('minor_version'),
|
||||
Con.Byte('density_units'),
|
||||
Con.UBInt16('x_density'),
|
||||
Con.UBInt16('y_density'),
|
||||
Con.Byte('thumbnail_width'),
|
||||
Con.Byte('thumbnail_height'))
|
||||
|
||||
SOF = Con.Struct('start_of_frame',
|
||||
Con.Byte('data_precision'),
|
||||
Con.UBInt16('image_height'),
|
||||
Con.UBInt16('image_width'),
|
||||
Con.Byte('components'))
|
||||
|
||||
def __init__(self, width, height, bits_per_pixel):
|
||||
ImageMetrics.__init__(self, width, height, bits_per_pixel,
|
||||
0, u'image/jpeg')
|
||||
|
||||
@classmethod
|
||||
def parse(cls, file):
|
||||
try:
|
||||
header = cls.SEGMENT_HEADER.parse_stream(file)
|
||||
if (header.type != 0xD8):
|
||||
raise InvalidJPEG(_(u'Invalid JPEG header'))
|
||||
|
||||
segment = cls.SEGMENT_HEADER.parse_stream(file)
|
||||
while (segment.type != 0xD9):
|
||||
if (segment.type == 0xDA):
|
||||
break
|
||||
|
||||
if (segment.type in (0xC0, 0xC1, 0xC2, 0xC3,
|
||||
0xC5, 0XC5, 0xC6, 0xC7,
|
||||
0xC9, 0xCA, 0xCB, 0xCD,
|
||||
0xCE, 0xCF)): # start of frame
|
||||
segment_data = cStringIO.StringIO(
|
||||
file.read(segment.length - 2))
|
||||
frame0 = cls.SOF.parse_stream(segment_data)
|
||||
segment_data.close()
|
||||
|
||||
return __JPEG__(width=frame0.image_width,
|
||||
height=frame0.image_height,
|
||||
bits_per_pixel=(frame0.data_precision *
|
||||
frame0.components))
|
||||
else:
|
||||
file.seek(segment.length - 2, 1)
|
||||
|
||||
segment = cls.SEGMENT_HEADER.parse_stream(file)
|
||||
|
||||
raise InvalidJPEG(_(u'Start of frame not found'))
|
||||
except Con.ConstError:
|
||||
raise InvalidJPEG(_(u"Invalid JPEG segment marker at 0x%X") % \
|
||||
(file.tell()))
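#Worked example (illustration only): a typical baseline colour JPEG stores
#8 bits of precision across 3 components (Y, Cb, Cr), so the parser above
#reports bits_per_pixel == 8 * 3 == 24.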
|
||||
|
||||
|
||||
#######################
|
||||
#PNG
|
||||
#######################
|
||||
|
||||
|
||||
class InvalidPNG(InvalidImage):
|
||||
"""Raised if a PNG cannot be parsed correctly."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class __PNG__(ImageMetrics):
|
||||
HEADER = Con.Const(Con.String('header', 8),
|
||||
'89504e470d0a1a0a'.decode('hex'))
|
||||
CHUNK_HEADER = Con.Struct('chunk',
|
||||
Con.UBInt32('length'),
|
||||
Con.String('type', 4))
|
||||
CHUNK_FOOTER = Con.Struct('crc32',
|
||||
Con.UBInt32('crc'))
|
||||
|
||||
IHDR = Con.Struct('IHDR',
|
||||
Con.UBInt32('width'),
|
||||
Con.UBInt32('height'),
|
||||
Con.Byte('bit_depth'),
|
||||
Con.Byte('color_type'),
|
||||
Con.Byte('compression_method'),
|
||||
Con.Byte('filter_method'),
|
||||
Con.Byte('interlace_method'))
|
||||
|
||||
def __init__(self, width, height, bits_per_pixel, color_count):
|
||||
ImageMetrics.__init__(self, width, height, bits_per_pixel, color_count,
|
||||
u'image/png')
|
||||
|
||||
@classmethod
|
||||
def parse(cls, file):
|
||||
ihdr = None
|
||||
plte = None
|
||||
|
||||
try:
|
||||
header = cls.HEADER.parse_stream(file)
|
||||
|
||||
chunk_header = cls.CHUNK_HEADER.parse_stream(file)
|
||||
data = file.read(chunk_header.length)
|
||||
chunk_footer = cls.CHUNK_FOOTER.parse_stream(file)
|
||||
while (chunk_header.type != 'IEND'):
|
||||
if (chunk_header.type == 'IHDR'):
|
||||
ihdr = cls.IHDR.parse(data)
|
||||
elif (chunk_header.type == 'PLTE'):
|
||||
plte = data
|
||||
|
||||
chunk_header = cls.CHUNK_HEADER.parse_stream(file)
|
||||
data = file.read(chunk_header.length)
|
||||
chunk_footer = cls.CHUNK_FOOTER.parse_stream(file)
|
||||
|
||||
if (ihdr.color_type == 0): # grayscale
|
||||
bits_per_pixel = ihdr.bit_depth
|
||||
color_count = 0
|
||||
elif (ihdr.color_type == 2): # RGB
|
||||
bits_per_pixel = ihdr.bit_depth * 3
|
||||
color_count = 0
|
||||
elif (ihdr.color_type == 3): # palette
|
||||
bits_per_pixel = 8
|
||||
if ((len(plte) % 3) != 0):
|
||||
raise InvalidPNG(_(u'Invalid PLTE chunk length'))
|
||||
else:
|
||||
color_count = len(plte) / 3
|
||||
elif (ihdr.color_type == 4): # grayscale + alpha
|
||||
bits_per_pixel = ihdr.bit_depth * 2
|
||||
color_count = 0
|
||||
elif (ihdr.color_type == 6): # RGB + alpha
|
||||
bits_per_pixel = ihdr.bit_depth * 4
|
||||
color_count = 0
|
||||
|
||||
return __PNG__(ihdr.width, ihdr.height, bits_per_pixel,
|
||||
color_count)
|
||||
except Con.ConstError:
|
||||
raise InvalidPNG(_(u'Invalid PNG'))
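#Worked example (illustration only): an 8-bit truecolour PNG with alpha
#(color_type 6) reports 8 * 4 == 32 bits_per_pixel above, while an 8-bit
#palette image (color_type 3) reports 8 bits_per_pixel and one colour per
#3-byte PLTE entry.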
|
||||
|
||||
|
||||
#######################
|
||||
#BMP
|
||||
#######################
|
||||
|
||||
|
||||
class InvalidBMP(InvalidImage):
|
||||
"""Raised if a BMP cannot be parsed correctly."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class __BMP__(ImageMetrics):
|
||||
HEADER = Con.Struct('bmp_header',
|
||||
Con.Const(Con.String('magic_number', 2), 'BM'),
|
||||
Con.ULInt32('file_size'),
|
||||
Con.ULInt16('reserved1'),
|
||||
Con.ULInt16('reserved2'),
|
||||
Con.ULInt32('bitmap_data_offset'))
|
||||
|
||||
INFORMATION = Con.Struct('bmp_information',
|
||||
Con.ULInt32('header_size'),
|
||||
Con.ULInt32('width'),
|
||||
Con.ULInt32('height'),
|
||||
Con.ULInt16('color_planes'),
|
||||
Con.ULInt16('bits_per_pixel'),
|
||||
Con.ULInt32('compression_method'),
|
||||
Con.ULInt32('image_size'),
|
||||
Con.ULInt32('horizontal_resolution'),
|
||||
Con.ULInt32('vertical_resolution'),
|
||||
Con.ULInt32('colors_used'),
|
||||
Con.ULInt32('important_colors_used'))
|
||||
|
||||
def __init__(self, width, height, bits_per_pixel, color_count):
|
||||
ImageMetrics.__init__(self, width, height, bits_per_pixel, color_count,
|
||||
u'image/x-ms-bmp')
|
||||
|
||||
@classmethod
|
||||
def parse(cls, file):
|
||||
try:
|
||||
header = cls.HEADER.parse_stream(file)
|
||||
information = cls.INFORMATION.parse_stream(file)
|
||||
|
||||
return __BMP__(information.width, information.height,
|
||||
information.bits_per_pixel,
|
||||
information.colors_used)
|
||||
|
||||
except Con.ConstError:
|
||||
raise InvalidBMP(_(u'Invalid BMP'))
|
||||
|
||||
|
||||
#######################
|
||||
#GIF
|
||||
#######################
|
||||
|
||||
|
||||
class InvalidGIF(InvalidImage):
|
||||
"""Raised if a GIF cannot be parsed correctly."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class __GIF__(ImageMetrics):
|
||||
HEADER = Con.Struct('header',
|
||||
Con.Const(Con.String('gif', 3), 'GIF'),
|
||||
Con.String('version', 3))
|
||||
|
||||
SCREEN_DESCRIPTOR = Con.Struct('logical_screen_descriptor',
|
||||
Con.ULInt16('width'),
|
||||
Con.ULInt16('height'),
|
||||
Con.Embed(
|
||||
Con.BitStruct('packed_fields',
|
||||
Con.Flag('global_color_table'),
|
||||
Con.Bits('color_resolution', 3),
|
||||
Con.Flag('sort'),
|
||||
Con.Bits('global_color_table_size', 3))),
|
||||
Con.Byte('background_color_index'),
|
||||
Con.Byte('pixel_aspect_ratio'))
|
||||
|
||||
def __init__(self, width, height, color_count):
|
||||
ImageMetrics.__init__(self, width, height, 8, color_count,
|
||||
u'image/gif')
|
||||
|
||||
@classmethod
|
||||
def parse(cls, file):
|
||||
try:
|
||||
header = cls.HEADER.parse_stream(file)
|
||||
descriptor = cls.SCREEN_DESCRIPTOR.parse_stream(file)
|
||||
|
||||
return __GIF__(descriptor.width, descriptor.height,
|
||||
2 ** (descriptor.global_color_table_size + 1))
|
||||
except Con.ConstError:
|
||||
raise InvalidGIF(_(u'Invalid GIF'))
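#Worked example (illustration only): the 3-bit global_color_table_size
#field stores N for a table of 2 ** (N + 1) entries, so its maximum value
#of 7 yields the familiar 256-colour GIF palette.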
|
||||
|
||||
|
||||
#######################
|
||||
#TIFF
|
||||
#######################
|
||||
|
||||
|
||||
class InvalidTIFF(InvalidImage):
|
||||
"""Raised if a TIFF cannot be parsed correctly."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class __TIFF__(ImageMetrics):
|
||||
HEADER = Con.Struct('header',
|
||||
Con.String('byte_order', 2),
|
||||
Con.Switch('order',
|
||||
lambda ctx: ctx['byte_order'],
|
||||
{"II": Con.Embed(
|
||||
Con.Struct('little_endian',
|
||||
Con.Const(Con.ULInt16('version'), 42),
|
||||
Con.ULInt32('offset'))),
|
||||
"MM": Con.Embed(
|
||||
Con.Struct('big_endian',
|
||||
Con.Const(Con.UBInt16('version'), 42),
|
||||
Con.UBInt32('offset')))}))
|
||||
|
||||
L_IFD = Con.Struct('ifd',
|
||||
Con.PrefixedArray(
|
||||
length_field=Con.ULInt16('length'),
|
||||
subcon=Con.Struct('tags',
|
||||
Con.ULInt16('id'),
|
||||
Con.ULInt16('type'),
|
||||
Con.ULInt32('count'),
|
||||
Con.ULInt32('offset'))),
|
||||
Con.ULInt32('next'))
|
||||
|
||||
B_IFD = Con.Struct('ifd',
|
||||
Con.PrefixedArray(
|
||||
length_field=Con.UBInt16('length'),
|
||||
subcon=Con.Struct('tags',
|
||||
Con.UBInt16('id'),
|
||||
Con.UBInt16('type'),
|
||||
Con.UBInt32('count'),
|
||||
Con.UBInt32('offset'))),
|
||||
Con.UBInt32('next'))
|
||||
|
||||
def __init__(self, width, height, bits_per_pixel, color_count):
|
||||
ImageMetrics.__init__(self, width, height,
|
||||
bits_per_pixel, color_count,
|
||||
u'image/tiff')
|
||||
|
||||
@classmethod
|
||||
def b_tag_value(cls, file, tag):
|
||||
subtype = {1: Con.Byte("data"),
|
||||
2: Con.CString("data"),
|
||||
3: Con.UBInt16("data"),
|
||||
4: Con.UBInt32("data"),
|
||||
5: Con.Struct("data",
|
||||
Con.UBInt32("high"),
|
||||
Con.UBInt32("low"))}[tag.type]
|
||||
|
||||
data = Con.StrictRepeater(tag.count,
|
||||
subtype)
|
||||
if ((tag.type != 2) and (data.sizeof() <= 4)):
|
||||
return tag.offset
|
||||
else:
|
||||
file.seek(tag.offset, 0)
|
||||
return data.parse_stream(file)
|
||||
|
||||
@classmethod
|
||||
def l_tag_value(cls, file, tag):
|
||||
subtype = {1: Con.Byte("data"),
|
||||
2: Con.CString("data"),
|
||||
3: Con.ULInt16("data"),
|
||||
4: Con.ULInt32("data"),
|
||||
5: Con.Struct("data",
|
||||
Con.ULInt32("high"),
|
||||
Con.ULInt32("low"))}[tag.type]
|
||||
|
||||
data = Con.StrictRepeater(tag.count,
|
||||
subtype)
|
||||
if ((tag.type != 2) and (data.sizeof() <= 4)):
|
||||
return tag.offset
|
||||
else:
|
||||
file.seek(tag.offset, 0)
|
||||
return data.parse_stream(file)
|
||||
|
||||
@classmethod
|
||||
def parse(cls, file):
|
||||
width = 0
|
||||
height = 0
|
||||
bits_per_sample = 0
|
||||
color_count = 0
|
||||
|
||||
try:
|
||||
header = cls.HEADER.parse_stream(file)
|
||||
if (header.byte_order == 'II'):
|
||||
IFD = cls.L_IFD
|
||||
tag_value = cls.l_tag_value
|
||||
elif (header.byte_order == 'MM'):
|
||||
IFD = cls.B_IFD
|
||||
tag_value = cls.b_tag_value
|
||||
else:
|
||||
raise InvalidTIFF(_(u'Invalid byte order'))
|
||||
|
||||
file.seek(header.offset, 0)
|
||||
|
||||
ifd = IFD.parse_stream(file)
|
||||
|
||||
while (True):
|
||||
for tag in ifd.tags:
|
||||
if (tag.id == 0x0100):
|
||||
width = tag_value(file, tag)
|
||||
elif (tag.id == 0x0101):
|
||||
height = tag_value(file, tag)
|
||||
elif (tag.id == 0x0102):
|
||||
try:
|
||||
bits_per_sample = sum(tag_value(file, tag))
|
||||
except TypeError:
|
||||
bits_per_sample = tag_value(file, tag)
|
||||
elif (tag.id == 0x0140):
|
||||
color_count = tag.count / 3
|
||||
else:
|
||||
pass
|
||||
|
||||
if (ifd.next == 0x00):
|
||||
break
|
||||
else:
|
||||
file.seek(ifd.next, 0)
|
||||
ifd = IFD.parse_stream(file)
|
||||
|
||||
return __TIFF__(width, height, bits_per_sample, color_count)
|
||||
except Con.ConstError:
|
||||
raise InvalidTIFF(_(u'Invalid TIFF'))
|
||||
|
||||
|
||||
def can_thumbnail():
|
||||
"""Returns True if we have the capability to thumbnail images."""
|
||||
|
||||
try:
|
||||
import Image as PIL_Image
|
||||
return True
|
||||
except ImportError:
|
||||
return False
|
||||
|
||||
|
||||
def thumbnail_formats():
|
||||
"""Returns a list of available thumbnail image formats."""
|
||||
|
||||
import Image as PIL_Image
|
||||
import cStringIO
|
||||
|
||||
#performing a dummy save seeds PIL_Image.SAVE with possible save types
|
||||
PIL_Image.new("RGB", (1, 1)).save(cStringIO.StringIO(), "bmp")
|
||||
|
||||
return PIL_Image.SAVE.keys()
|
||||
|
||||
|
||||
def thumbnail_image(image_data, width, height, format):
|
||||
"""Generates a new, smaller image from a larger one.
|
||||
|
||||
image_data is a binary string.
|
||||
width and height are the requested maximum values.
|
||||
format is a string of the desired output format, such as 'JPEG'.
|
||||
"""
|
||||
|
||||
import cStringIO
|
||||
import Image as PIL_Image
|
||||
import ImageFile as PIL_ImageFile
|
||||
|
||||
PIL_ImageFile.MAXBLOCK = 0x100000
|
||||
|
||||
img = PIL_Image.open(cStringIO.StringIO(image_data)).convert('RGB')
|
||||
img.thumbnail((width, height), PIL_Image.ANTIALIAS)
|
||||
output = cStringIO.StringIO()
|
||||
|
||||
if (format.upper() == 'JPEG'):
|
||||
#PIL's default JPEG save quality isn't too great
|
||||
#so it's best to add a couple of optimizing parameters
|
||||
#since this is a common case
|
||||
img.save(output, 'JPEG', quality=90, optimize=True)
|
||||
else:
|
||||
img.save(output, format)
|
||||
|
||||
return output.getvalue()
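#Hedged usage sketch (not part of the original audiotools source): shrink
#a cover art blob to at most 200x200 pixels, falling back to the original
#data when PIL is unavailable; 'image_data' is any binary string of image
#data accepted by thumbnail_image() above.
def __example_thumbnail__(image_data):
    """Illustration only: returns a JPEG thumbnail no larger than 200x200."""

    if (can_thumbnail() and ('JPEG' in thumbnail_formats())):
        return thumbnail_image(image_data, 200, 200, 'JPEG')
    else:
        return image_data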
|
4591
Melodia/resources/audiotools/__init__.py
Normal file
File diff suppressed because it is too large
1942
Melodia/resources/audiotools/__m4a__.py
Normal file
File diff suppressed because it is too large
387
Melodia/resources/audiotools/__m4a_atoms__.py
Normal file
@ -0,0 +1,387 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
#Audio Tools, a module and set of tools for manipulating audio data
|
||||
#Copyright (C) 2007-2011 Brian Langenberger
|
||||
|
||||
#This program is free software; you can redistribute it and/or modify
|
||||
#it under the terms of the GNU General Public License as published by
|
||||
#the Free Software Foundation; either version 2 of the License, or
|
||||
#(at your option) any later version.
|
||||
|
||||
#This program is distributed in the hope that it will be useful,
|
||||
#but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
#GNU General Public License for more details.
|
||||
|
||||
#You should have received a copy of the GNU General Public License
|
||||
#along with this program; if not, write to the Free Software
|
||||
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
|
||||
from audiotools import Con
|
||||
|
||||
#M4A atoms are typically laid out in the file as follows:
# ftyp
# mdat
# moov/
# +mvhd
# +iods
# +trak/
# +-tkhd
# +-mdia/
# +--mdhd
# +--hdlr
# +--minf/
# +---smhd
# +---dinf/
# +----dref
# +---stbl/
# +----stsd
# +----stts
# +----stsz
# +----stsc
# +----stco
# +----ctts
# +udta/
# +-meta
#
#Where atoms ending in / are container atoms and the rest are leaf atoms.
#'mdat' is where the file's audio stream is stored
#the rest are various bits of metadata
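#Hedged sketch (not part of the original audiotools source): every atom
#begins with a 32-bit big-endian size (which includes the 8 header bytes)
#followed by a 4 byte type, so a file's top-level atoms can be walked with
#nothing more than the struct module (64-bit extended sizes are ignored
#here for simplicity):
def __example_walk_atoms__(path):
    """Illustration only: yields (type, size) pairs of top-level atoms."""

    import struct

    f = open(path, 'rb')
    try:
        header = f.read(8)
        while (len(header) == 8):
            (size, atom_type) = struct.unpack(">I4s", header)
            if (size < 8):
                break              # malformed or 64-bit sized atom
            yield (atom_type, size)
            f.seek(size - 8, 1)    # skip over the atom's payload
            header = f.read(8)
    finally:
        f.close()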
|
||||
|
||||
|
||||
def VersionLength(name):
|
||||
"""A struct for 32 or 64 bit fields, depending on version field."""
|
||||
|
||||
return Con.IfThenElse(name,
|
||||
lambda ctx: ctx["version"] == 0,
|
||||
Con.UBInt32(None),
|
||||
Con.UBInt64(None))
|
||||
|
||||
|
||||
class AtomAdapter(Con.Adapter):
|
||||
"""An adapter which manages a proper size field."""
|
||||
|
||||
def _encode(self, obj, context):
|
||||
obj.size = len(obj.data) + 8
|
||||
return obj
|
||||
|
||||
def _decode(self, obj, context):
|
||||
del(obj.size)
|
||||
return obj
|
||||
|
||||
|
||||
def Atom(name):
|
||||
"""A basic QuickTime atom struct."""
|
||||
|
||||
return AtomAdapter(Con.Struct(
|
||||
name,
|
||||
Con.UBInt32("size"),
|
||||
Con.String("type", 4),
|
||||
Con.String("data", lambda ctx: ctx["size"] - 8)))
|
||||
|
||||
|
||||
class AtomListAdapter(Con.Adapter):
|
||||
"""An adapter for turning an Atom into a list of atoms.
|
||||
|
||||
This works by parsing its data contents with Atom."""
|
||||
|
||||
ATOM_LIST = Con.GreedyRepeater(Atom("atoms"))
|
||||
|
||||
def _encode(self, obj, context):
|
||||
obj.data = self.ATOM_LIST.build(obj.data)
|
||||
return obj
|
||||
|
||||
def _decode(self, obj, context):
|
||||
obj.data = self.ATOM_LIST.parse(obj.data)
|
||||
return obj
|
||||
|
||||
|
||||
def AtomContainer(name):
|
||||
"""An instantiation of AtomListAdapter."""
|
||||
|
||||
return AtomListAdapter(Atom(name))
|
||||
|
||||
|
||||
class AtomWrapper(Con.Struct):
|
||||
"""Wraps around an existing sub_atom and automatically handles headers."""
|
||||
|
||||
def __init__(self, atom_name, sub_atom):
|
||||
Con.Struct.__init__(self, atom_name)
|
||||
self.atom_name = atom_name
|
||||
self.sub_atom = sub_atom
|
||||
self.header = Con.Struct(atom_name,
|
||||
Con.UBInt32("size"),
|
||||
Con.Const(Con.String("type", 4), atom_name))
|
||||
|
||||
def _parse(self, stream, context):
|
||||
header = self.header.parse_stream(stream)
|
||||
return self.sub_atom.parse_stream(stream)
|
||||
|
||||
def _build(self, obj, stream, context):
|
||||
data = self.sub_atom.build(obj)
|
||||
stream.write(self.header.build(Con.Container(type=self.atom_name,
|
||||
size=len(data) + 8)))
|
||||
stream.write(data)
|
||||
|
||||
def _sizeof(self, context):
|
||||
return self.sub_atom.sizeof(context) + 8
|
||||
|
||||
|
||||
ATOM_FTYP = Con.Struct(
|
||||
"ftyp",
|
||||
Con.String("major_brand", 4),
|
||||
Con.UBInt32("major_brand_version"),
|
||||
Con.GreedyRepeater(Con.String("compatible_brands", 4)))
|
||||
|
||||
ATOM_MVHD = Con.Struct(
|
||||
"mvhd",
|
||||
Con.Byte("version"),
|
||||
Con.String("flags", 3),
|
||||
VersionLength("created_mac_UTC_date"),
|
||||
VersionLength("modified_mac_UTC_date"),
|
||||
Con.UBInt32("time_scale"),
|
||||
VersionLength("duration"),
|
||||
Con.UBInt32("playback_speed"),
|
||||
Con.UBInt16("user_volume"),
|
||||
Con.Padding(10),
|
||||
Con.Struct("windows",
|
||||
Con.UBInt32("geometry_matrix_a"),
|
||||
Con.UBInt32("geometry_matrix_b"),
|
||||
Con.UBInt32("geometry_matrix_u"),
|
||||
Con.UBInt32("geometry_matrix_c"),
|
||||
Con.UBInt32("geometry_matrix_d"),
|
||||
Con.UBInt32("geometry_matrix_v"),
|
||||
Con.UBInt32("geometry_matrix_x"),
|
||||
Con.UBInt32("geometry_matrix_y"),
|
||||
Con.UBInt32("geometry_matrix_w")),
|
||||
Con.UBInt64("quicktime_preview"),
|
||||
Con.UBInt32("quicktime_still_poster"),
|
||||
Con.UBInt64("quicktime_selection_time"),
|
||||
Con.UBInt32("quicktime_current_time"),
|
||||
Con.UBInt32("next_track_id"))
|
||||
|
||||
ATOM_IODS = Con.Struct(
|
||||
"iods",
|
||||
Con.Byte("version"),
|
||||
Con.String("flags", 3),
|
||||
Con.Byte("type_tag"),
|
||||
Con.Switch("descriptor",
|
||||
lambda ctx: ctx.type_tag,
|
||||
{0x10: Con.Struct(
|
||||
None,
|
||||
Con.StrictRepeater(3, Con.Byte("extended_descriptor_type")),
|
||||
Con.Byte("descriptor_type_length"),
|
||||
Con.UBInt16("OD_ID"),
|
||||
Con.Byte("OD_profile"),
|
||||
Con.Byte("scene_profile"),
|
||||
Con.Byte("audio_profile"),
|
||||
Con.Byte("video_profile"),
|
||||
Con.Byte("graphics_profile")),
|
||||
0x0E: Con.Struct(
|
||||
None,
|
||||
Con.StrictRepeater(3, Con.Byte("extended_descriptor_type")),
|
||||
Con.Byte("descriptor_type_length"),
|
||||
Con.String("track_id", 4))}))
|
||||
|
||||
ATOM_TKHD = Con.Struct(
|
||||
"tkhd",
|
||||
Con.Byte("version"),
|
||||
Con.BitStruct("flags",
|
||||
Con.Padding(20),
|
||||
Con.Flag("TrackInPoster"),
|
||||
Con.Flag("TrackInPreview"),
|
||||
Con.Flag("TrackInMovie"),
|
||||
Con.Flag("TrackEnabled")),
|
||||
VersionLength("created_mac_UTC_date"),
|
||||
VersionLength("modified_mac_UTC_date"),
|
||||
Con.UBInt32("track_id"),
|
||||
Con.Padding(4),
|
||||
VersionLength("duration"),
|
||||
Con.Padding(8),
|
||||
Con.UBInt16("video_layer"),
|
||||
Con.UBInt16("quicktime_alternate"),
|
||||
Con.UBInt16("volume"),
|
||||
Con.Padding(2),
|
||||
Con.Struct("video",
|
||||
Con.UBInt32("geometry_matrix_a"),
|
||||
Con.UBInt32("geometry_matrix_b"),
|
||||
Con.UBInt32("geometry_matrix_u"),
|
||||
Con.UBInt32("geometry_matrix_c"),
|
||||
Con.UBInt32("geometry_matrix_d"),
|
||||
Con.UBInt32("geometry_matrix_v"),
|
||||
Con.UBInt32("geometry_matrix_x"),
|
||||
Con.UBInt32("geometry_matrix_y"),
|
||||
Con.UBInt32("geometry_matrix_w")),
|
||||
Con.UBInt32("video_width"),
|
||||
Con.UBInt32("video_height"))
|
||||
|
||||
ATOM_MDHD = Con.Struct(
|
||||
"mdhd",
|
||||
Con.Byte("version"),
|
||||
Con.String("flags", 3),
|
||||
VersionLength("created_mac_UTC_date"),
|
||||
VersionLength("modified_mac_UTC_date"),
|
||||
Con.UBInt32("time_scale"),
|
||||
VersionLength("duration"),
|
||||
Con.BitStruct("languages",
|
||||
Con.Padding(1),
|
||||
Con.StrictRepeater(3,
|
||||
Con.Bits("language", 5))),
|
||||
Con.UBInt16("quicktime_quality"))
|
||||
|
||||
|
||||
ATOM_HDLR = Con.Struct(
|
||||
"hdlr",
|
||||
Con.Byte("version"),
|
||||
Con.String("flags", 3),
|
||||
Con.String("quicktime_type", 4),
|
||||
Con.String("subtype", 4),
|
||||
Con.String("quicktime_manufacturer", 4),
|
||||
Con.UBInt32("quicktime_component_reserved_flags"),
|
||||
Con.UBInt32("quicktime_component_reserved_flags_mask"),
|
||||
Con.PascalString("component_name"),
|
||||
Con.Padding(1))
|
||||
|
||||
ATOM_SMHD = Con.Struct(
|
||||
'smhd',
|
||||
Con.Byte("version"),
|
||||
Con.String("flags", 3),
|
||||
Con.String("audio_balance", 2),
|
||||
Con.Padding(2))
|
||||
|
||||
ATOM_DREF = Con.Struct(
|
||||
'dref',
|
||||
Con.Byte("version"),
|
||||
Con.String("flags", 3),
|
||||
Con.PrefixedArray(
|
||||
length_field=Con.UBInt32("num_references"),
|
||||
subcon=Atom("references")))
|
||||
|
||||
|
||||
ATOM_STSD = Con.Struct(
|
||||
'stsd',
|
||||
Con.Byte("version"),
|
||||
Con.String("flags", 3),
|
||||
Con.PrefixedArray(
|
||||
length_field=Con.UBInt32("num_descriptions"),
|
||||
subcon=Atom("descriptions")))
|
||||
|
||||
ATOM_MP4A = Con.Struct(
|
||||
"mp4a",
|
||||
Con.Padding(6),
|
||||
Con.UBInt16("reference_index"),
|
||||
Con.UBInt16("quicktime_audio_encoding_version"),
|
||||
Con.UBInt16("quicktime_audio_encoding_revision"),
|
||||
Con.String("quicktime_audio_encoding_vendor", 4),
|
||||
Con.UBInt16("channels"),
|
||||
Con.UBInt16("sample_size"),
|
||||
Con.UBInt16("audio_compression_id"),
|
||||
Con.UBInt16("quicktime_audio_packet_size"),
|
||||
Con.String("sample_rate", 4))
|
||||
|
||||
#out of all this mess, the only interesting bits are the _bit_rate fields
|
||||
#and (maybe) the buffer_size
|
||||
#everything else is a constant of some kind as far as I can tell
|
||||
ATOM_ESDS = Con.Struct(
|
||||
"esds",
|
||||
Con.Byte("version"),
|
||||
Con.String("flags", 3),
|
||||
Con.Byte("ES_descriptor_type"),
|
||||
Con.StrictRepeater(
|
||||
3, Con.Byte("extended_descriptor_type_tag")),
|
||||
Con.Byte("descriptor_type_length"),
|
||||
Con.UBInt16("ES_ID"),
|
||||
Con.Byte("stream_priority"),
|
||||
Con.Byte("decoder_config_descriptor_type"),
|
||||
Con.StrictRepeater(
|
||||
3, Con.Byte("extended_descriptor_type_tag2")),
|
||||
Con.Byte("descriptor_type_length2"),
|
||||
Con.Byte("object_ID"),
|
||||
Con.Embed(
|
||||
Con.BitStruct(None, Con.Bits("stream_type", 6),
|
||||
Con.Flag("upstream_flag"),
|
||||
Con.Flag("reserved_flag"),
|
||||
Con.Bits("buffer_size", 24))),
|
||||
Con.UBInt32("maximum_bit_rate"),
|
||||
Con.UBInt32("average_bit_rate"),
|
||||
Con.Byte('decoder_specific_descriptor_type3'),
|
||||
Con.StrictRepeater(
|
||||
3, Con.Byte("extended_descriptor_type_tag2")),
|
||||
Con.PrefixedArray(
|
||||
length_field=Con.Byte("ES_header_length"),
|
||||
subcon=Con.Byte("ES_header_start_codes")),
|
||||
Con.Byte("SL_config_descriptor_type"),
|
||||
Con.StrictRepeater(
|
||||
3, Con.Byte("extended_descriptor_type_tag3")),
|
||||
Con.Byte("descriptor_type_length3"),
|
||||
Con.Byte("SL_value"))
|
||||
|
||||
|
||||
ATOM_STTS = Con.Struct(
|
||||
'stts',
|
||||
Con.Byte("version"),
|
||||
Con.String("flags", 3),
|
||||
Con.PrefixedArray(length_field=Con.UBInt32("total_counts"),
|
||||
subcon=Con.Struct("frame_size_counts",
|
||||
Con.UBInt32("frame_count"),
|
||||
Con.UBInt32("duration"))))
|
||||
|
||||
|
||||
ATOM_STSZ = Con.Struct(
|
||||
'stsz',
|
||||
Con.Byte("version"),
|
||||
Con.String("flags", 3),
|
||||
Con.UBInt32("block_byte_size"),
|
||||
Con.PrefixedArray(length_field=Con.UBInt32("total_sizes"),
|
||||
subcon=Con.UBInt32("block_byte_sizes")))
|
||||
|
||||
|
||||
ATOM_STSC = Con.Struct(
|
||||
'stsc',
|
||||
Con.Byte("version"),
|
||||
Con.String("flags", 3),
|
||||
Con.PrefixedArray(
|
||||
length_field=Con.UBInt32("entry_count"),
|
||||
subcon=Con.Struct("block",
|
||||
Con.UBInt32("first_chunk"),
|
||||
Con.UBInt32("samples_per_chunk"),
|
||||
Con.UBInt32("sample_description_index"))))
|
||||
|
||||
ATOM_STCO = Con.Struct(
|
||||
'stco',
|
||||
Con.Byte("version"),
|
||||
Con.String("flags", 3),
|
||||
Con.PrefixedArray(
|
||||
length_field=Con.UBInt32("total_offsets"),
|
||||
subcon=Con.UBInt32("offset")))
|
||||
|
||||
ATOM_CTTS = Con.Struct(
|
||||
'ctts',
|
||||
Con.Byte("version"),
|
||||
Con.String("flags", 3),
|
||||
Con.PrefixedArray(
|
||||
length_field=Con.UBInt32("entry_count"),
|
||||
subcon=Con.Struct("sample",
|
||||
Con.UBInt32("sample_count"),
|
||||
Con.UBInt32("sample_offset"))))
|
||||
|
||||
ATOM_META = Con.Struct(
|
||||
'meta',
|
||||
Con.Byte("version"),
|
||||
Con.String("flags", 3),
|
||||
Con.GreedyRepeater(Atom("atoms")))
|
||||
|
||||
ATOM_ILST = Con.GreedyRepeater(AtomContainer('ilst'))
|
||||
|
||||
ATOM_TRKN = Con.Struct(
|
||||
'trkn',
|
||||
Con.Padding(2),
|
||||
Con.UBInt16('track_number'),
|
||||
Con.UBInt16('total_tracks'),
|
||||
Con.Padding(2))
|
||||
|
||||
ATOM_DISK = Con.Struct(
|
||||
'disk',
|
||||
Con.Padding(2),
|
||||
Con.UBInt16('disk_number'),
|
||||
Con.UBInt16('total_disks'))
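#Hedged example (not part of the original audiotools source): the 'trkn'
#payload above packs "track N of M" into 8 bytes, so parsing and rebuilding
#a raw payload (here a hypothetical 8-byte string named raw_trkn) is
#symmetric:
#
#    trkn = ATOM_TRKN.parse(raw_trkn)
#    print trkn.track_number, trkn.total_tracks
#    assert ATOM_TRKN.build(trkn) == raw_trkn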
|
973
Melodia/resources/audiotools/__mp3__.py
Normal file
@ -0,0 +1,973 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
#Audio Tools, a module and set of tools for manipulating audio data
|
||||
#Copyright (C) 2007-2011 Brian Langenberger
|
||||
|
||||
#This program is free software; you can redistribute it and/or modify
|
||||
#it under the terms of the GNU General Public License as published by
|
||||
#the Free Software Foundation; either version 2 of the License, or
|
||||
#(at your option) any later version.
|
||||
|
||||
#This program is distributed in the hope that it will be useful,
|
||||
#but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
#GNU General Public License for more details.
|
||||
|
||||
#You should have received a copy of the GNU General Public License
|
||||
#along with this program; if not, write to the Free Software
|
||||
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
|
||||
|
||||
from audiotools import (AudioFile, InvalidFile, PCMReader, PCMConverter,
|
||||
Con, transfer_data, transfer_framelist_data,
|
||||
subprocess, BIN, BIG_ENDIAN, ApeTag, ReplayGain,
|
||||
ignore_sigint, open_files, EncodingError,
|
||||
DecodingError, PCMReaderError, ChannelMask,
|
||||
__default_quality__, config)
|
||||
from __id3__ import *
|
||||
import gettext
|
||||
|
||||
gettext.install("audiotools", unicode=True)
|
||||
|
||||
|
||||
#######################
|
||||
#MP3
|
||||
#######################
|
||||
|
||||
|
||||
class MPEG_Frame_Header(Con.Adapter):
|
||||
#mpeg_version->sample_rate bits->Hz
|
||||
SAMPLE_RATE = [[11025, 12000, 8000, None],
|
||||
[None, None, None, None],
|
||||
[22050, 24000, 16000, None],
|
||||
[44100, 48000, 32000, None]]
|
||||
|
||||
#(mpeg_version, layer)->bitrate bits->bits per second
|
||||
BIT_RATE = {(3, 3): [0, 32000, 64000, 96000,
|
||||
128000, 160000, 192000, 224000,
|
||||
256000, 288000, 320000, 352000,
|
||||
384000, 416000, 448000, None],
|
||||
(3, 2): [0, 32000, 48000, 56000,
|
||||
64000, 80000, 96000, 112000,
|
||||
128000, 160000, 192000, 224000,
|
||||
256000, 320000, 384000, None],
|
||||
(3, 1): [0, 32000, 40000, 48000,
|
||||
56000, 64000, 80000, 96000,
|
||||
112000, 128000, 160000, 192000,
|
||||
224000, 256000, 320000, None],
|
||||
(2, 3): [0, 32000, 48000, 56000,
|
||||
64000, 80000, 96000, 112000,
|
||||
128000, 144000, 160000, 176000,
|
||||
192000, 224000, 256000, None],
|
||||
(2, 2): [0, 8000, 16000, 24000,
|
||||
32000, 40000, 48000, 56000,
|
||||
64000, 80000, 96000, 112000,
|
||||
128000, 144000, 160000, None]}
|
||||
|
||||
#mpeg_version->Hz->sample_rate bits
|
||||
SAMPLE_RATE_REVERSE = {0: {11025: 0,
|
||||
12000: 1,
|
||||
8000: 2},
|
||||
1: {None: 0},
|
||||
2: {22050: 0,
|
||||
24000: 1,
|
||||
16000: 2,
|
||||
None: 3},
|
||||
3: {44100: 0,
|
||||
48000: 1,
|
||||
32000: 2,
|
||||
None: 3}}
|
||||
|
||||
BIT_RATE_REVERSE = dict([(key, dict([(rate, i) for (i, rate) in
|
||||
enumerate(values)]))
|
||||
for (key, values) in BIT_RATE.items()])
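#Worked example (illustration only): for an MPEG-1 (version bits 3),
#Layer III (layer bits 1) stream, 128000 bits per second sits at index 9
#of the table above, so BIT_RATE_REVERSE[(3, 1)][128000] == 9, which is
#the value _encode() writes back into the header's 4-bit bitrate field.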
|
||||
|
||||
def __init__(self, name):
|
||||
Con.Adapter.__init__(
|
||||
self,
|
||||
Con.BitStruct("mp3_header",
|
||||
Con.Const(Con.Bits("sync", 11), 0x7FF),
|
||||
Con.Bits("mpeg_version", 2),
|
||||
Con.Bits("layer", 2),
|
||||
Con.Flag("no_crc16", 1),
|
||||
Con.Bits("bitrate", 4),
|
||||
Con.Bits("sample_rate", 2),
|
||||
Con.Bits("pad", 1),
|
||||
Con.Bits("private", 1),
|
||||
Con.Bits("channel", 2),
|
||||
Con.Bits("mode_extension", 2),
|
||||
Con.Flag("copyright", 1),
|
||||
Con.Flag("original", 1),
|
||||
Con.Bits("emphasis", 2)))
|
||||
|
||||
def _encode(self, obj, content):
|
||||
obj.sample_rate = self.SAMPLE_RATE_REVERSE[obj.mpeg_version][
|
||||
obj.sample_rate]
|
||||
obj.bitrate = self.BIT_RATE_REVERSE[(obj.mpeg_version, obj.layer)][
|
||||
obj.bitrate]
|
||||
return obj
|
||||
|
||||
def _decode(self, obj, content):
|
||||
obj.sample_rate = self.SAMPLE_RATE[obj.mpeg_version][obj.sample_rate]
|
||||
obj.channel_count = [2, 2, 2, 1][obj.channel]
|
||||
obj.bitrate = self.BIT_RATE[(obj.mpeg_version, obj.layer)][obj.bitrate]
|
||||
|
||||
if (obj.layer == 3):
|
||||
obj.byte_length = (((12 * obj.bitrate) / obj.sample_rate) +
|
||||
obj.pad) * 4
|
||||
else:
|
||||
obj.byte_length = ((144 * obj.bitrate) / obj.sample_rate) + obj.pad
|
||||
|
||||
return obj
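#Worked example (illustration only): a Layer III frame at 128000 bits per
#second and 44100 Hz with the pad bit clear is
#(144 * 128000) / 44100 == 417 bytes long (418 when padded), which is the
#byte_length computed above.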
|
||||
|
||||
|
||||
def MPEG_crc16(data, total_bits):
|
||||
def crc16_val(value, crc, total_bits):
|
||||
value <<= 8
|
||||
for i in xrange(total_bits):
|
||||
value <<= 1
|
||||
crc <<= 1
|
||||
|
||||
if (((crc ^ value) & 0x10000)):
|
||||
crc ^= 0x8005
|
||||
|
||||
return crc & 0xFFFF
|
||||
|
||||
checksum = 0xFFFF
|
||||
data = map(ord, data)
|
||||
while (total_bits >= 8):
|
||||
checksum = crc16_val(data.pop(0), checksum, 8)
|
||||
total_bits -= 8
|
||||
|
||||
if (total_bits > 0):
|
||||
return crc16_val(data.pop(0), checksum, total_bits)
|
||||
else:
|
||||
return checksum
|
||||
|
||||
|
||||
class InvalidMP3(InvalidFile):
|
||||
"""Raised by invalid files during MP3 initialization."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class MP3Audio(AudioFile):
|
||||
"""An MP3 audio file."""
|
||||
|
||||
SUFFIX = "mp3"
|
||||
NAME = SUFFIX
|
||||
DEFAULT_COMPRESSION = "2"
|
||||
#0 is better quality/lower compression
|
||||
#9 is worse quality/higher compression
|
||||
COMPRESSION_MODES = ("0", "1", "2", "3", "4", "5", "6",
|
||||
"medium", "standard", "extreme", "insane")
|
||||
COMPRESSION_DESCRIPTIONS = {"0": _(u"high quality, larger files, " +
|
||||
u"corresponds to lame's -V0"),
|
||||
"6": _(u"lower quality, smaller files, " +
|
||||
u"corresponds to lame's -V6"),
|
||||
"medium": _(u"corresponds to lame's " +
|
||||
u"--preset medium"),
|
||||
"standard": _(u"corresponds to lame's " +
|
||||
u"--preset standard"),
|
||||
"extreme": _(u"corresponds to lame's " +
|
||||
u"--preset extreme"),
|
||||
"insane": _(u"corresponds to lame's " +
|
||||
u"--preset insane")}
|
||||
BINARIES = ("lame",)
|
||||
REPLAYGAIN_BINARIES = ("mp3gain", )
|
||||
|
||||
#MPEG1, Layer 1
|
||||
#MPEG1, Layer 2,
|
||||
#MPEG1, Layer 3,
|
||||
#MPEG2, Layer 1,
|
||||
#MPEG2, Layer 2,
|
||||
#MPEG2, Layer 3
|
||||
MP3_BITRATE = ((None, None, None, None, None, None),
|
||||
(32, 32, 32, 32, 8, 8),
|
||||
(64, 48, 40, 48, 16, 16),
|
||||
(96, 56, 48, 56, 24, 24),
|
||||
(128, 64, 56, 64, 32, 32),
|
||||
(160, 80, 64, 80, 40, 40),
|
||||
(192, 96, 80, 96, 48, 48),
|
||||
(224, 112, 96, 112, 56, 56),
|
||||
(256, 128, 112, 128, 64, 64),
|
||||
(288, 160, 128, 144, 80, 80),
|
||||
(320, 192, 160, 160, 96, 96),
|
||||
(352, 224, 192, 176, 112, 112),
|
||||
(384, 256, 224, 192, 128, 128),
|
||||
(416, 320, 256, 224, 144, 144),
|
||||
(448, 384, 320, 256, 160, 160))
|
||||
|
||||
#MPEG1, MPEG2, MPEG2.5
|
||||
MP3_SAMPLERATE = ((44100, 22050, 11025),
|
||||
(48000, 24000, 12000),
|
||||
(32000, 16000, 8000))
|
||||
|
||||
MP3_FRAME_HEADER = Con.BitStruct("mp3_header",
|
||||
Con.Const(Con.Bits("sync", 11), 0x7FF),
|
||||
Con.Bits("mpeg_version", 2),
|
||||
Con.Bits("layer", 2),
|
||||
Con.Flag("protection", 1),
|
||||
Con.Bits("bitrate", 4),
|
||||
Con.Bits("sampling_rate", 2),
|
||||
Con.Bits("padding", 1),
|
||||
Con.Bits("private", 1),
|
||||
Con.Bits("channel", 2),
|
||||
Con.Bits("mode_extension", 2),
|
||||
Con.Flag("copyright", 1),
|
||||
Con.Flag("original", 1),
|
||||
Con.Bits("emphasis", 2))
|
||||
|
||||
XING_HEADER = Con.Struct("xing_header",
|
||||
Con.Bytes("header_id", 4),
|
||||
Con.Bytes("flags", 4),
|
||||
Con.UBInt32("num_frames"),
|
||||
Con.UBInt32("bytes"),
|
||||
Con.StrictRepeater(100, Con.Byte("toc_entries")),
|
||||
Con.UBInt32("quality"))
|
||||
|
||||
def __init__(self, filename):
|
||||
"""filename is a plain string."""
|
||||
|
||||
AudioFile.__init__(self, filename)
|
||||
|
||||
try:
|
||||
mp3file = file(filename, "rb")
|
||||
except IOError, msg:
|
||||
raise InvalidMP3(str(msg))
|
||||
|
||||
try:
|
||||
try:
|
||||
MP3Audio.__find_next_mp3_frame__(mp3file)
|
||||
except ValueError:
|
||||
raise InvalidMP3(_(u"MP3 frame not found"))
|
||||
fr = MP3Audio.MP3_FRAME_HEADER.parse(mp3file.read(4))
|
||||
self.__samplerate__ = MP3Audio.__get_mp3_frame_sample_rate__(fr)
|
||||
self.__channels__ = MP3Audio.__get_mp3_frame_channels__(fr)
|
||||
self.__framelength__ = self.__length__()
|
||||
finally:
|
||||
mp3file.close()
|
||||
|
||||
@classmethod
|
||||
def is_type(cls, file):
|
||||
"""Returns True if the given file object describes this format.
|
||||
|
||||
Takes a seekable file pointer rewound to the start of the file."""
|
||||
|
||||
ID3v2Comment.skip(file)
|
||||
|
||||
try:
|
||||
frame = cls.MP3_FRAME_HEADER.parse_stream(file)
|
||||
if ((frame.sync == 0x07FF) and
|
||||
(frame.mpeg_version in (0x03, 0x02, 0x00)) and
|
||||
(frame.layer in (0x01, 0x03))):
|
||||
return True
|
||||
else:
|
||||
#oddly, MP3s sometimes turn up in RIFF containers
|
||||
#this isn't a good idea, but can be supported nonetheless
|
||||
file.seek(-cls.MP3_FRAME_HEADER.sizeof(), 1)
|
||||
header = file.read(12)
|
||||
if ((header[0:4] == 'RIFF') and
|
||||
(header[8:12] == 'RMP3')):
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
except:
|
||||
return False
|
||||
|
||||
def lossless(self):
|
||||
"""Returns False."""
|
||||
|
||||
return False
|
||||
|
||||
def to_pcm(self):
|
||||
"""Returns a PCMReader object containing the track's PCM data."""
|
||||
|
||||
#if mpg123 is available, use that for decoding
|
||||
if (BIN.can_execute(BIN["mpg123"])):
|
||||
sub = subprocess.Popen([BIN["mpg123"], "-qs", self.filename],
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=file(os.devnull, "a"))
|
||||
return PCMReader(sub.stdout,
|
||||
sample_rate=self.sample_rate(),
|
||||
channels=self.channels(),
|
||||
bits_per_sample=16,
|
||||
channel_mask=int(ChannelMask.from_channels(
|
||||
self.channels())),
|
||||
process=sub,
|
||||
big_endian=BIG_ENDIAN)
|
||||
else:
|
||||
#if not, use LAME for decoding
|
||||
if (self.filename.endswith("." + self.SUFFIX)):
|
||||
if (BIG_ENDIAN):
|
||||
endian = ['-x']
|
||||
else:
|
||||
endian = []
|
||||
|
||||
sub = subprocess.Popen([BIN['lame']] + endian + \
|
||||
["--decode", "-t", "--quiet",
|
||||
self.filename, "-"],
|
||||
stdout=subprocess.PIPE)
|
||||
return PCMReader(
|
||||
sub.stdout,
|
||||
sample_rate=self.sample_rate(),
|
||||
channels=self.channels(),
|
||||
bits_per_sample=16,
|
||||
channel_mask=int(self.channel_mask()),
|
||||
process=sub)
|
||||
else:
|
||||
import tempfile
|
||||
from audiotools import TempWaveReader
|
||||
#copy our file to one that ends with .mp3
|
||||
tempmp3 = tempfile.NamedTemporaryFile(suffix='.' + self.SUFFIX)
|
||||
f = open(self.filename, 'rb')
|
||||
transfer_data(f.read, tempmp3.write)
|
||||
f.close()
|
||||
tempmp3.flush()
|
||||
|
||||
#decode the mp3 file to a WAVE file
|
||||
wave = tempfile.NamedTemporaryFile(suffix='.wav')
|
||||
returnval = subprocess.call([BIN['lame'], "--decode",
|
||||
"--quiet",
|
||||
tempmp3.name, wave.name])
|
||||
tempmp3.close()
|
||||
|
||||
if (returnval == 0):
|
||||
#return WAVE file as a stream
|
||||
wave.seek(0, 0)
|
||||
return TempWaveReader(wave)
|
||||
else:
|
||||
return PCMReaderError(
|
||||
error_message=u"lame exited with error",
|
||||
sample_rate=self.sample_rate(),
|
||||
channels=self.channels(),
|
||||
channel_mask=int(self.channel_mask()),
|
||||
bits_per_sample=16)
|
||||
|
||||
@classmethod
|
||||
def __help_output__(cls):
|
||||
import cStringIO
|
||||
help_data = cStringIO.StringIO()
|
||||
sub = subprocess.Popen([BIN['lame'], '--help'],
|
||||
stdout=subprocess.PIPE)
|
||||
transfer_data(sub.stdout.read, help_data.write)
|
||||
sub.wait()
|
||||
return help_data.getvalue()
|
||||
|
||||
@classmethod
|
||||
def __lame_version__(cls):
|
||||
try:
|
||||
version = re.findall(r'version \d+\.\d+',
|
||||
cls.__help_output__())[0]
|
||||
return tuple(map(int, version[len('version '):].split(".")))
|
||||
except IndexError:
|
||||
return (0, 0)
|
||||
|
||||
@classmethod
|
||||
def from_pcm(cls, filename, pcmreader, compression=None):
|
||||
"""Encodes a new file from PCM data.
|
||||
|
||||
Takes a filename string, PCMReader object
|
||||
and optional compression level string.
|
||||
Encodes a new audio file from pcmreader's data
|
||||
at the given filename with the specified compression level
|
||||
and returns a new MP3Audio object."""
|
||||
|
||||
import decimal
|
||||
import bisect
|
||||
|
||||
if ((compression is None) or
|
||||
(compression not in cls.COMPRESSION_MODES)):
|
||||
compression = __default_quality__(cls.NAME)
|
||||
|
||||
if ((pcmreader.channels > 2) or
|
||||
(pcmreader.sample_rate not in (32000, 48000, 44100))):
|
||||
pcmreader = PCMConverter(
|
||||
pcmreader,
|
||||
sample_rate=[32000, 32000, 44100, 48000][bisect.bisect(
|
||||
[32000, 44100, 48000], pcmreader.sample_rate)],
|
||||
channels=min(pcmreader.channels, 2),
|
||||
channel_mask=ChannelMask.from_channels(
|
||||
min(pcmreader.channels, 2)),
|
||||
bits_per_sample=16)
|
||||
|
||||
if (pcmreader.channels > 1):
|
||||
mode = "j"
|
||||
else:
|
||||
mode = "m"
|
||||
|
||||
#FIXME - not sure if all LAME versions support "--little-endian"
|
||||
# #LAME 3.98 (and up, presumably) handle the byteswap correctly
|
||||
# #LAME 3.97 always uses -x
|
||||
# if (BIG_ENDIAN or (cls.__lame_version__() < (3,98))):
|
||||
# endian = ['-x']
|
||||
# else:
|
||||
# endian = []
|
||||
|
||||
devnull = file(os.devnull, 'ab')
|
||||
|
||||
if (str(compression) in map(str, range(0, 10))):
|
||||
compression = ["-V" + str(compression)]
|
||||
else:
|
||||
compression = ["--preset", str(compression)]
|
||||
|
||||
sub = subprocess.Popen([
|
||||
BIN['lame'], "--quiet",
|
||||
"-r",
|
||||
"-s", str(decimal.Decimal(pcmreader.sample_rate) / 1000),
|
||||
"--bitwidth", str(pcmreader.bits_per_sample),
|
||||
"--signed", "--little-endian",
|
||||
"-m", mode] + compression + ["-", filename],
|
||||
stdin=subprocess.PIPE,
|
||||
stdout=devnull,
|
||||
stderr=devnull,
|
||||
preexec_fn=ignore_sigint)
|
||||
|
||||
try:
|
||||
transfer_framelist_data(pcmreader, sub.stdin.write)
|
||||
except (IOError, ValueError), err:
|
||||
sub.stdin.close()
|
||||
sub.wait()
|
||||
cls.__unlink__(filename)
|
||||
raise EncodingError(str(err))
|
||||
except Exception, err:
|
||||
sub.stdin.close()
|
||||
sub.wait()
|
||||
cls.__unlink__(filename)
|
||||
raise err
|
||||
|
||||
try:
|
||||
pcmreader.close()
|
||||
except DecodingError, err:
|
||||
cls.__unlink__(filename)
|
||||
raise EncodingError(err.error_message)
|
||||
sub.stdin.close()
|
||||
|
||||
devnull.close()
|
||||
|
||||
if (sub.wait() == 0):
|
||||
return MP3Audio(filename)
|
||||
else:
|
||||
cls.__unlink__(filename)
|
||||
raise EncodingError(u"error encoding file with lame")
|
||||
|
||||
def bits_per_sample(self):
|
||||
"""Returns an integer number of bits-per-sample this track contains."""
|
||||
|
||||
return 16
|
||||
|
||||
def channels(self):
|
||||
"""Returns an integer number of channels this track contains."""
|
||||
|
||||
return self.__channels__
|
||||
|
||||
def sample_rate(self):
|
||||
"""Returns the rate of the track's audio as an integer number of Hz."""
|
||||
|
||||
return self.__samplerate__
|
||||
|
||||
def get_metadata(self):
|
||||
"""Returns a MetaData object, or None.
|
||||
|
||||
Raises IOError if unable to read the file."""
|
||||
|
||||
f = file(self.filename, "rb")
|
||||
try:
|
||||
if (f.read(3) != "ID3"): # no ID3v2 tag, try ID3v1
|
||||
id3v1 = ID3v1Comment.read_id3v1_comment(self.filename)
|
||||
if (id3v1[-1] == -1): # no ID3v1 either
|
||||
return None
|
||||
else:
|
||||
return ID3v1Comment(id3v1)
|
||||
else:
|
||||
id3v2 = ID3v2Comment.read_id3v2_comment(self.filename)
|
||||
|
||||
id3v1 = ID3v1Comment.read_id3v1_comment(self.filename)
|
||||
if (id3v1[-1] == -1): # only ID3v2, no ID3v1
|
||||
return id3v2
|
||||
else: # both ID3v2 and ID3v1
|
||||
return ID3CommentPair(
|
||||
id3v2,
|
||||
ID3v1Comment(id3v1))
|
||||
finally:
|
||||
f.close()
|
||||
|
||||
def set_metadata(self, metadata):
|
||||
"""Takes a MetaData object and sets this track's metadata.
|
||||
|
||||
This metadata includes track name, album name, and so on.
|
||||
Raises IOError if unable to write the file."""
|
||||
|
||||
if (metadata is None):
|
||||
return
|
||||
|
||||
if ((not isinstance(metadata, ID3v2Comment)) and
|
||||
(not isinstance(metadata, ID3v1Comment))):
|
||||
DEFAULT_ID3V2 = "id3v2.3"
|
||||
DEFAULT_ID3V1 = "id3v1.1"
|
||||
|
||||
id3v2_class = {"id3v2.2": ID3v22Comment,
|
||||
"id3v2.3": ID3v23Comment,
|
||||
"id3v2.4": ID3v24Comment,
|
||||
"none": None}.get(config.get_default("ID3",
|
||||
"id3v2",
|
||||
DEFAULT_ID3V2),
|
||||
DEFAULT_ID3V2)
|
||||
id3v1_class = {"id3v1.1": ID3v1Comment,
|
||||
"none": None}.get(config.get_default("ID3",
|
||||
"id3v1",
|
||||
DEFAULT_ID3V1))
|
||||
if ((id3v2_class is not None) and (id3v1_class is not None)):
|
||||
metadata = ID3CommentPair.converted(metadata,
|
||||
id3v2_class=id3v2_class,
|
||||
id3v1_class=id3v1_class)
|
||||
elif (id3v2_class is not None):
|
||||
metadata = id3v2_class.converted(metadata)
|
||||
elif (id3v1_class is not None):
|
||||
metadata = id3v1_class.converted(metadata)
|
||||
else:
|
||||
return
|
||||
|
||||
#get the original MP3 data
|
||||
f = file(self.filename, "rb")
|
||||
MP3Audio.__find_mp3_start__(f)
|
||||
data_start = f.tell()
|
||||
MP3Audio.__find_last_mp3_frame__(f)
|
||||
data_end = f.tell()
|
||||
f.seek(data_start, 0)
|
||||
mp3_data = f.read(data_end - data_start)
|
||||
f.close()
|
||||
|
||||
if (isinstance(metadata, ID3CommentPair)):
|
||||
id3v2 = metadata.id3v2.build()
|
||||
id3v1 = metadata.id3v1.build_tag()
|
||||
elif (isinstance(metadata, ID3v2Comment)):
|
||||
id3v2 = metadata.build()
|
||||
id3v1 = ""
|
||||
elif (isinstance(metadata, ID3v1Comment)):
|
||||
id3v2 = ""
|
||||
id3v1 = metadata.build_tag()
|
||||
|
||||
#write id3v2 + data + id3v1 to file
|
||||
f = file(self.filename, "wb")
|
||||
f.write(id3v2)
|
||||
f.write(mp3_data)
|
||||
f.write(id3v1)
|
||||
f.close()
|
||||
|
||||
def delete_metadata(self):
|
||||
"""Deletes the track's MetaData.
|
||||
|
||||
This removes or unsets tags as necessary in order to remove all data.
|
||||
Raises IOError if unable to write the file."""
|
||||
|
||||
#get the original MP3 data
|
||||
f = file(self.filename, "rb")
|
||||
MP3Audio.__find_mp3_start__(f)
|
||||
data_start = f.tell()
|
||||
MP3Audio.__find_last_mp3_frame__(f)
|
||||
data_end = f.tell()
|
||||
f.seek(data_start, 0)
|
||||
mp3_data = f.read(data_end - data_start)
|
||||
f.close()
|
||||
|
||||
#write data to file
|
||||
f = file(self.filename, "wb")
|
||||
f.write(mp3_data)
|
||||
f.close()
|
||||
|
||||
#places mp3file at the position of the next MP3 frame's start
|
||||
@classmethod
|
||||
def __find_next_mp3_frame__(cls, mp3file):
|
||||
#if we're starting at an ID3v2 header, skip it to save a bunch of time
|
||||
ID3v2Comment.skip(mp3file)
|
||||
|
||||
#then find the next mp3 frame
|
||||
(b1, b2) = mp3file.read(2)
|
||||
while ((b1 != chr(0xFF)) or ((ord(b2) & 0xE0) != 0xE0)):
|
||||
mp3file.seek(-1, 1)
|
||||
(b1, b2) = mp3file.read(2)
|
||||
mp3file.seek(-2, 1)
|
||||
|
||||
#places mp3file at the position of the MP3 file's start
|
||||
#either at the next frame (most commonly)
|
||||
#or at the "RIFF????RMP3" header
|
||||
@classmethod
|
||||
def __find_mp3_start__(cls, mp3file):
|
||||
#if we're starting at an ID3v2 header, skip it to save a bunch of time
|
||||
ID3v2Comment.skip(mp3file)
|
||||
|
||||
while (True):
|
||||
byte = mp3file.read(1)
|
||||
while ((byte != chr(0xFF)) and (byte != 'R') and (len(byte) > 0)):
|
||||
byte = mp3file.read(1)
|
||||
|
||||
if (byte == chr(0xFF)): # possibly a frame sync
|
||||
mp3file.seek(-1, 1)
|
||||
try:
|
||||
header = cls.MP3_FRAME_HEADER.parse_stream(mp3file)
|
||||
if ((header.sync == 0x07FF) and
|
||||
(header.mpeg_version in (0x03, 0x02, 0x00)) and
|
||||
(header.layer in (0x01, 0x02, 0x03))):
|
||||
mp3file.seek(-4, 1)
|
||||
return
|
||||
else:
|
||||
mp3file.seek(-3, 1)
|
||||
except:
|
||||
continue
|
||||
elif (byte == 'R'): # possibly a 'RIFF????RMP3' header
|
||||
header = mp3file.read(11)
|
||||
if ((header[0:3] == 'IFF') and
|
||||
(header[7:11] == 'RMP3')):
|
||||
mp3file.seek(-12, 1)
|
||||
return
|
||||
else:
|
||||
mp3file.seek(-11, 1)
|
||||
elif (len(byte) == 0): # we've run out of MP3 file
|
||||
return
|
||||
|
||||
#places mp3file at the position of the last MP3 frame's end
|
||||
#(either the last byte in the file or just before the ID3v1 tag)
|
||||
#this may not be strictly accurate if ReplayGain data is present,
|
||||
#since APEv2 tags came before the ID3v1 tag,
|
||||
#but we're not planning to change that tag anyway
|
||||
@classmethod
|
||||
def __find_last_mp3_frame__(cls, mp3file):
|
||||
mp3file.seek(-128, 2)
|
||||
if (mp3file.read(3) == 'TAG'):
|
||||
mp3file.seek(-128, 2)
|
||||
return
|
||||
else:
|
||||
mp3file.seek(0, 2)
|
||||
return
|
||||
|
||||
#header is a Construct parsed from 4 bytes sent to MP3_FRAME_HEADER
|
||||
#returns the total length of the frame, including the header
|
||||
#(subtract 4 when doing a seek or read to the next one)
|
||||
@classmethod
|
||||
def __mp3_frame_length__(cls, header):
|
||||
layer = 4 - header.layer # layer 1, 2 or 3
|
||||
|
||||
bit_rate = MP3Audio.__get_mp3_frame_bitrate__(header)
|
||||
if (bit_rate is None):
|
||||
raise InvalidMP3(_(u"Invalid bit rate"))
|
||||
|
||||
sample_rate = MP3Audio.__get_mp3_frame_sample_rate__(header)
|
||||
|
||||
if (layer == 1):
|
||||
return (12 * (bit_rate * 1000) / sample_rate + header.padding) * 4
|
||||
else:
|
||||
return 144 * (bit_rate * 1000) / sample_rate + header.padding
|
||||
|
||||
#takes a parsed MP3_FRAME_HEADER
|
||||
#returns the mp3's sample rate based on that information
|
||||
#(typically 44100)
|
||||
@classmethod
|
||||
def __get_mp3_frame_sample_rate__(cls, frame):
|
||||
try:
|
||||
if (frame.mpeg_version == 0x00): # MPEG 2.5
|
||||
return MP3Audio.MP3_SAMPLERATE[frame.sampling_rate][2]
|
||||
elif (frame.mpeg_version == 0x02): # MPEG 2
|
||||
return MP3Audio.MP3_SAMPLERATE[frame.sampling_rate][1]
|
||||
else: # MPEG 1
|
||||
return MP3Audio.MP3_SAMPLERATE[frame.sampling_rate][0]
|
||||
except IndexError:
|
||||
raise InvalidMP3(_(u"Invalid sampling rate"))
|
||||
|
||||
@classmethod
|
||||
def __get_mp3_frame_channels__(cls, frame):
|
||||
if (frame.channel == 0x03):
|
||||
return 1
|
||||
else:
|
||||
return 2
|
||||
|
||||
@classmethod
|
||||
def __get_mp3_frame_bitrate__(cls, frame):
|
||||
layer = 4 - frame.layer # layer 1, 2 or 3
|
||||
|
||||
try:
|
||||
if (frame.mpeg_version == 0x00): # MPEG 2.5
|
||||
return MP3Audio.MP3_BITRATE[frame.bitrate][layer + 2]
|
||||
elif (frame.mpeg_version == 0x02): # MPEG 2
|
||||
return MP3Audio.MP3_BITRATE[frame.bitrate][layer + 2]
|
||||
elif (frame.mpeg_version == 0x03): # MPEG 1
|
||||
return MP3Audio.MP3_BITRATE[frame.bitrate][layer - 1]
|
||||
else:
|
||||
return 0
|
||||
except IndexError:
|
||||
raise InvalidMP3(_(u"Invalid bit rate"))
|
||||
|
||||
def cd_frames(self):
|
||||
"""Returns the total length of the track in CD frames.
|
||||
|
||||
Each CD frame is 1/75th of a second."""
|
||||
|
||||
#calculate length at create-time so that we can
|
||||
#throw InvalidMP3 as soon as possible
|
||||
return self.__framelength__
|
||||
|
||||
#returns the length of this file in CD frame
|
||||
#raises InvalidMP3 if any portion of the frame is invalid
|
||||
def __length__(self):
|
||||
mp3file = file(self.filename, "rb")
|
||||
|
||||
try:
|
||||
MP3Audio.__find_next_mp3_frame__(mp3file)
|
||||
|
||||
start_position = mp3file.tell()
|
||||
|
||||
fr = MP3Audio.MP3_FRAME_HEADER.parse(mp3file.read(4))
|
||||
|
||||
first_frame = mp3file.read(MP3Audio.__mp3_frame_length__(fr) - 4)
|
||||
|
||||
sample_rate = MP3Audio.__get_mp3_frame_sample_rate__(fr)
|
||||
|
||||
if (fr.mpeg_version == 0x00): # MPEG 2.5
|
||||
version = 3
|
||||
elif (fr.mpeg_version == 0x02): # MPEG 2
|
||||
version = 3
|
||||
else: # MPEG 1
|
||||
version = 0
|
||||
|
||||
try:
|
||||
if (fr.layer == 0x03): # layer 1
|
||||
frames_per_sample = 384
|
||||
bit_rate = MP3Audio.MP3_BITRATE[fr.bitrate][version]
|
||||
elif (fr.layer == 0x02): # layer 2
|
||||
frames_per_sample = 1152
|
||||
bit_rate = MP3Audio.MP3_BITRATE[fr.bitrate][version + 1]
|
||||
elif (fr.layer == 0x01): # layer 3
|
||||
frames_per_sample = 1152
|
||||
bit_rate = MP3Audio.MP3_BITRATE[fr.bitrate][version + 2]
|
||||
else:
|
||||
raise InvalidMP3(_(u"Unsupported MPEG layer"))
|
||||
except IndexError:
|
||||
raise InvalidMP3(_(u"Invalid bit rate"))
|
||||
|
||||
if ('Xing' in first_frame):
|
||||
#the first frame has a Xing header,
|
||||
#use that to calculate the mp3's length
|
||||
xing_header = MP3Audio.XING_HEADER.parse(
|
||||
first_frame[first_frame.index('Xing'):])
|
||||
|
||||
return (xing_header.num_frames * frames_per_sample * 75 /
|
||||
sample_rate)
|
||||
else:
|
||||
#no Xing header,
|
||||
#assume a constant bitrate file
|
||||
mp3file.seek(-128, 2)
|
||||
if (mp3file.read(3) == "TAG"):
|
||||
end_position = mp3file.tell() - 3
|
||||
else:
|
||||
mp3file.seek(0, 2)
|
||||
end_position = mp3file.tell()
|
||||
|
||||
return ((end_position - start_position) * 75 * 8 /
|
||||
(bit_rate * 1000))
|
||||
finally:
|
||||
mp3file.close()
|
||||
|
||||
def total_frames(self):
|
||||
"""Returns the total PCM frames of the track as an integer."""
|
||||
|
||||
return self.cd_frames() * self.sample_rate() / 75
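#Worked example (illustration only): a 3 minute track spans
#180 * 75 == 13500 CD frames, so at 44100 Hz total_frames() returns
#13500 * 44100 / 75 == 7938000 PCM frames.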
|
||||
|
||||
@classmethod
|
||||
def can_add_replay_gain(cls):
|
||||
"""Returns True if we have the necessary binaries to add ReplayGain."""
|
||||
|
||||
return BIN.can_execute(BIN['mp3gain'])
|
||||
|
||||
@classmethod
|
||||
def lossless_replay_gain(cls):
|
||||
"""Returns False."""
|
||||
|
||||
return False
|
||||
|
||||
@classmethod
|
||||
def add_replay_gain(cls, filenames, progress=None):
|
||||
"""Adds ReplayGain values to a list of filename strings.
|
||||
|
||||
All the filenames must be of this AudioFile type.
|
||||
Raises ValueError if some problem occurs during ReplayGain application.
|
||||
"""
|
||||
|
||||
track_names = [track.filename for track in
|
||||
open_files(filenames) if
|
||||
isinstance(track, cls)]
|
||||
|
||||
if (progress is not None):
|
||||
progress(0, 1)
|
||||
|
||||
if ((len(track_names) > 0) and (BIN.can_execute(BIN['mp3gain']))):
|
||||
devnull = file(os.devnull, 'ab')
|
||||
sub = subprocess.Popen([BIN['mp3gain'], '-f', '-k', '-q', '-r'] + \
|
||||
track_names,
|
||||
stdout=devnull,
|
||||
stderr=devnull)
|
||||
sub.wait()
|
||||
|
||||
devnull.close()
|
||||
|
||||
if (progress is not None):
|
||||
progress(1, 1)
|
||||
|
||||
def mpeg_frames(self):
|
||||
"""Yields (header, data) tuples of the file's contents.
|
||||
|
||||
header is an MPEG_Frame_Header Construct.
|
||||
data is a string of MP3 data."""
|
||||
|
||||
header_struct = MPEG_Frame_Header("header")
|
||||
f = open(self.filename, 'rb')
|
||||
try:
|
||||
#FIXME - this won't handle RIFF RMP3 well
|
||||
#perhaps I should use tracklint to clean those up
|
||||
MP3Audio.__find_last_mp3_frame__(f)
|
||||
stop_position = f.tell()
|
||||
f.seek(0, 0)
|
||||
MP3Audio.__find_mp3_start__(f)
|
||||
while (f.tell() < stop_position):
|
||||
header = header_struct.parse_stream(f)
|
||||
data = f.read(header.byte_length - 4)
|
||||
yield (header, data)
|
||||
finally:
|
||||
f.close()
|
||||
|
||||
def verify(self, progress=None):
|
||||
from . import verify
|
||||
try:
|
||||
f = open(self.filename, 'rb')
|
||||
except IOError, err:
|
||||
raise InvalidMP3(str(err))
|
||||
|
||||
#MP3 verification is likely to be so fast
|
||||
#that individual calls to progress() are
|
||||
#a waste of time.
|
||||
if (progress is not None):
|
||||
progress(0, 1)
|
||||
|
||||
try:
|
||||
try:
|
||||
#skip ID3v2/ID3v1 tags during verification
|
||||
self.__find_mp3_start__(f)
|
||||
start = f.tell()
|
||||
self.__find_last_mp3_frame__(f)
|
||||
end = f.tell()
|
||||
f.seek(start, 0)
|
||||
|
||||
verify.mpeg(f, start, end)
|
||||
if (progress is not None):
|
||||
progress(1, 1)
|
||||
|
||||
return True
|
||||
except (IOError, ValueError), err:
|
||||
raise InvalidMP3(str(err))
|
||||
finally:
|
||||
f.close()
|
||||
|
||||
|
||||
#######################
|
||||
#MP2 AUDIO
|
||||
#######################
|
||||
|
||||
class MP2Audio(MP3Audio):
|
||||
"""An MP2 audio file."""
|
||||
|
||||
SUFFIX = "mp2"
|
||||
NAME = SUFFIX
|
||||
DEFAULT_COMPRESSION = str(192)
|
||||
COMPRESSION_MODES = tuple(map(str, (64, 96, 112, 128, 160, 192,
|
||||
224, 256, 320, 384)))
|
||||
COMPRESSION_DESCRIPTIONS = {"64": _(u"total bitrate of 64kbps"),
|
||||
"384": _(u"total bitrate of 384kbps")}
|
||||
BINARIES = ("lame", "twolame")
|
||||
|
||||
@classmethod
|
||||
def is_type(cls, file):
|
||||
"""Returns True if the given file object describes this format.
|
||||
|
||||
Takes a seekable file pointer rewound to the start of the file."""
|
||||
|
||||
ID3v2Comment.skip(file)
|
||||
|
||||
try:
|
||||
frame = cls.MP3_FRAME_HEADER.parse_stream(file)
|
||||
|
||||
return ((frame.sync == 0x07FF) and
|
||||
(frame.mpeg_version in (0x03, 0x02, 0x00)) and
|
||||
(frame.layer == 0x02))
|
||||
except:
|
||||
return False
|
||||
|
||||
@classmethod
|
||||
def from_pcm(cls, filename, pcmreader, compression=None):
|
||||
"""Encodes a new file from PCM data.
|
||||
|
||||
Takes a filename string, PCMReader object
|
||||
and optional compression level string.
|
||||
Encodes a new audio file from pcmreader's data
|
||||
at the given filename with the specified compression level
|
||||
and returns a new MP2Audio object."""
|
||||
|
||||
import decimal
|
||||
import bisect
|
||||
|
||||
if ((compression is None) or
|
||||
(compression not in cls.COMPRESSION_MODES)):
|
||||
compression = __default_quality__(cls.NAME)
|
||||
|
||||
if ((pcmreader.channels > 2) or
|
||||
(pcmreader.sample_rate not in (32000, 48000, 44100)) or
|
||||
(pcmreader.bits_per_sample != 16)):
|
||||
pcmreader = PCMConverter(
|
||||
pcmreader,
|
||||
sample_rate=[32000, 32000, 44100, 48000][bisect.bisect(
|
||||
[32000, 44100, 48000], pcmreader.sample_rate)],
|
||||
channels=min(pcmreader.channels, 2),
|
||||
channel_mask=pcmreader.channel_mask,
|
||||
bits_per_sample=16)
|
||||
|
||||
devnull = file(os.devnull, 'ab')
|
||||
|
||||
sub = subprocess.Popen([BIN['twolame'], "--quiet",
|
||||
"-r",
|
||||
"-s", str(pcmreader.sample_rate),
|
||||
"--samplesize", str(pcmreader.bits_per_sample),
|
||||
"-N", str(pcmreader.channels),
|
||||
"-m", "a",
|
||||
"-b", compression,
|
||||
"-",
|
||||
filename],
|
||||
stdin=subprocess.PIPE,
|
||||
stdout=devnull,
|
||||
stderr=devnull,
|
||||
preexec_fn=ignore_sigint)
|
||||
|
||||
try:
|
||||
transfer_framelist_data(pcmreader, sub.stdin.write)
|
||||
except (ValueError, IOError), err:
|
||||
sub.stdin.close()
|
||||
sub.wait()
|
||||
cls.__unlink__(filename)
|
||||
raise EncodingError(str(err))
|
||||
except Exception, err:
|
||||
sub.stdin.close()
|
||||
sub.wait()
|
||||
cls.__unlink__(filename)
|
||||
raise err
|
||||
|
||||
try:
|
||||
pcmreader.close()
|
||||
except DecodingError, err:
|
||||
cls.__unlink__(filename)
|
||||
raise EncodingError(err.error_message)
|
||||
|
||||
sub.stdin.close()
|
||||
devnull.close()
|
||||
|
||||
if (sub.wait() == 0):
|
||||
return MP2Audio(filename)
|
||||
else:
|
||||
cls.__unlink__(filename)
|
||||
raise EncodingError(u"twolame exited with error")
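#A minimal sketch of the bisect trick used in from_pcm() above; it is not
#part of audiotools and exists only to illustrate how an arbitrary input
#rate is rounded down onto one of twolame's supported sample rates
#(with a floor of 32000 Hz).
def _nearest_mp2_rate_sketch(sample_rate):
    import bisect

    #e.g. 22050 -> 32000, 44100 -> 44100, 44101 -> 44100, 96000 -> 48000
    return [32000, 32000, 44100, 48000][
        bisect.bisect([32000, 44100, 48000], sample_rate)]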
|
332
Melodia/resources/audiotools/__musepack__.py
Normal file
@ -0,0 +1,332 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
#Audio Tools, a module and set of tools for manipulating audio data
|
||||
#Copyright (C) 2007-2011 Brian Langenberger
|
||||
|
||||
#This program is free software; you can redistribute it and/or modify
|
||||
#it under the terms of the GNU General Public License as published by
|
||||
#the Free Software Foundation; either version 2 of the License, or
|
||||
#(at your option) any later version.
|
||||
|
||||
#This program is distributed in the hope that it will be useful,
|
||||
#but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
#GNU General Public License for more details.
|
||||
|
||||
#You should have received a copy of the GNU General Public License
|
||||
#along with this program; if not, write to the Free Software
|
||||
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
|
||||
|
||||
from audiotools import (AudioFile, InvalidFile, InvalidFormat, PCMReader,
|
||||
PCMConverter, Con, subprocess, BIN, ApeTaggedAudio,
|
||||
os, TempWaveReader, ignore_sigint, transfer_data,
|
||||
EncodingError, DecodingError)
|
||||
from __wav__ import WaveAudio
|
||||
import gettext
|
||||
|
||||
gettext.install("audiotools", unicode=True)
|
||||
|
||||
#######################
|
||||
#Musepack Audio
|
||||
#######################
|
||||
|
||||
|
||||
class NutValue(Con.Adapter):
|
||||
"""A construct for Musepack Nut-encoded integer fields."""
|
||||
|
||||
def __init__(self, name):
|
||||
Con.Adapter.__init__(
|
||||
self,
|
||||
Con.RepeatUntil(lambda obj, ctx: (obj & 0x80) == 0x00,
|
||||
Con.UBInt8(name)))
|
||||
|
||||
def _encode(self, value, context):
|
||||
data = [value & 0x7F]
|
||||
value = value >> 7
|
||||
|
||||
while (value != 0):
|
||||
data.append(0x80 | (value & 0x7F))
|
||||
value = value >> 7
|
||||
|
||||
data.reverse()
|
||||
return data
|
||||
|
||||
def _decode(self, obj, context):
|
||||
i = 0
|
||||
for x in obj:
|
||||
i = (i << 7) | (x & 0x7F)
|
||||
return i
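#A self-contained sketch of the same base-128 "Nut" coding that the
#NutValue adapter above implements via Construct: 7 data bits per byte,
#the high bit set on every byte except the last, most-significant group
#first.  These helpers are illustrative only and are not used by the
#library.
def _nut_encode_sketch(value):
    data = [value & 0x7F]
    value >>= 7
    while (value != 0):
        data.append(0x80 | (value & 0x7F))
        value >>= 7
    data.reverse()
    return data


def _nut_decode_sketch(data):
    i = 0
    for byte in data:
        i = (i << 7) | (byte & 0x7F)
    return i

#round trip, e.g. _nut_encode_sketch(1000) == [0x87, 0x68]
#and _nut_decode_sketch([0x87, 0x68]) == 1000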
|
||||
|
||||
|
||||
class Musepack8StreamReader:
|
||||
"""An object for parsing Musepack SV8 streams."""
|
||||
|
||||
NUT_HEADER = Con.Struct('nut_header',
|
||||
Con.String('key', 2),
|
||||
NutValue('length'))
|
||||
|
||||
def __init__(self, stream):
|
||||
"""Initialized with a file object."""
|
||||
|
||||
self.stream = stream
|
||||
|
||||
def packets(self):
|
||||
"""Yields a set of (key, data) tuples."""
|
||||
|
||||
import string
|
||||
|
||||
UPPERCASE = frozenset(string.ascii_uppercase)
|
||||
|
||||
while (True):
|
||||
try:
|
||||
frame_header = self.NUT_HEADER.parse_stream(self.stream)
|
||||
except Con.core.FieldError:
|
||||
break
|
||||
|
||||
if (not frozenset(frame_header.key).issubset(UPPERCASE)):
|
||||
break
|
||||
|
||||
yield (frame_header.key,
|
||||
self.stream.read(frame_header.length -
|
||||
len(self.NUT_HEADER.build(frame_header))))
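#Example of walking an SV8 container with the reader above
#("stream.mpc" is a hypothetical path, shown for illustration only).
#Each packet is a two-letter uppercase key plus a payload whose stored
#length counts the key and length bytes themselves, and the 'SH' packet
#carries the stream header parsed by MusepackAudio.__init__ below:
#
#  f = open("stream.mpc", "rb")
#  assert f.read(4) == 'MPCK'
#  for (key, data) in Musepack8StreamReader(f).packets():
#      if (key == 'SH'):
#          header = MusepackAudio.MUSEPACK8_HEADER.parse(data)
#          break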
|
||||
|
||||
|
||||
class MusepackAudio(ApeTaggedAudio, AudioFile):
|
||||
"""A Musepack audio file."""
|
||||
|
||||
SUFFIX = "mpc"
|
||||
NAME = SUFFIX
|
||||
DEFAULT_COMPRESSION = "standard"
|
||||
COMPRESSION_MODES = ("thumb", "radio", "standard", "extreme", "insane")
|
||||
|
||||
###Musepack SV7###
|
||||
#BINARIES = ('mppdec','mppenc')
|
||||
|
||||
###Musepack SV8###
|
||||
BINARIES = ('mpcdec', 'mpcenc')
|
||||
|
||||
MUSEPACK8_HEADER = Con.Struct('musepack8_header',
|
||||
Con.UBInt32('crc32'),
|
||||
Con.Byte('bitstream_version'),
|
||||
NutValue('sample_count'),
|
||||
NutValue('beginning_silence'),
|
||||
Con.Embed(Con.BitStruct(
|
||||
'flags',
|
||||
Con.Bits('sample_frequency', 3),
|
||||
Con.Bits('max_used_bands', 5),
|
||||
Con.Bits('channel_count', 4),
|
||||
Con.Flag('mid_side_used'),
|
||||
Con.Bits('audio_block_frames', 3))))
|
||||
|
||||
#not sure about some of the flag locations
|
||||
#Musepack 7's header is very unusual
|
||||
MUSEPACK7_HEADER = Con.Struct('musepack7_header',
|
||||
Con.Const(Con.String('signature', 3), 'MP+'),
|
||||
Con.Byte('version'),
|
||||
Con.ULInt32('frame_count'),
|
||||
Con.ULInt16('max_level'),
|
||||
Con.Embed(
|
||||
Con.BitStruct('flags',
|
||||
Con.Bits('profile', 4),
|
||||
Con.Bits('link', 2),
|
||||
Con.Bits('sample_frequency', 2),
|
||||
Con.Flag('intensity_stereo'),
|
||||
Con.Flag('midside_stereo'),
|
||||
Con.Bits('maxband', 6))),
|
||||
Con.ULInt16('title_gain'),
|
||||
Con.ULInt16('title_peak'),
|
||||
Con.ULInt16('album_gain'),
|
||||
Con.ULInt16('album_peak'),
|
||||
Con.Embed(
|
||||
Con.BitStruct('more_flags',
|
||||
Con.Bits('unused1', 16),
|
||||
Con.Bits('last_frame_length_low', 4),
|
||||
Con.Flag('true_gapless'),
|
||||
Con.Bits('unused2', 3),
|
||||
Con.Flag('fast_seeking'),
|
||||
Con.Bits('last_frame_length_high', 7))),
|
||||
Con.Bytes('unknown', 3),
|
||||
Con.Byte('encoder_version'))
|
||||
|
||||
def __init__(self, filename):
|
||||
"""filename is a plain string."""
|
||||
|
||||
AudioFile.__init__(self, filename)
|
||||
f = file(filename, 'rb')
|
||||
try:
|
||||
if (f.read(4) == 'MPCK'): # a Musepack 8 stream
|
||||
for (key, packet) in Musepack8StreamReader(f).packets():
|
||||
if (key == 'SH'):
|
||||
header = MusepackAudio.MUSEPACK8_HEADER.parse(packet)
|
||||
|
||||
self.__sample_rate__ = (44100, 48000,
|
||||
37800, 32000)[
|
||||
header.sample_frequency]
|
||||
|
||||
self.__total_frames__ = header.sample_count
|
||||
self.__channels__ = header.channel_count + 1
|
||||
|
||||
break
|
||||
elif (key == 'SE'):
|
||||
raise InvalidFile(_(u'No Musepack header found'))
|
||||
|
||||
else: # a Musepack 7 stream
|
||||
f.seek(0, 0)
|
||||
|
||||
try:
|
||||
header = MusepackAudio.MUSEPACK7_HEADER.parse_stream(f)
|
||||
except Con.ConstError:
|
||||
raise InvalidFile(_(u'Musepack signature incorrect'))
|
||||
|
||||
header.last_frame_length = \
|
||||
(header.last_frame_length_high << 4) | \
|
||||
header.last_frame_length_low
|
||||
|
||||
self.__sample_rate__ = (44100, 48000,
|
||||
37800, 32000)[header.sample_frequency]
|
||||
self.__total_frames__ = (((header.frame_count - 1) * 1152) +
|
||||
header.last_frame_length)
|
||||
|
||||
self.__channels__ = 2
|
||||
finally:
|
||||
f.close()
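#Worked example of the SV7 arithmetic above (hypothetical numbers):
#with frame_count == 100, last_frame_length_high == 0x20 and
#last_frame_length_low == 0x5, last_frame_length is
#(0x20 << 4) | 0x5 == 517, so the total PCM frame count is
#((100 - 1) * 1152) + 517 == 114565.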
|
||||
|
||||
@classmethod
|
||||
def from_pcm(cls, filename, pcmreader, compression=None):
|
||||
"""Encodes a new file from PCM data.
|
||||
|
||||
Takes a filename string, PCMReader object
|
||||
and optional compression level string.
|
||||
Encodes a new audio file from pcmreader's data
|
||||
at the given filename with the specified compression level
|
||||
and returns a new MusepackAudio object."""
|
||||
|
||||
import tempfile
|
||||
import bisect
|
||||
|
||||
if (str(compression) not in cls.COMPRESSION_MODES):
|
||||
compression = cls.DEFAULT_COMPRESSION
|
||||
|
||||
if ((pcmreader.channels > 2) or
|
||||
(pcmreader.sample_rate not in (44100, 48000, 37800, 32000)) or
|
||||
(pcmreader.bits_per_sample != 16)):
|
||||
pcmreader = PCMConverter(
|
||||
pcmreader,
|
||||
sample_rate=[32000, 32000, 37800, 44100, 48000][bisect.bisect(
|
||||
[32000, 37800, 44100, 48000], pcmreader.sample_rate)],
|
||||
channels=min(pcmreader.channels, 2),
|
||||
bits_per_sample=16)
|
||||
|
||||
f = tempfile.NamedTemporaryFile(suffix=".wav")
|
||||
w = WaveAudio.from_pcm(f.name, pcmreader)
|
||||
try:
|
||||
return cls.__from_wave__(filename, f.name, compression)
|
||||
finally:
|
||||
del(w)
|
||||
f.close()
|
||||
|
||||
#While Musepack needs to pipe things through WAVE,
|
||||
#not all WAVEs are acceptable.
|
||||
#Use the *_pcm() methods first.
|
||||
def __to_wave__(self, wave_filename):
|
||||
devnull = file(os.devnull, "wb")
|
||||
try:
|
||||
sub = subprocess.Popen([BIN['mpcdec'],
|
||||
self.filename,
|
||||
wave_filename],
|
||||
stdout=devnull,
|
||||
stderr=devnull)
|
||||
|
||||
#FIXME - small files (~5 seconds) result in an error by mpcdec,
|
||||
#even if they decode correctly.
|
||||
#Not much we can do except try to workaround its bugs.
|
||||
if (sub.wait() not in [0, 250]):
|
||||
raise DecodingError()
|
||||
finally:
|
||||
devnull.close()
|
||||
|
||||
@classmethod
|
||||
def __from_wave__(cls, filename, wave_filename, compression=None):
|
||||
if (str(compression) not in cls.COMPRESSION_MODES):
|
||||
compression = cls.DEFAULT_COMPRESSION
|
||||
|
||||
#mppenc requires files to end with .mpc for some reason
|
||||
if (not filename.endswith(".mpc")):
|
||||
import tempfile
|
||||
actual_filename = filename
|
||||
tempfile = tempfile.NamedTemporaryFile(suffix=".mpc")
|
||||
filename = tempfile.name
|
||||
else:
|
||||
actual_filename = tempfile = None
|
||||
|
||||
###Musepack SV7###
|
||||
#sub = subprocess.Popen([BIN['mppenc'],
|
||||
# "--silent",
|
||||
# "--overwrite",
|
||||
# "--%s" % (compression),
|
||||
# wave_filename,
|
||||
# filename],
|
||||
# preexec_fn=ignore_sigint)
|
||||
|
||||
###Musepack SV8###
|
||||
sub = subprocess.Popen([BIN['mpcenc'],
|
||||
"--silent",
|
||||
"--overwrite",
|
||||
"--%s" % (compression),
|
||||
wave_filename,
|
||||
filename])
|
||||
|
||||
if (sub.wait() == 0):
|
||||
if (tempfile is not None):
|
||||
filename = actual_filename
|
||||
f = file(filename, 'wb')
|
||||
tempfile.seek(0, 0)
|
||||
transfer_data(tempfile.read, f.write)
|
||||
f.close()
|
||||
tempfile.close()
|
||||
|
||||
return MusepackAudio(filename)
|
||||
else:
|
||||
if (tempfile is not None):
|
||||
tempfile.close()
|
||||
raise EncodingError(u"error encoding file with mpcenc")
|
||||
|
||||
@classmethod
|
||||
def is_type(cls, file):
|
||||
"""Returns True if the given file object describes this format.
|
||||
|
||||
Takes a seekable file pointer rewound to the start of the file."""
|
||||
|
||||
header = file.read(4)
|
||||
|
||||
###Musepack SV7###
|
||||
#return header == 'MP+\x07'
|
||||
|
||||
###Musepack SV8###
|
||||
return (header == 'MP+\x07') or (header == 'MPCK')
|
||||
|
||||
    def sample_rate(self):
        """Returns the rate of the track's audio as an integer number of Hz."""

        return self.__sample_rate__

    def total_frames(self):
        """Returns the total PCM frames of the track as an integer."""

        return self.__total_frames__

    def channels(self):
        """Returns an integer number of channels this track contains."""

        return self.__channels__

    def bits_per_sample(self):
        """Returns an integer number of bits-per-sample this track contains."""

        return 16

    def lossless(self):
        """Returns False."""

        return False
638
Melodia/resources/audiotools/__musicbrainz__.py
Normal file
@ -0,0 +1,638 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
#Audio Tools, a module and set of tools for manipulating audio data
|
||||
#Copyright (C) 2007-2011 Brian Langenberger
|
||||
|
||||
#This program is free software; you can redistribute it and/or modify
|
||||
#it under the terms of the GNU General Public License as published by
|
||||
#the Free Software Foundation; either version 2 of the License, or
|
||||
#(at your option) any later version.
|
||||
|
||||
#This program is distributed in the hope that it will be useful,
|
||||
#but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
#GNU General Public License for more details.
|
||||
|
||||
#You should have received a copy of the GNU General Public License
|
||||
#along with this program; if not, write to the Free Software
|
||||
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
|
||||
from audiotools import (MetaData, AlbumMetaData, AlbumMetaDataFile,
|
||||
MetaDataFileException,
|
||||
__most_numerous__, DummyAudioFile, sys)
|
||||
import urllib
|
||||
import gettext
|
||||
|
||||
gettext.install("audiotools", unicode=True)
|
||||
|
||||
|
||||
def get_xml_nodes(parent, child_tag):
|
||||
"""A helper routine for returning all children with the given XML tag."""
|
||||
|
||||
return [node for node in parent.childNodes
|
||||
if (hasattr(node, "tagName") and
|
||||
(node.tagName == child_tag))]
|
||||
|
||||
|
||||
def walk_xml_tree(parent, *child_tags):
|
||||
"""A helper routine for walking through several children."""
|
||||
|
||||
if (len(child_tags) == 0):
|
||||
return parent
|
||||
else:
|
||||
base_tag = child_tags[0]
|
||||
remaining_tags = child_tags[1:]
|
||||
for node in parent.childNodes:
|
||||
if (hasattr(node, "tagName") and
|
||||
(node.tagName == base_tag)):
|
||||
return walk_xml_tree(node, *remaining_tags)
|
||||
else:
|
||||
return None
|
||||
|
||||
|
||||
def walk_xml_tree_build(dom, parent, *child_tags):
|
||||
|
||||
if (len(child_tags) == 0):
|
||||
return parent
|
||||
else:
|
||||
base_tag = child_tags[0]
|
||||
remaining_tags = child_tags[1:]
|
||||
for node in parent.childNodes:
|
||||
if (hasattr(node, "tagName") and
|
||||
(node.tagName == base_tag)):
|
||||
return walk_xml_tree_build(dom, node, *remaining_tags)
|
||||
else:
|
||||
new_child = dom.createElement(base_tag)
|
||||
parent.appendChild(new_child)
|
||||
return walk_xml_tree_build(dom, new_child, *remaining_tags)
|
||||
|
||||
|
||||
def get_xml_text_node(parent, child_tag):
|
||||
"""A helper routine for returning the first text child XML node."""
|
||||
|
||||
try:
|
||||
return get_xml_nodes(parent, child_tag)[0].childNodes[0].data.strip()
|
||||
except IndexError:
|
||||
return u''
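#A small usage sketch of the helpers above (illustration only, not called
#anywhere in the library): pull a nested text value out of a throwaway DOM.
def _xml_helpers_sketch():
    from xml.dom.minidom import parseString

    dom = parseString('<metadata><release-list><release>'
                      '<title>Some Album</title>'
                      '</release></release-list></metadata>')

    #walk_xml_tree descends one tag per argument;
    #get_xml_text_node returns the first text child (or u'' if missing)
    release = walk_xml_tree(dom, u'metadata', u'release-list', u'release')
    return get_xml_text_node(release, u'title')  # -> u'Some Album'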
|
||||
|
||||
|
||||
def reorder_xml_children(parent, child_order):
|
||||
"""Given an XML element with childNodes, reorders them to child_order.
|
||||
|
||||
child_order should be a list of unicode tag strings.
|
||||
"""
|
||||
|
||||
if (parent.childNodes is None):
|
||||
return
|
||||
|
||||
child_tags = {}
|
||||
leftovers = []
|
||||
for child in parent.childNodes:
|
||||
if (hasattr(child, "tagName")):
|
||||
child_tags.setdefault(child.tagName, []).append(child)
|
||||
else:
|
||||
leftovers.append(child)
|
||||
|
||||
#remove all the old children from parent
|
||||
for child in parent.childNodes:
|
||||
parent.removeChild(child)
|
||||
|
||||
#re-add the children in child_order
|
||||
for tagName in child_order:
|
||||
if (tagName in child_tags):
|
||||
for child in child_tags[tagName]:
|
||||
parent.appendChild(child)
|
||||
del(child_tags[tagName])
|
||||
|
||||
#re-add any leftover children tags or non-tags
|
||||
for child_tags in child_tags.values():
|
||||
for child in child_tags:
|
||||
parent.appendChild(child)
|
||||
|
||||
for child in leftovers:
|
||||
parent.appendChild(child)
|
||||
|
||||
|
||||
class MBDiscID:
|
||||
"""A MusicBrainz disc ID."""
|
||||
|
||||
def __init__(self, tracks=[], offsets=None, length=None, lead_in=150,
|
||||
first_track_number=None, last_track_number=None,
|
||||
lead_out_track_offset=None):
|
||||
"""Fields are as follows:
|
||||
|
||||
tracks - a list of track lengths in CD frames
|
||||
offsets - a list of track offsets in CD frames
|
||||
length - the length of the entire disc in CD frames
|
||||
lead_in - the location of the first track on the CD, in frames
|
||||
|
||||
first_track_number, last_track_number and lead_out_track_offset
|
||||
are integer values.
|
||||
|
||||
All fields are optional.
|
||||
If any are omitted, they are presumably filled in with data later.
|
||||
"""
|
||||
|
||||
self.tracks = tracks
|
||||
self.__offsets__ = offsets
|
||||
self.__length__ = length
|
||||
self.__lead_in__ = lead_in
|
||||
self.first_track_number = first_track_number
|
||||
self.last_track_number = last_track_number
|
||||
self.lead_out_track_offset = lead_out_track_offset
|
||||
|
||||
@classmethod
|
||||
def from_cdda(cls, cdda):
|
||||
"""Given a CDDA object, returns a populated MBDiscID
|
||||
|
||||
May raise ValueError if there are no audio tracks on the CD."""
|
||||
|
||||
tracks = list(cdda)
|
||||
if (len(tracks) < 1):
|
||||
raise ValueError(_(u"no audio tracks in CDDA object"))
|
||||
|
||||
return cls(
|
||||
tracks=[t.length() for t in tracks],
|
||||
offsets=[t.offset() for t in tracks],
|
||||
length=cdda.length(),
|
||||
lead_in=tracks[0].offset(),
|
||||
lead_out_track_offset=cdda.last_sector() + 150 + 1)
|
||||
|
||||
def offsets(self):
|
||||
"""Returns a list of calculated offset integers, from track lengths."""
|
||||
|
||||
if (self.__offsets__ is None):
|
||||
offsets = [self.__lead_in__]
|
||||
|
||||
for track in self.tracks[0:-1]:
|
||||
offsets.append(track + offsets[-1])
|
||||
|
||||
return offsets
|
||||
else:
|
||||
return self.__offsets__
|
||||
|
||||
def __repr__(self):
|
||||
return ("MBDiscID(tracks=%s,offsets=%s,length=%s,lead_in=%s," +
|
||||
"first_track_number=%s,last_track_number=%s," +
|
||||
"lead_out_track_offset=%s)") % \
|
||||
(repr(self.tracks),
|
||||
repr(self.__offsets__),
|
||||
repr(self.__length__),
|
||||
repr(self.__lead_in__),
|
||||
repr(self.first_track_number),
|
||||
repr(self.last_track_number),
|
||||
repr(self.lead_out_track_offset))
|
||||
|
||||
#returns a MusicBrainz DiscID value as a string
|
||||
def __str__(self):
|
||||
from hashlib import sha1
|
||||
|
||||
if (self.lead_out_track_offset is None):
|
||||
if (self.__length__ is None):
|
||||
lead_out_track_offset = sum(self.tracks) + self.__lead_in__
|
||||
else:
|
||||
lead_out_track_offset = self.__length__ + self.__lead_in__
|
||||
else:
|
||||
lead_out_track_offset = self.lead_out_track_offset
|
||||
|
||||
if (self.first_track_number is None):
|
||||
first_track_number = 1
|
||||
else:
|
||||
first_track_number = self.first_track_number
|
||||
|
||||
if (self.last_track_number is None):
|
||||
last_track_number = len(self.tracks)
|
||||
else:
|
||||
last_track_number = self.last_track_number
|
||||
|
||||
digest = sha1("%02X%02X%s" % \
|
||||
(first_track_number,
|
||||
last_track_number,
|
||||
"".join(["%08X" % (i) for i in
|
||||
[lead_out_track_offset] +
|
||||
self.offsets() +
|
||||
([0] * (99 - len(self.offsets())))])))
|
||||
|
||||
return "".join([{'=': '-', '+': '.', '/': '_'}.get(c, c) for c in
|
||||
digest.digest().encode('base64').rstrip('\n')])
|
||||
|
||||
def toxml(self, output):
|
||||
"""Writes an XML file to the output file object."""
|
||||
|
||||
output.write(MusicBrainzReleaseXML.from_tracks(
|
||||
[DummyAudioFile(length, None, i + 1)
|
||||
for (i, length) in enumerate(self.tracks)]).to_string())
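#A self-contained sketch of the disc ID calculation performed by
#MBDiscID.__str__ above, for illustration only: SHA-1 over the first and
#last track numbers plus 100 zero-padded hex frame offsets (lead-out
#offset first), base64-encoded with '=', '+' and '/' swapped for
#'-', '.' and '_'.
def _mb_disc_id_sketch(first_track, last_track, lead_out, offsets):
    from hashlib import sha1
    from base64 import b64encode

    digest = sha1("%02X%02X%s" %
                  (first_track,
                   last_track,
                   "".join(["%08X" % (i) for i in
                            [lead_out] + offsets +
                            ([0] * (99 - len(offsets)))])))

    return "".join([{'=': '-', '+': '.', '/': '_'}.get(c, c)
                    for c in b64encode(digest.digest())])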
|
||||
|
||||
|
||||
class MusicBrainz:
|
||||
"""A class for performing queries on a MusicBrainz or compatible server."""
|
||||
|
||||
def __init__(self, server, port, messenger):
|
||||
self.server = server
|
||||
self.port = port
|
||||
self.connection = None
|
||||
self.messenger = messenger
|
||||
|
||||
def connect(self):
|
||||
"""Performs the initial connection."""
|
||||
|
||||
import httplib
|
||||
|
||||
self.connection = httplib.HTTPConnection(self.server, self.port)
|
||||
|
||||
def close(self):
|
||||
"""Closes an open connection."""
|
||||
|
||||
if (self.connection is not None):
|
||||
self.connection.close()
|
||||
|
||||
def read_data(self, disc_id, output):
|
||||
"""Returns a (matches,dom) tuple from a MBDiscID object.
|
||||
|
||||
matches is an integer
|
||||
and dom is a minidom Document object or None."""
|
||||
|
||||
from xml.dom.minidom import parseString
|
||||
from xml.parsers.expat import ExpatError
|
||||
|
||||
self.connection.request(
|
||||
"GET",
|
||||
"%s?%s" % ("/ws/1/release",
|
||||
urllib.urlencode({"type": "xml",
|
||||
"discid": str(disc_id)})))
|
||||
|
||||
response = self.connection.getresponse()
|
||||
#FIXME - check for errors in the HTTP response
|
||||
|
||||
data = response.read()
|
||||
|
||||
try:
|
||||
dom = parseString(data)
|
||||
return (len(dom.getElementsByTagName(u'release')), dom)
|
||||
except ExpatError:
|
||||
return (0, None)
|
||||
|
||||
|
||||
class MBXMLException(MetaDataFileException):
|
||||
"""Raised if MusicBrainzReleaseXML.read() encounters an error."""
|
||||
|
||||
def __unicode__(self):
|
||||
return _(u"Invalid MusicBrainz XML file")
|
||||
|
||||
|
||||
class MusicBrainzReleaseXML(AlbumMetaDataFile):
|
||||
"""An XML file as returned by MusicBrainz."""
|
||||
|
||||
TAG_ORDER = {u"release": [u"title",
|
||||
u"text-representation",
|
||||
u"asin",
|
||||
u"artist",
|
||||
u"release-group",
|
||||
u"release-event-list",
|
||||
u"disc-list",
|
||||
u"puid-list",
|
||||
u"track-list",
|
||||
u"relation-list",
|
||||
u"tag-list",
|
||||
u"user-tag-list",
|
||||
u"rating",
|
||||
u"user-rating"],
|
||||
u"artist": [u"name",
|
||||
u"sort-name",
|
||||
u"disambiguation",
|
||||
u"life-span",
|
||||
u"alias-list",
|
||||
u"release-list",
|
||||
u"release-group-list",
|
||||
u"relation-list",
|
||||
u"tag-list",
|
||||
u"user-tag-list",
|
||||
u"rating"],
|
||||
u"track": [u"title",
|
||||
u"duration",
|
||||
u"isrc-list",
|
||||
u"artist",
|
||||
u"release-list",
|
||||
u"puid-list",
|
||||
u"relation-list",
|
||||
u"tag-list",
|
||||
u"user-tag-list",
|
||||
u"rating",
|
||||
u"user-rating"]}
|
||||
|
||||
def __init__(self, dom):
|
||||
self.dom = dom
|
||||
|
||||
def __getattr__(self, key):
|
||||
if (key == 'album_name'):
|
||||
try:
|
||||
return get_xml_text_node(
|
||||
walk_xml_tree(self.dom,
|
||||
u'metadata', u'release-list', u'release'),
|
||||
u'title')
|
||||
except AttributeError:
|
||||
return u""
|
||||
elif (key == 'artist_name'):
|
||||
try:
|
||||
return get_xml_text_node(
|
||||
walk_xml_tree(self.dom,
|
||||
u'metadata', u'release-list', u'release',
|
||||
u'artist'),
|
||||
u'name')
|
||||
except AttributeError:
|
||||
return u""
|
||||
elif (key == 'year'):
|
||||
try:
|
||||
return walk_xml_tree(
|
||||
self.dom, u'metadata', u'release-list', u'release',
|
||||
u'release-event-list',
|
||||
u'event').getAttribute('date')[0:4]
|
||||
except (IndexError, AttributeError):
|
||||
return u""
|
||||
elif (key == 'catalog'):
|
||||
try:
|
||||
return walk_xml_tree(
|
||||
self.dom, u'metadata', u'release-list', u'release',
|
||||
u'release-event-list',
|
||||
u'event').getAttribute('catalog-number')
|
||||
except (IndexError, AttributeError):
|
||||
return u""
|
||||
elif (key == 'extra'):
|
||||
return u""
|
||||
else:
|
||||
try:
|
||||
return self.__dict__[key]
|
||||
except KeyError:
|
||||
raise AttributeError(key)
|
||||
|
||||
def __setattr__(self, key, value):
|
||||
#FIXME - create nodes if they don't exist
|
||||
if (key == 'album_name'):
|
||||
title = walk_xml_tree(self.dom, u'metadata', u'release-list',
|
||||
u'release', u'title')
|
||||
if (len(title.childNodes) > 0):
|
||||
title.replaceChild(self.dom.createTextNode(value),
|
||||
title.firstChild)
|
||||
else:
|
||||
title.appendChild(self.dom.createTextNode(value))
|
||||
elif (key == 'artist_name'):
|
||||
name = walk_xml_tree(self.dom, u'metadata', u'release-list',
|
||||
u'release', u'artist', u'name')
|
||||
if (len(name.childNodes) > 0):
|
||||
name.replaceChild(self.dom.createTextNode(value),
|
||||
name.firstChild)
|
||||
else:
|
||||
name.appendChild(self.dom.createTextNode(value))
|
||||
elif (key == 'year'):
|
||||
walk_xml_tree_build(self.dom, self.dom,
|
||||
u'metadata', u'release-list',
|
||||
u'release', u'release-event-list',
|
||||
u'event').setAttribute(u"date", value)
|
||||
elif (key == 'catalog'):
|
||||
walk_xml_tree_build(self.dom, self.dom,
|
||||
u'metadata', u'release-list',
|
||||
u'release', u'release-event-list',
|
||||
u'event').setAttribute(u"catalog-number",
|
||||
value)
|
||||
elif (key == 'extra'):
|
||||
pass
|
||||
else:
|
||||
self.__dict__[key] = value
|
||||
|
||||
def __len__(self):
|
||||
return len(self.dom.getElementsByTagName(u'track'))
|
||||
|
||||
def to_string(self):
|
||||
for (tag, order) in MusicBrainzReleaseXML.TAG_ORDER.items():
|
||||
for parent in self.dom.getElementsByTagName(tag):
|
||||
reorder_xml_children(parent, order)
|
||||
|
||||
return self.dom.toxml(encoding='utf-8')
|
||||
|
||||
@classmethod
|
||||
def from_string(cls, string):
|
||||
from xml.dom.minidom import parseString
|
||||
from xml.parsers.expat import ExpatError
|
||||
|
||||
try:
|
||||
return cls(parseString(string))
|
||||
except ExpatError:
|
||||
raise MBXMLException("")
|
||||
|
||||
def get_track(self, index):
|
||||
track_node = self.dom.getElementsByTagName(u'track')[index]
|
||||
track_name = get_xml_text_node(track_node, u'title')
|
||||
artist_node = walk_xml_tree(track_node, u'artist')
|
||||
if (artist_node is not None):
|
||||
artist_name = get_xml_text_node(artist_node, u'name')
|
||||
if (len(artist_name) == 0):
|
||||
artist_name = u""
|
||||
else:
|
||||
artist_name = u""
|
||||
return (track_name, artist_name, u"")
|
||||
|
||||
def set_track(self, index, name, artist, extra):
|
||||
track_node = self.dom.getElementsByTagName(u'track')[index]
|
||||
title = walk_xml_tree(track_node, 'title')
|
||||
if (len(title.childNodes) > 0):
|
||||
title.replaceChild(self.dom.createTextNode(name),
|
||||
title.firstChild)
|
||||
else:
|
||||
title.appendChild(self.dom.createTextNode(name))
|
||||
if (len(artist) > 0):
|
||||
artist_node = walk_xml_tree_build(self.dom,
|
||||
track_node,
|
||||
u'artist', u'name')
|
||||
if (artist_node.hasChildNodes()):
|
||||
artist_node.replaceChild(self.dom.createTextNode(artist),
|
||||
artist_node.firstChild)
|
||||
else:
|
||||
artist_node.appendChild(self.dom.createTextNode(artist))
|
||||
|
||||
@classmethod
|
||||
def from_tracks(cls, tracks):
|
||||
"""Returns a MusicBrainzReleaseXML from a list of AudioFile objects.
|
||||
|
||||
These objects are presumably from the same album.
|
||||
If not, these heuristics may generate something unexpected.
|
||||
"""
|
||||
|
||||
from xml.dom.minidom import parseString
|
||||
|
||||
def make_text_node(document, tagname, text):
|
||||
node = document.createElement(tagname)
|
||||
node.appendChild(document.createTextNode(text))
|
||||
return node
|
||||
|
||||
tracks.sort(lambda x, y: cmp(x.track_number(), y.track_number()))
|
||||
|
||||
#our base DOM to start with
|
||||
dom = parseString('<?xml version="1.0" encoding="UTF-8"?>' +
|
||||
'<metadata xmlns="http://musicbrainz.org/' +
|
||||
'ns/mmd-1.0#" xmlns:ext="http://musicbrainz.org/' +
|
||||
'ns/ext-1.0#"></metadata>')
|
||||
|
||||
release = dom.createElement(u'release')
|
||||
|
||||
track_metadata = [t.get_metadata() for t in tracks
|
||||
if (t.get_metadata() is not None)]
|
||||
|
||||
#add album title
|
||||
release.appendChild(make_text_node(
|
||||
dom, u'title', unicode(__most_numerous__(
|
||||
[m.album_name for m in track_metadata]))))
|
||||
|
||||
#add album artist
|
||||
if (len(set([m.artist_name for m in track_metadata])) <
|
||||
len(track_metadata)):
|
||||
artist = dom.createElement(u'artist')
|
||||
album_artist = unicode(__most_numerous__(
|
||||
[m.artist_name for m in track_metadata]))
|
||||
artist.appendChild(make_text_node(dom, u'name', album_artist))
|
||||
release.appendChild(artist)
|
||||
else:
|
||||
album_artist = u'' # all track artist names differ
|
||||
artist = dom.createElement(u'artist')
|
||||
artist.appendChild(make_text_node(dom, u'name', album_artist))
|
||||
release.appendChild(artist)
|
||||
|
||||
#add release info (catalog number, release date, media, etc.)
|
||||
event_list = dom.createElement(u'release-event-list')
|
||||
event = dom.createElement(u'event')
|
||||
|
||||
year = unicode(__most_numerous__(
|
||||
[m.year for m in track_metadata]))
|
||||
if (year != u""):
|
||||
event.setAttribute(u'date', year)
|
||||
|
||||
catalog_number = unicode(__most_numerous__(
|
||||
[m.catalog for m in track_metadata]))
|
||||
if (catalog_number != u""):
|
||||
event.setAttribute(u'catalog-number', catalog_number)
|
||||
|
||||
media = unicode(__most_numerous__(
|
||||
[m.media for m in track_metadata]))
|
||||
if (media != u""):
|
||||
event.setAttribute(u'format', media)
|
||||
|
||||
event_list.appendChild(event)
|
||||
release.appendChild(event_list)
|
||||
|
||||
#add tracks
|
||||
track_list = dom.createElement(u'track-list')
|
||||
|
||||
for track in tracks:
|
||||
node = dom.createElement(u'track')
|
||||
track_metadata = track.get_metadata()
|
||||
if (track_metadata is not None):
|
||||
node.appendChild(make_text_node(
|
||||
dom, u'title', track_metadata.track_name))
|
||||
else:
|
||||
node.appendChild(make_text_node(
|
||||
dom, u'title', u''))
|
||||
|
||||
node.appendChild(make_text_node(
|
||||
dom, u'duration',
|
||||
unicode((track.total_frames() * 1000) /
|
||||
track.sample_rate())))
|
||||
|
||||
if (track_metadata is not None):
|
||||
#add track artist, if different from album artist
|
||||
if (track_metadata.artist_name != album_artist):
|
||||
artist = dom.createElement(u'artist')
|
||||
artist.appendChild(make_text_node(
|
||||
dom, u'name', track_metadata.artist_name))
|
||||
node.appendChild(artist)
|
||||
|
||||
track_list.appendChild(node)
|
||||
|
||||
release.appendChild(track_list)
|
||||
|
||||
release_list = dom.createElement(u'release-list')
|
||||
release_list.appendChild(release)
|
||||
dom.getElementsByTagName(u'metadata')[0].appendChild(release_list)
|
||||
|
||||
return cls(dom)
|
||||
|
||||
|
||||
#takes a Document containing multiple <release> tags
|
||||
#and a Messenger object to query for output
|
||||
#returns a modified Document containing only one <release>
|
||||
def __select_match__(dom, messenger):
|
||||
messenger.info(_(u"Please Select the Closest Match:"))
|
||||
matches = dom.getElementsByTagName(u'release')
|
||||
selected = 0
|
||||
while ((selected < 1) or (selected > len(matches))):
|
||||
for i in range(len(matches)):
|
||||
messenger.info(_(u"%(choice)s) %(name)s") % \
|
||||
{"choice": i + 1,
|
||||
"name": get_xml_text_node(matches[i],
|
||||
u'title')})
|
||||
try:
|
||||
messenger.partial_info(_(u"Your Selection [1-%s]:") % \
|
||||
(len(matches)))
|
||||
selected = int(sys.stdin.readline().strip())
|
||||
except ValueError:
|
||||
selected = 0
|
||||
|
||||
for (i, release) in enumerate(dom.getElementsByTagName(u'release')):
|
||||
if (i != (selected - 1)):
|
||||
release.parentNode.removeChild(release)
|
||||
|
||||
return dom
|
||||
|
||||
|
||||
#takes a Document containing multiple <release> tags
|
||||
#and a default selection integer
|
||||
#returns a modified Document containing only one <release>
|
||||
def __select_default_match__(dom, selection):
|
||||
for (i, release) in enumerate(dom.getElementsByTagName(u'release')):
|
||||
if (i != selection):
|
||||
release.parentNode.removeChild(release)
|
||||
|
||||
return dom
|
||||
|
||||
|
||||
def get_mbxml(disc_id, output, musicbrainz_server, musicbrainz_port,
|
||||
messenger, default_selection=None):
|
||||
"""Runs through the entire MusicBrainz querying sequence.
|
||||
|
||||
Fields are as follows:
|
||||
disc_id - an MBDiscID object
|
||||
output - an open file object for writing
|
||||
musicbrainz_server - a server name string
|
||||
musicbrainz_port - a server port int
|
||||
messenger - a Messenger object
|
||||
default_selection - if given, the default match to choose
|
||||
"""
|
||||
|
||||
mb = MusicBrainz(musicbrainz_server, musicbrainz_port, messenger)
|
||||
|
||||
mb.connect()
|
||||
messenger.info(
|
||||
_(u"Sending Disc ID \"%(disc_id)s\" to server \"%(server)s\"") % \
|
||||
{"disc_id": str(disc_id).decode('ascii'),
|
||||
"server": musicbrainz_server.decode('ascii', 'replace')})
|
||||
|
||||
(matches, dom) = mb.read_data(disc_id, output)
|
||||
mb.close()
|
||||
|
||||
if (matches == 1):
|
||||
messenger.info(_(u"1 match found"))
|
||||
else:
|
||||
messenger.info(_(u"%s matches found") % (matches))
|
||||
|
||||
if (matches > 1):
|
||||
if (default_selection is None):
|
||||
output.write(__select_match__(
|
||||
dom, messenger).toxml(encoding='utf-8'))
|
||||
else:
|
||||
output.write(__select_default_match__(
|
||||
dom, default_selection).toxml(encoding='utf-8'))
|
||||
|
||||
output.flush()
|
||||
elif (matches == 1):
|
||||
output.write(dom.toxml(encoding='utf-8'))
|
||||
output.flush()
|
||||
else:
|
||||
return matches
|
507
Melodia/resources/audiotools/__shn__.py
Normal file
@ -0,0 +1,507 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
#Audio Tools, a module and set of tools for manipulating audio data
|
||||
#Copyright (C) 2007-2011 Brian Langenberger
|
||||
|
||||
#This program is free software; you can redistribute it and/or modify
|
||||
#it under the terms of the GNU General Public License as published by
|
||||
#the Free Software Foundation; either version 2 of the License, or
|
||||
#(at your option) any later version.
|
||||
|
||||
#This program is distributed in the hope that it will be useful,
|
||||
#but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
#GNU General Public License for more details.
|
||||
|
||||
#You should have received a copy of the GNU General Public License
|
||||
#along with this program; if not, write to the Free Software
|
||||
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
|
||||
from audiotools import (AudioFile, ChannelMask, PCMReader,
|
||||
transfer_framelist_data, WaveAudio,
|
||||
AiffAudio, cStringIO, EncodingError,
|
||||
UnsupportedBitsPerSample, InvalidFile,
|
||||
PCMReaderError,
|
||||
WaveContainer, AiffContainer, to_pcm_progress)
|
||||
|
||||
import audiotools.decoders
|
||||
import os.path
|
||||
|
||||
|
||||
class InvalidShorten(InvalidFile):
|
||||
pass
|
||||
|
||||
|
||||
class ShortenAudio(WaveContainer, AiffContainer):
|
||||
"""A Shorten audio file."""
|
||||
|
||||
SUFFIX = "shn"
|
||||
NAME = SUFFIX
|
||||
|
||||
def __init__(self, filename):
|
||||
"""filename is a plain string."""
|
||||
|
||||
AudioFile.__init__(self, filename)
|
||||
try:
|
||||
f = open(filename, 'rb')
|
||||
except IOError, msg:
|
||||
raise InvalidShorten(str(msg))
|
||||
try:
|
||||
if (not ShortenAudio.is_type(f)):
|
||||
raise InvalidShorten(_(u'Shorten header not detected'))
|
||||
finally:
|
||||
f.close()
|
||||
|
||||
#Why not call __populate_metadata__ here and raise InvalidShorten
|
||||
#if it errors out?
|
||||
#The problem is that __populate_metadata__ needs to walk
|
||||
#through the *entire* file in order to calculate total PCM frames
|
||||
#and so on.
|
||||
#That's an expensive operation to perform at init-time
|
||||
#so it's better to postpone it to an on-demand fetch.
|
||||
|
||||
def __populate_metadata__(self):
|
||||
#set up some default values
|
||||
self.__bits_per_sample__ = 16
|
||||
self.__channels__ = 2
|
||||
self.__channel_mask__ = 0x3
|
||||
self.__sample_rate__ = 44100
|
||||
self.__total_frames__ = 0
|
||||
self.__blocks__ = []
|
||||
self.__format__ = None
|
||||
|
||||
#grab a few pieces of technical metadata from the Shorten file itself
|
||||
#which requires a dry-run through the decoder
|
||||
try:
|
||||
decoder = audiotools.decoders.SHNDecoder(self.filename)
|
||||
try:
|
||||
|
||||
self.__bits_per_sample__ = decoder.bits_per_sample
|
||||
self.__channels__ = decoder.channels
|
||||
(self.__total_frames__,
|
||||
self.__blocks__) = decoder.metadata()
|
||||
finally:
|
||||
decoder.close()
|
||||
|
||||
try:
|
||||
self.__channel_mask__ = ChannelMask.from_channels(
|
||||
self.__channels__)
|
||||
except ValueError:
|
||||
self.__channel_mask__ = 0
|
||||
except (ValueError, IOError):
|
||||
#if we hit an error in SHNDecoder while reading
|
||||
#technical metadata, the default values will have to do
|
||||
return
|
||||
|
||||
#the remainder requires parsing the file's VERBATIM blocks
|
||||
#which may contain Wave, AIFF or Sun AU info
|
||||
if (self.__blocks__[0] is not None):
|
||||
header = cStringIO.StringIO(self.__blocks__[0])
|
||||
for format in WaveAudio, AiffAudio:
|
||||
header.seek(0, 0)
|
||||
if (format.is_type(header)):
|
||||
self.__format__ = format
|
||||
break
|
||||
if (self.__format__ is WaveAudio):
|
||||
for (chunk_id, chunk_data) in self.__wave_chunks__():
|
||||
if (chunk_id == 'fmt '):
|
||||
fmt_chunk = WaveAudio.FMT_CHUNK.parse(chunk_data)
|
||||
self.__sample_rate__ = fmt_chunk.sample_rate
|
||||
if (fmt_chunk.compression == 0xFFFE):
|
||||
self.__channel_mask__ = \
|
||||
WaveAudio.fmt_chunk_to_channel_mask(
|
||||
fmt_chunk.channel_mask)
|
||||
elif (self.__format__ is AiffAudio):
|
||||
for (chunk_id, chunk_data) in self.__aiff_chunks__():
|
||||
if (chunk_id == 'COMM'):
|
||||
comm_chunk = AiffAudio.COMM_CHUNK.parse(chunk_data)
|
||||
self.__sample_rate__ = comm_chunk.sample_rate
|
||||
|
||||
def __wave_chunks__(self):
|
||||
total_size = sum([len(block) for block in self.__blocks__
|
||||
if block is not None])
|
||||
wave_data = cStringIO.StringIO("".join([block for block in
|
||||
self.__blocks__
|
||||
if block is not None]))
|
||||
|
||||
wave_data.read(12) # skip the RIFFxxxxWAVE header data
|
||||
total_size -= 12
|
||||
|
||||
#iterate over all the non-data chunks
|
||||
while (total_size > 0):
|
||||
header = WaveAudio.CHUNK_HEADER.parse_stream(wave_data)
|
||||
total_size -= 8
|
||||
if (header.chunk_id != 'data'):
|
||||
yield (header.chunk_id, wave_data.read(header.chunk_length))
|
||||
total_size -= header.chunk_length
|
||||
else:
|
||||
continue
|
||||
|
||||
def __aiff_chunks__(self):
|
||||
total_size = sum([len(block) for block in self.__blocks__
|
||||
if block is not None])
|
||||
aiff_data = cStringIO.StringIO("".join([block for block in
|
||||
self.__blocks__
|
||||
if block is not None]))
|
||||
|
||||
aiff_data.read(12) # skip the FORMxxxxAIFF header data
|
||||
total_size -= 12
|
||||
|
||||
#iterate over all the chunks
|
||||
while (total_size > 0):
|
||||
header = AiffAudio.CHUNK_HEADER.parse_stream(aiff_data)
|
||||
total_size -= 8
|
||||
if (header.chunk_id != 'SSND'):
|
||||
yield (header.chunk_id, aiff_data.read(header.chunk_length))
|
||||
total_size -= header.chunk_length
|
||||
else:
|
||||
#This presumes that audiotools encoded
|
||||
#the Shorten file from an AIFF source.
|
||||
#The reference encoder places the 8 alignment
|
||||
#bytes in the PCM stream itself, which is wrong.
|
||||
yield (header.chunk_id, aiff_data.read(8))
|
||||
total_size -= 8
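#Both generators above perform the same IFF-style walk: skip the 12 byte
#"RIFF....WAVE" / "FORM....AIFF" preamble, then read 8 byte chunk headers
#(a 4 byte ID plus a length) and either yield or skip each payload.
#A rough standalone equivalent for the AIFF case (illustration only,
#big-endian lengths, odd-length padding ignored as it is above) would be:
#
#  import struct
#
#  def walk_aiff_chunks(data):
#      pos = 12
#      while (pos < len(data)):
#          (chunk_id, length) = struct.unpack(">4sI", data[pos:pos + 8])
#          yield (chunk_id, data[pos + 8:pos + 8 + length])
#          pos += 8 + length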
|
||||
|
||||
@classmethod
|
||||
def is_type(cls, file):
|
||||
"""Returns True if the given file object describes this format.
|
||||
|
||||
Takes a seekable file pointer rewound to the start of the file."""
|
||||
|
||||
return (file.read(4) == 'ajkg') and (ord(file.read(1)) == 2)
|
||||
|
||||
def bits_per_sample(self):
|
||||
"""Returns an integer number of bits-per-sample this track contains."""
|
||||
|
||||
if (not hasattr(self, "__bits_per_sample__")):
|
||||
self.__populate_metadata__()
|
||||
return self.__bits_per_sample__
|
||||
|
||||
def channels(self):
|
||||
"""Returns an integer number of channels this track contains."""
|
||||
|
||||
if (not hasattr(self, "__channels__")):
|
||||
self.__populate_metadata__()
|
||||
return self.__channels__
|
||||
|
||||
def channel_mask(self):
|
||||
"""Returns a ChannelMask object of this track's channel layout."""
|
||||
|
||||
if (not hasattr(self, "__channel_mask__")):
|
||||
self.__populate_metadata__()
|
||||
return self.__channel_mask__
|
||||
|
||||
def lossless(self):
|
||||
"""Returns True."""
|
||||
|
||||
return True
|
||||
|
||||
def total_frames(self):
|
||||
"""Returns the total PCM frames of the track as an integer."""
|
||||
|
||||
if (not hasattr(self, "__total_frames__")):
|
||||
self.__populate_metadata__()
|
||||
return self.__total_frames__
|
||||
|
||||
def sample_rate(self):
|
||||
"""Returns the rate of the track's audio as an integer number of Hz."""
|
||||
|
||||
if (not hasattr(self, "__sample_rate__")):
|
||||
self.__populate_metadata__()
|
||||
return self.__sample_rate__
|
||||
|
||||
def to_pcm(self):
|
||||
"""Returns a PCMReader object containing the track's PCM data."""
|
||||
|
||||
try:
|
||||
sample_rate = self.sample_rate()
|
||||
channels = self.channels()
|
||||
channel_mask = int(self.channel_mask())
|
||||
bits_per_sample = self.bits_per_sample()
|
||||
|
||||
decoder = audiotools.decoders.SHNDecoder(self.filename)
|
||||
decoder.sample_rate = sample_rate
|
||||
decoder.channel_mask = channel_mask
|
||||
return decoder
|
||||
except (IOError, ValueError), msg:
|
||||
#these may not be accurate if the Shorten file is broken
|
||||
#but if it is broken, there'll be no way to
|
||||
#cross-check the results anyway
|
||||
return PCMReaderError(error_message=str(msg),
|
||||
sample_rate=44100,
|
||||
channels=2,
|
||||
channel_mask=0x3,
|
||||
bits_per_sample=16)
|
||||
|
||||
@classmethod
|
||||
def from_pcm(cls, filename, pcmreader, compression=None,
|
||||
block_size=256):
|
||||
"""Encodes a new file from PCM data.
|
||||
|
||||
Takes a filename string, PCMReader object
|
||||
and optional compression level string.
|
||||
Encodes a new audio file from pcmreader's data
|
||||
at the given filename with the specified compression level
|
||||
and returns a new ShortenAudio object."""
|
||||
|
||||
if (pcmreader.bits_per_sample not in (8, 16)):
|
||||
raise UnsupportedBitsPerSample(filename, pcmreader.bits_per_sample)
|
||||
|
||||
import tempfile
|
||||
|
||||
f = tempfile.NamedTemporaryFile(suffix=".wav")
|
||||
try:
|
||||
w = WaveAudio.from_pcm(f.name, pcmreader)
|
||||
return cls.from_wave(filename, f.name, compression, block_size)
|
||||
finally:
|
||||
if (os.path.isfile(f.name)):
|
||||
f.close()
|
||||
else:
|
||||
f.close_called = True
|
||||
|
||||
def to_wave(self, wave_filename, progress=None):
|
||||
"""Writes the contents of this file to the given .wav filename string.
|
||||
|
||||
Raises EncodingError if some error occurs during decoding."""
|
||||
|
||||
if (not hasattr(self, "__format__")):
|
||||
try:
|
||||
self.__populate_metadata__()
|
||||
except IOError, msg:
|
||||
raise EncodingError(str(msg))
|
||||
|
||||
if (self.__format__ is WaveAudio):
|
||||
try:
|
||||
f = open(wave_filename, 'wb')
|
||||
except IOError, msg:
|
||||
raise EncodingError(str(msg))
|
||||
for block in self.__blocks__:
|
||||
if (block is not None):
|
||||
f.write(block)
|
||||
else:
|
||||
try:
|
||||
total_frames = self.total_frames()
|
||||
current_frames = 0
|
||||
decoder = audiotools.decoders.SHNDecoder(self.filename)
|
||||
frame = decoder.read(4096)
|
||||
while (len(frame) > 0):
|
||||
f.write(frame.to_bytes(False, True))
|
||||
current_frames += frame.frames
|
||||
if (progress is not None):
|
||||
progress(current_frames, total_frames)
|
||||
frame = decoder.read(4096)
|
||||
except IOError, msg:
|
||||
raise EncodingError(str(msg))
|
||||
else:
|
||||
WaveAudio.from_pcm(wave_filename, to_pcm_progress(self, progress))
|
||||
|
||||
def to_aiff(self, aiff_filename, progress=None):
|
||||
"""Writes the contents of this file to the given .aiff filename string.
|
||||
|
||||
Raises EncodingError if some error occurs during decoding."""
|
||||
|
||||
if (not hasattr(self, "__format__")):
|
||||
try:
|
||||
self.__populate_metadata__()
|
||||
except IOError, msg:
|
||||
raise EncodingError(str(msg))
|
||||
|
||||
if (self.__format__ is AiffAudio):
|
||||
try:
|
||||
f = open(aiff_filename, 'wb')
|
||||
except IOError, msg:
|
||||
raise EncodingError(str(msg))
|
||||
for block in self.__blocks__:
|
||||
if (block is not None):
|
||||
f.write(block)
|
||||
else:
|
||||
try:
|
||||
total_frames = self.total_frames()
|
||||
current_frames = 0
|
||||
decoder = audiotools.decoders.SHNDecoder(self.filename)
|
||||
frame = decoder.read(4096)
|
||||
while (len(frame) > 0):
|
||||
f.write(frame.to_bytes(True, True))
|
||||
current_frames += frame.frames
|
||||
if (progress is not None):
|
||||
progress(current_frames, total_frames)
|
||||
frame = decoder.read(4096)
|
||||
except IOError, msg:
|
||||
raise EncodingError(str(msg))
|
||||
else:
|
||||
AiffAudio.from_pcm(aiff_filename, to_pcm_progress(self, progress))
|
||||
|
||||
@classmethod
|
||||
def from_wave(cls, filename, wave_filename, compression=None,
|
||||
block_size=256, progress=None):
|
||||
"""Encodes a new AudioFile from an existing .wav file.
|
||||
|
||||
Takes a filename string, wave_filename string
|
||||
of an existing WaveAudio file
|
||||
and an optional compression level string.
|
||||
Encodes a new audio file from the wave's data
|
||||
at the given filename with the specified compression level
|
||||
and returns a new ShortenAudio object."""
|
||||
|
||||
wave = WaveAudio(wave_filename)
|
||||
|
||||
if (wave.bits_per_sample() not in (8, 16)):
|
||||
raise UnsupportedBitsPerSample(filename, wave.bits_per_sample())
|
||||
|
||||
(head, tail) = wave.pcm_split()
|
||||
if (len(tail) > 0):
|
||||
blocks = [head, None, tail]
|
||||
else:
|
||||
blocks = [head, None]
|
||||
|
||||
import audiotools.encoders
|
||||
|
||||
try:
|
||||
audiotools.encoders.encode_shn(
|
||||
filename=filename,
|
||||
pcmreader=to_pcm_progress(wave, progress),
|
||||
block_size=block_size,
|
||||
file_type={8: 2,
|
||||
16: 5}[wave.bits_per_sample()],
|
||||
verbatim_chunks=blocks)
|
||||
|
||||
return cls(filename)
|
||||
except IOError, err:
|
||||
cls.__unlink__(filename)
|
||||
raise EncodingError(str(err))
|
||||
except Exception, err:
|
||||
cls.__unlink__(filename)
|
||||
raise err
|
||||
|
||||
@classmethod
|
||||
def from_aiff(cls, filename, aiff_filename, compression=None,
|
||||
block_size=256, progress=None):
|
||||
"""Encodes a new AudioFile from an existing .aiff file.
|
||||
|
||||
Takes a filename string, aiff_filename string
|
||||
of an existing AiffAudio file
|
||||
and an optional compression level string.
|
||||
Encodes a new audio file from the aiff's data
|
||||
at the given filename with the specified compression level
|
||||
and returns a new ShortenAudio object."""
|
||||
|
||||
aiff = AiffAudio(aiff_filename)
|
||||
|
||||
if (aiff.bits_per_sample() not in (8, 16)):
|
||||
raise UnsupportedBitsPerSample(filename, aiff.bits_per_sample())
|
||||
|
||||
(head, tail) = aiff.pcm_split()
|
||||
if (len(tail) > 0):
|
||||
blocks = [head, None, tail]
|
||||
else:
|
||||
blocks = [head, None]
|
||||
|
||||
import audiotools.encoders
|
||||
|
||||
try:
|
||||
audiotools.encoders.encode_shn(
|
||||
filename=filename,
|
||||
pcmreader=to_pcm_progress(aiff, progress),
|
||||
block_size=block_size,
|
||||
file_type={8: 1, # 8-bit AIFF seems to be signed
|
||||
16: 3}[aiff.bits_per_sample()],
|
||||
verbatim_chunks=blocks)
|
||||
|
||||
return cls(filename)
|
||||
except IOError, err:
|
||||
cls.__unlink__(filename)
|
||||
raise EncodingError(str(err))
|
||||
except Exception, err:
|
||||
cls.__unlink__(filename)
|
||||
raise err
|
||||
|
||||
def convert(self, target_path, target_class, compression=None,
|
||||
progress=None):
|
||||
"""Encodes a new AudioFile from existing AudioFile.
|
||||
|
||||
Take a filename string, target class and optional compression string.
|
||||
Encodes a new AudioFile in the target class and returns
|
||||
the resulting object.
|
||||
Metadata is not copied during conversion, but embedded
|
||||
RIFF chunks are (if any).
|
||||
May raise EncodingError if some problem occurs during encoding."""
|
||||
|
||||
#Note that a Shorten file cannot contain
|
||||
#both RIFF chunks and AIFF chunks at the same time.
|
||||
|
||||
import tempfile
|
||||
|
||||
if (target_class == WaveAudio):
|
||||
self.to_wave(target_path, progress=progress)
|
||||
return WaveAudio(target_path)
|
||||
elif (target_class == AiffAudio):
|
||||
self.to_aiff(target_path, progress=progress)
|
||||
return AiffAudio(target_path)
|
||||
elif (self.has_foreign_riff_chunks() and
|
||||
hasattr(target_class, "from_wave")):
|
||||
temp_wave = tempfile.NamedTemporaryFile(suffix=".wav")
|
||||
try:
|
||||
#we'll only log the second leg of conversion,
|
||||
#since that's likely to be the slower portion
|
||||
self.to_wave(temp_wave.name)
|
||||
return target_class.from_wave(target_path,
|
||||
temp_wave.name,
|
||||
compression,
|
||||
progress=progress)
|
||||
finally:
|
||||
temp_wave.close()
|
||||
elif (self.has_foreign_aiff_chunks() and
|
||||
hasattr(target_class, "from_aiff")):
|
||||
temp_aiff = tempfile.NamedTemporaryFile(suffix=".aiff")
|
||||
try:
|
||||
self.to_aiff(temp_aiff.name)
|
||||
return target_class.from_aiff(target_path,
|
||||
temp_aiff.name,
|
||||
compression,
|
||||
progress=progress)
|
||||
finally:
|
||||
temp_aiff.close()
|
||||
else:
|
||||
return target_class.from_pcm(target_path,
|
||||
to_pcm_progress(self, progress),
|
||||
compression)
|
||||
|
||||
def has_foreign_riff_chunks(self):
|
||||
"""Returns True if the audio file contains non-audio RIFF chunks.
|
||||
|
||||
During transcoding, if the source audio file has foreign RIFF chunks
|
||||
and the target audio format supports foreign RIFF chunks,
|
||||
conversion should be routed through .wav conversion
|
||||
to avoid losing those chunks."""
|
||||
|
||||
if (not hasattr(self, "__format__")):
|
||||
self.__populate_metadata__()
|
||||
|
||||
if (self.__format__ is WaveAudio):
|
||||
for (chunk_id, chunk_data) in self.__wave_chunks__():
|
||||
if (chunk_id != 'fmt '):
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
else:
|
||||
return False
|
||||
|
||||
def has_foreign_aiff_chunks(self):
|
||||
"""Returns True if the audio file contains non-audio AIFF chunks.
|
||||
|
||||
During transcoding, if the source audio file has foreign AIFF chunks
|
||||
and the target audio format supports foreign AIFF chunks,
|
||||
conversion should be routed through .aiff conversion
|
||||
to avoid losing those chunks."""
|
||||
|
||||
if (not hasattr(self, "__format__")):
|
||||
self.__populate_metadata__()
|
||||
|
||||
if (self.__format__ is AiffAudio):
|
||||
for (chunk_id, chunk_data) in self.__aiff_chunks__():
|
||||
if ((chunk_id != 'COMM') and (chunk_id != 'SSND')):
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
else:
|
||||
return False
|
268
Melodia/resources/audiotools/__speex__.py
Normal file
@ -0,0 +1,268 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
#Audio Tools, a module and set of tools for manipulating audio data
|
||||
#Copyright (C) 2007-2011 Brian Langenberger
|
||||
|
||||
#This program is free software; you can redistribute it and/or modify
|
||||
#it under the terms of the GNU General Public License as published by
|
||||
#the Free Software Foundation; either version 2 of the License, or
|
||||
#(at your option) any later version.
|
||||
|
||||
#This program is distributed in the hope that it will be useful,
|
||||
#but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
#GNU General Public License for more details.
|
||||
|
||||
#You should have received a copy of the GNU General Public License
|
||||
#along with this program; if not, write to the Free Software
|
||||
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
|
||||
|
||||
from audiotools import (AudioFile, InvalidFile, PCMReader, PCMConverter,
|
||||
Con, transfer_data, transfer_framelist_data,
|
||||
subprocess, BIN, cStringIO, os, ignore_sigint,
|
||||
EncodingError, DecodingError, ChannelMask,
|
||||
__default_quality__)
|
||||
from __vorbis__ import *
|
||||
|
||||
#######################
|
||||
#Speex File
|
||||
#######################
|
||||
|
||||
|
||||
class InvalidSpeex(InvalidFile):
|
||||
pass
|
||||
|
||||
|
||||
class UnframedVorbisComment(VorbisComment):
|
||||
"""An implementation of VorbisComment without the framing bit."""
|
||||
|
||||
VORBIS_COMMENT = Con.Struct("vorbis_comment",
|
||||
Con.PascalString(
|
||||
"vendor_string",
|
||||
length_field=Con.ULInt32("length")),
|
||||
Con.PrefixedArray(
|
||||
length_field=Con.ULInt32("length"),
|
||||
subcon=Con.PascalString("value",
|
||||
length_field=Con.ULInt32("length"))))
|
||||
|
||||
|
||||
class SpeexAudio(VorbisAudio):
|
||||
"""An Ogg Speex audio file."""
|
||||
|
||||
SUFFIX = "spx"
|
||||
NAME = SUFFIX
|
||||
DEFAULT_COMPRESSION = "8"
|
||||
COMPRESSION_MODES = tuple([str(i) for i in range(0, 11)])
|
||||
COMPRESSION_DESCRIPTIONS = {"0":
|
||||
_(u"corresponds to speexenc --quality 0"),
|
||||
"10":
|
||||
_(u"corresponds to speexenc --quality 10")}
|
||||
BINARIES = ("speexenc", "speexdec")
|
||||
REPLAYGAIN_BINARIES = tuple()
|
||||
|
||||
SPEEX_HEADER = Con.Struct('speex_header',
|
||||
Con.String('speex_string', 8),
|
||||
Con.String('speex_version', 20),
|
||||
Con.ULInt32('speex_version_id'),
|
||||
Con.ULInt32('header_size'),
|
||||
Con.ULInt32('sampling_rate'),
|
||||
Con.ULInt32('mode'),
|
||||
Con.ULInt32('mode_bitstream_version'),
|
||||
Con.ULInt32('channels'),
|
||||
Con.ULInt32('bitrate'),
|
||||
Con.ULInt32('frame_size'),
|
||||
Con.ULInt32('vbr'),
|
||||
Con.ULInt32('frame_per_packet'),
|
||||
Con.ULInt32('extra_headers'),
|
||||
Con.ULInt32('reserved1'),
|
||||
Con.ULInt32('reserved2'))
|
||||
|
||||
def __init__(self, filename):
|
||||
"""filename is a plain string."""
|
||||
|
||||
AudioFile.__init__(self, filename)
|
||||
try:
|
||||
self.__read_metadata__()
|
||||
except IOError, msg:
|
||||
raise InvalidSpeex(str(msg))
|
||||
|
||||
@classmethod
|
||||
def is_type(cls, file):
|
||||
"""Returns True if the given file object describes this format.
|
||||
|
||||
Takes a seekable file pointer rewound to the start of the file."""
|
||||
|
||||
header = file.read(0x23)
|
||||
|
||||
return (header.startswith('OggS') and
|
||||
header[0x1C:0x23] == 'Speex ')
|
||||
|
||||
def __read_metadata__(self):
|
||||
f = OggStreamReader(file(self.filename, "rb"))
|
||||
packets = f.packets()
|
||||
try:
|
||||
#first read the Header packet
|
||||
try:
|
||||
header = SpeexAudio.SPEEX_HEADER.parse(packets.next())
|
||||
except StopIteration:
|
||||
raise InvalidSpeex(_(u"Header packet not found"))
|
||||
|
||||
self.__sample_rate__ = header.sampling_rate
|
||||
self.__channels__ = header.channels
|
||||
|
||||
#then read the Comment packet
|
||||
comment_packet = packets.next()
|
||||
|
||||
self.comment = UnframedVorbisComment.VORBIS_COMMENT.parse(
|
||||
comment_packet)
|
||||
finally:
|
||||
del(packets)
|
||||
f.close()
|
||||
del(f)
|
||||
|
||||
def to_pcm(self):
|
||||
"""Returns a PCMReader object containing the track's PCM data."""
|
||||
|
||||
devnull = file(os.devnull, 'ab')
|
||||
sub = subprocess.Popen([BIN['speexdec'], self.filename, '-'],
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=devnull)
|
||||
return PCMReader(
|
||||
sub.stdout,
|
||||
sample_rate=self.sample_rate(),
|
||||
channels=self.channels(),
|
||||
channel_mask=int(ChannelMask.from_channels(self.channels())),
|
||||
bits_per_sample=self.bits_per_sample(),
|
||||
process=sub)
|
||||
|
||||
@classmethod
|
||||
def from_pcm(cls, filename, pcmreader, compression=None):
|
||||
"""Encodes a new file from PCM data.
|
||||
|
||||
Takes a filename string, PCMReader object
|
||||
and optional compression level string.
|
||||
Encodes a new audio file from pcmreader's data
|
||||
at the given filename with the specified compression level
|
||||
and returns a new SpeexAudio object."""
|
||||
|
||||
import bisect
|
||||
|
||||
if ((compression is None) or
|
||||
(compression not in cls.COMPRESSION_MODES)):
|
||||
compression = __default_quality__(cls.NAME)
|
||||
|
||||
if ((pcmreader.bits_per_sample not in (8, 16)) or
|
||||
(pcmreader.channels > 2) or
|
||||
(pcmreader.sample_rate not in (8000, 16000, 32000, 44100))):
|
||||
pcmreader = PCMConverter(
|
||||
pcmreader,
|
||||
sample_rate=[8000, 8000, 16000, 32000, 44100][bisect.bisect(
|
||||
[8000, 16000, 32000, 44100], pcmreader.sample_rate)],
|
||||
channels=min(pcmreader.channels, 2),
|
||||
channel_mask=ChannelMask.from_channels(
|
||||
min(pcmreader.channels, 2)),
|
||||
bits_per_sample=min(pcmreader.bits_per_sample, 16))
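            #a worked example of the rate bucketing above: bisect rounds
            #the source rate down to the nearest rate in
            #(8000, 16000, 32000, 44100), so 22050 Hz becomes 16000 Hz and
            #48000 Hz becomes 44100 Hz, while anything below 8000 Hz is
            #raised to 8000 Hz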
|
||||
|
||||
BITS_PER_SAMPLE = {8: ['--8bit'],
|
||||
16: ['--16bit']}[pcmreader.bits_per_sample]
|
||||
|
||||
CHANNELS = {1: [], 2: ['--stereo']}[pcmreader.channels]
|
||||
|
||||
devnull = file(os.devnull, "ab")
|
||||
|
||||
sub = subprocess.Popen([BIN['speexenc'],
|
||||
'--quality', str(compression),
|
||||
'--rate', str(pcmreader.sample_rate),
|
||||
'--le'] + \
|
||||
BITS_PER_SAMPLE + \
|
||||
CHANNELS + \
|
||||
['-', filename],
|
||||
stdin=subprocess.PIPE,
|
||||
stderr=devnull,
|
||||
preexec_fn=ignore_sigint)
|
||||
|
||||
try:
|
||||
transfer_framelist_data(pcmreader, sub.stdin.write)
|
||||
except (IOError, ValueError), err:
|
||||
sub.stdin.close()
|
||||
sub.wait()
|
||||
cls.__unlink__(filename)
|
||||
raise EncodingError(str(err))
|
||||
except Exception, err:
|
||||
sub.stdin.close()
|
||||
sub.wait()
|
||||
cls.__unlink__(filename)
|
||||
raise err
|
||||
|
||||
try:
|
||||
pcmreader.close()
|
||||
except DecodingError, err:
|
||||
raise EncodingError(err.error_message)
|
||||
sub.stdin.close()
|
||||
result = sub.wait()
|
||||
devnull.close()
|
||||
|
||||
if (result == 0):
|
||||
return SpeexAudio(filename)
|
||||
else:
|
||||
raise EncodingError(u"unable to encode file with speexenc")
|
||||
|
||||
def set_metadata(self, metadata):
|
||||
"""Takes a MetaData object and sets this track's metadata.
|
||||
|
||||
This metadata includes track name, album name, and so on.
|
||||
Raises IOError if unable to write the file."""
|
||||
|
||||
comment = VorbisComment.converted(metadata)
|
||||
|
||||
if (comment is None):
|
||||
return
|
||||
|
||||
reader = OggStreamReader(file(self.filename, 'rb'))
|
||||
new_file = cStringIO.StringIO()
|
||||
writer = OggStreamWriter(new_file)
|
||||
|
||||
pages = reader.pages()
|
||||
|
||||
#transfer our old header
|
||||
(header_page, header_data) = pages.next()
|
||||
writer.write_page(header_page, header_data)
|
||||
|
||||
#skip the existing comment packet
|
||||
(page, data) = pages.next()
|
||||
while (page.segment_lengths[-1] == 255):
|
||||
(page, data) = pages.next()
|
||||
|
||||
#write the pages for our new comment packet
|
||||
comment_pages = OggStreamWriter.build_pages(
|
||||
0,
|
||||
header_page.bitstream_serial_number,
|
||||
header_page.page_sequence_number + 1,
|
||||
comment.build())
|
||||
|
||||
for (page, data) in comment_pages:
|
||||
writer.write_page(page, data)
|
||||
|
||||
#write the rest of the pages, re-sequenced and re-checksummed
|
||||
sequence_number = comment_pages[-1][0].page_sequence_number + 1
|
||||
for (i, (page, data)) in enumerate(pages):
|
||||
page.page_sequence_number = i + sequence_number
|
||||
page.checksum = OggStreamReader.calculate_ogg_checksum(page, data)
|
||||
writer.write_page(page, data)
|
||||
|
||||
reader.close()
|
||||
|
||||
#re-write the file with our new data in "new_file"
|
||||
f = file(self.filename, "wb")
|
||||
f.write(new_file.getvalue())
|
||||
f.close()
|
||||
writer.close()
|
||||
|
||||
self.__read_metadata__()
|
||||
|
||||
@classmethod
|
||||
def can_add_replay_gain(cls):
|
||||
"""Returns False."""
|
||||
|
||||
return False
|
842
Melodia/resources/audiotools/__vorbis__.py
Normal file
@ -0,0 +1,842 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
#Audio Tools, a module and set of tools for manipulating audio data
|
||||
#Copyright (C) 2007-2011 Brian Langenberger
|
||||
|
||||
#This program is free software; you can redistribute it and/or modify
|
||||
#it under the terms of the GNU General Public License as published by
|
||||
#the Free Software Foundation; either version 2 of the License, or
|
||||
#(at your option) any later version.
|
||||
|
||||
#This program is distributed in the hope that it will be useful,
|
||||
#but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
#GNU General Public License for more details.
|
||||
|
||||
#You should have received a copy of the GNU General Public License
|
||||
#along with this program; if not, write to the Free Software
|
||||
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
|
||||
from audiotools import (AudioFile, InvalidFile, PCMReader,
|
||||
ReorderedPCMReader, Con, transfer_data,
|
||||
transfer_framelist_data, subprocess, BIN,
|
||||
cStringIO, open_files, os, ReplayGain,
|
||||
ignore_sigint, EncodingError, DecodingError,
|
||||
ChannelMask, UnsupportedChannelMask,
|
||||
__default_quality__)
|
||||
from __vorbiscomment__ import *
|
||||
import gettext
|
||||
|
||||
gettext.install("audiotools", unicode=True)
|
||||
|
||||
|
||||
class InvalidVorbis(InvalidFile):
|
||||
pass
|
||||
|
||||
|
||||
def verify_ogg_stream(stream):
|
||||
"""Verifies an Ogg stream file object.
|
||||
|
||||
This file must be rewound to the start of a page.
|
||||
Returns True if the file is valid.
|
||||
Raises IOError or ValueError if there is some problem with the file.
|
||||
"""
|
||||
|
||||
from . import verify
|
||||
verify.ogg(stream)
|
||||
return True
|
||||
|
||||
|
||||
class OggStreamReader:
|
||||
"""A class for walking through an Ogg stream."""
|
||||
|
||||
OGGS = Con.Struct(
|
||||
"oggs",
|
||||
Con.Const(Con.String("magic_number", 4), "OggS"),
|
||||
Con.Byte("version"),
|
||||
Con.Byte("header_type"),
|
||||
Con.SLInt64("granule_position"),
|
||||
Con.ULInt32("bitstream_serial_number"),
|
||||
Con.ULInt32("page_sequence_number"),
|
||||
Con.ULInt32("checksum"),
|
||||
Con.Byte("segments"),
|
||||
Con.MetaRepeater(lambda ctx: ctx["segments"],
|
||||
Con.Byte("segment_lengths")))
|
||||
|
||||
def __init__(self, stream):
|
||||
"""stream is a file-like object with read() and close() methods."""
|
||||
|
||||
self.stream = stream
|
||||
|
||||
def close(self):
|
||||
"""Closes the sub-stream."""
|
||||
|
||||
self.stream.close()
|
||||
|
||||
def packets(self, from_beginning=True):
|
||||
"""Yields one fully reassembled Ogg packet per pass.
|
||||
|
||||
Packets are returned as binary strings."""
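        #reassembly follows Ogg's lacing rules: a segment length of 255
        #means the packet continues into the next segment (possibly on
        #the next page), while any shorter segment terminates the packet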
|
||||
|
||||
if (from_beginning):
|
||||
self.stream.seek(0, 0)
|
||||
|
||||
segment = cStringIO.StringIO()
|
||||
|
||||
while (True):
|
||||
try:
|
||||
page = OggStreamReader.OGGS.parse_stream(self.stream)
|
||||
|
||||
for length in page.segment_lengths:
|
||||
if (length == 255):
|
||||
segment.write(self.stream.read(length))
|
||||
else:
|
||||
segment.write(self.stream.read(length))
|
||||
yield segment.getvalue()
|
||||
segment = cStringIO.StringIO()
|
||||
|
||||
except Con.core.FieldError:
|
||||
break
|
||||
except Con.ConstError:
|
||||
break
|
||||
|
||||
def pages(self, from_beginning=True):
|
||||
"""Yields a (Container,string) tuple per pass.
|
||||
|
||||
Container is parsed from OggStreamReader.OGGS.
|
||||
string is a binary string of combined segments
|
||||
(which may not be a complete packet)."""
|
||||
|
||||
if (from_beginning):
|
||||
self.stream.seek(0, 0)
|
||||
|
||||
while (True):
|
||||
try:
|
||||
page = OggStreamReader.OGGS.parse_stream(self.stream)
|
||||
yield (page, self.stream.read(sum(page.segment_lengths)))
|
||||
except Con.core.FieldError:
|
||||
break
|
||||
except Con.ConstError:
|
||||
break
|
||||
|
||||
@classmethod
|
||||
def pages_to_packet(cls, pages_iter):
|
||||
"""Returns a complete packet as a list of (Container,string) tuples.
|
||||
|
||||
pages_iter should be an iterator of (Container,string) tuples
|
||||
as returned from the pages() method.
|
||||
"""
|
||||
|
||||
packet = [pages_iter.next()]
|
||||
while (packet[-1][0].segment_lengths[-1] == 255):
|
||||
packet.append(pages_iter.next())
|
||||
return packet
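        #a usage sketch, assuming some existing "stream.ogg" file:
        #
        #    reader = OggStreamReader(open("stream.ogg", "rb"))
        #    pages = reader.pages()
        #    first_packet_pages = OggStreamReader.pages_to_packet(pages)
        #
        #pages are pulled while the final lacing value is 255, since that
        #value marks a packet which spills onto the following page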
|
||||
|
||||
CRC_LOOKUP = (0x00000000, 0x04c11db7, 0x09823b6e, 0x0d4326d9,
|
||||
0x130476dc, 0x17c56b6b, 0x1a864db2, 0x1e475005,
|
||||
0x2608edb8, 0x22c9f00f, 0x2f8ad6d6, 0x2b4bcb61,
|
||||
0x350c9b64, 0x31cd86d3, 0x3c8ea00a, 0x384fbdbd,
|
||||
0x4c11db70, 0x48d0c6c7, 0x4593e01e, 0x4152fda9,
|
||||
0x5f15adac, 0x5bd4b01b, 0x569796c2, 0x52568b75,
|
||||
0x6a1936c8, 0x6ed82b7f, 0x639b0da6, 0x675a1011,
|
||||
0x791d4014, 0x7ddc5da3, 0x709f7b7a, 0x745e66cd,
|
||||
0x9823b6e0, 0x9ce2ab57, 0x91a18d8e, 0x95609039,
|
||||
0x8b27c03c, 0x8fe6dd8b, 0x82a5fb52, 0x8664e6e5,
|
||||
0xbe2b5b58, 0xbaea46ef, 0xb7a96036, 0xb3687d81,
|
||||
0xad2f2d84, 0xa9ee3033, 0xa4ad16ea, 0xa06c0b5d,
|
||||
0xd4326d90, 0xd0f37027, 0xddb056fe, 0xd9714b49,
|
||||
0xc7361b4c, 0xc3f706fb, 0xceb42022, 0xca753d95,
|
||||
0xf23a8028, 0xf6fb9d9f, 0xfbb8bb46, 0xff79a6f1,
|
||||
0xe13ef6f4, 0xe5ffeb43, 0xe8bccd9a, 0xec7dd02d,
|
||||
0x34867077, 0x30476dc0, 0x3d044b19, 0x39c556ae,
|
||||
0x278206ab, 0x23431b1c, 0x2e003dc5, 0x2ac12072,
|
||||
0x128e9dcf, 0x164f8078, 0x1b0ca6a1, 0x1fcdbb16,
|
||||
0x018aeb13, 0x054bf6a4, 0x0808d07d, 0x0cc9cdca,
|
||||
0x7897ab07, 0x7c56b6b0, 0x71159069, 0x75d48dde,
|
||||
0x6b93dddb, 0x6f52c06c, 0x6211e6b5, 0x66d0fb02,
|
||||
0x5e9f46bf, 0x5a5e5b08, 0x571d7dd1, 0x53dc6066,
|
||||
0x4d9b3063, 0x495a2dd4, 0x44190b0d, 0x40d816ba,
|
||||
0xaca5c697, 0xa864db20, 0xa527fdf9, 0xa1e6e04e,
|
||||
0xbfa1b04b, 0xbb60adfc, 0xb6238b25, 0xb2e29692,
|
||||
0x8aad2b2f, 0x8e6c3698, 0x832f1041, 0x87ee0df6,
|
||||
0x99a95df3, 0x9d684044, 0x902b669d, 0x94ea7b2a,
|
||||
0xe0b41de7, 0xe4750050, 0xe9362689, 0xedf73b3e,
|
||||
0xf3b06b3b, 0xf771768c, 0xfa325055, 0xfef34de2,
|
||||
0xc6bcf05f, 0xc27dede8, 0xcf3ecb31, 0xcbffd686,
|
||||
0xd5b88683, 0xd1799b34, 0xdc3abded, 0xd8fba05a,
|
||||
0x690ce0ee, 0x6dcdfd59, 0x608edb80, 0x644fc637,
|
||||
0x7a089632, 0x7ec98b85, 0x738aad5c, 0x774bb0eb,
|
||||
0x4f040d56, 0x4bc510e1, 0x46863638, 0x42472b8f,
|
||||
0x5c007b8a, 0x58c1663d, 0x558240e4, 0x51435d53,
|
||||
0x251d3b9e, 0x21dc2629, 0x2c9f00f0, 0x285e1d47,
|
||||
0x36194d42, 0x32d850f5, 0x3f9b762c, 0x3b5a6b9b,
|
||||
0x0315d626, 0x07d4cb91, 0x0a97ed48, 0x0e56f0ff,
|
||||
0x1011a0fa, 0x14d0bd4d, 0x19939b94, 0x1d528623,
|
||||
0xf12f560e, 0xf5ee4bb9, 0xf8ad6d60, 0xfc6c70d7,
|
||||
0xe22b20d2, 0xe6ea3d65, 0xeba91bbc, 0xef68060b,
|
||||
0xd727bbb6, 0xd3e6a601, 0xdea580d8, 0xda649d6f,
|
||||
0xc423cd6a, 0xc0e2d0dd, 0xcda1f604, 0xc960ebb3,
|
||||
0xbd3e8d7e, 0xb9ff90c9, 0xb4bcb610, 0xb07daba7,
|
||||
0xae3afba2, 0xaafbe615, 0xa7b8c0cc, 0xa379dd7b,
|
||||
0x9b3660c6, 0x9ff77d71, 0x92b45ba8, 0x9675461f,
|
||||
0x8832161a, 0x8cf30bad, 0x81b02d74, 0x857130c3,
|
||||
0x5d8a9099, 0x594b8d2e, 0x5408abf7, 0x50c9b640,
|
||||
0x4e8ee645, 0x4a4ffbf2, 0x470cdd2b, 0x43cdc09c,
|
||||
0x7b827d21, 0x7f436096, 0x7200464f, 0x76c15bf8,
|
||||
0x68860bfd, 0x6c47164a, 0x61043093, 0x65c52d24,
|
||||
0x119b4be9, 0x155a565e, 0x18197087, 0x1cd86d30,
|
||||
0x029f3d35, 0x065e2082, 0x0b1d065b, 0x0fdc1bec,
|
||||
0x3793a651, 0x3352bbe6, 0x3e119d3f, 0x3ad08088,
|
||||
0x2497d08d, 0x2056cd3a, 0x2d15ebe3, 0x29d4f654,
|
||||
0xc5a92679, 0xc1683bce, 0xcc2b1d17, 0xc8ea00a0,
|
||||
0xd6ad50a5, 0xd26c4d12, 0xdf2f6bcb, 0xdbee767c,
|
||||
0xe3a1cbc1, 0xe760d676, 0xea23f0af, 0xeee2ed18,
|
||||
0xf0a5bd1d, 0xf464a0aa, 0xf9278673, 0xfde69bc4,
|
||||
0x89b8fd09, 0x8d79e0be, 0x803ac667, 0x84fbdbd0,
|
||||
0x9abc8bd5, 0x9e7d9662, 0x933eb0bb, 0x97ffad0c,
|
||||
0xafb010b1, 0xab710d06, 0xa6322bdf, 0xa2f33668,
|
||||
0xbcb4666d, 0xb8757bda, 0xb5365d03, 0xb1f740b4)
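    #lookup table for the Ogg page checksum: a CRC-32 with polynomial
    #0x04C11DB7, no bit reflection and an initial value of 0, computed
    #over the page with its checksum field zeroed out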
|
||||
|
||||
@classmethod
|
||||
def calculate_ogg_checksum(cls, page_header, page_data):
|
||||
"""Calculates an Ogg checksum integer.
|
||||
|
||||
page_header is a Container object parsed through OGGS.
|
||||
page_data is a string of data contained by the page.
|
||||
"""
|
||||
|
||||
old_checksum = page_header.checksum
|
||||
try:
|
||||
page_header.checksum = 0
|
||||
sum = 0
|
||||
for c in cls.OGGS.build(page_header) + page_data:
|
||||
sum = ((sum << 8) ^ \
|
||||
cls.CRC_LOOKUP[((sum >> 24) & 0xFF) ^ ord(c)]) \
|
||||
& 0xFFFFFFFF
|
||||
return sum
|
||||
finally:
|
||||
page_header.checksum = old_checksum
|
||||
|
||||
|
||||
class OggStreamWriter:
|
||||
"""A class for building an Ogg stream."""
|
||||
|
||||
def __init__(self, stream):
|
||||
"""stream is a file-like object with read() and close() methods."""
|
||||
|
||||
self.stream = stream
|
||||
|
||||
def close(self):
|
||||
"""Closes the sub-stream."""
|
||||
|
||||
self.stream.close()
|
||||
|
||||
def write_page(self, page_header, page_data):
|
||||
"""Writes a complete Ogg page to the stream.
|
||||
|
||||
page_header is an OGGS-generated Container with all of the
|
||||
fields properly set.
|
||||
page_data is a string containing all of the page's segment data.
|
||||
"""
|
||||
|
||||
self.stream.write(OggStreamReader.OGGS.build(page_header))
|
||||
self.stream.write(page_data)
|
||||
|
||||
@classmethod
|
||||
def build_pages(cls, granule_position, serial_number,
|
||||
starting_sequence_number, packet_data,
|
||||
header_type=0):
|
||||
"""Constructs an Ogg packet for page data.
|
||||
|
||||
        Takes serial_number, granule_position and starting_sequence_number
|
||||
integers and a packet_data string.
|
||||
Returns a list of (page_header,page_data) tuples containing
|
||||
all of the Ogg pages necessary to contain the packet.
|
||||
"""
|
||||
|
||||
page = Con.Container(magic_number='OggS',
|
||||
version=0,
|
||||
header_type=header_type,
|
||||
granule_position=granule_position,
|
||||
bitstream_serial_number=serial_number,
|
||||
page_sequence_number=starting_sequence_number,
|
||||
checksum=0)
|
||||
|
||||
if (len(packet_data) == 0):
|
||||
#an empty Ogg page, but possibly a continuation
|
||||
|
||||
page.segments = 0
|
||||
page.segment_lengths = []
|
||||
page.checksum = OggStreamReader.calculate_ogg_checksum(
|
||||
page, packet_data)
|
||||
return [(page, "")]
|
||||
if (len(packet_data) > (255 * 255)):
|
||||
#if we need more than one Ogg page to store the packet,
|
||||
#handle that case recursively
|
||||
|
||||
page.segments = 255
|
||||
page.segment_lengths = [255] * 255
|
||||
page.checksum = OggStreamReader.calculate_ogg_checksum(
|
||||
page, packet_data[0:255 * 255])
|
||||
|
||||
return [(page, packet_data[0:255 * 255])] + \
|
||||
cls.build_pages(granule_position,
|
||||
serial_number,
|
||||
starting_sequence_number + 1,
|
||||
packet_data[255 * 255:],
|
||||
header_type)
|
||||
        elif (len(packet_data) == (255 * 255)):
            #we need two Ogg pages, one of which is empty
            #(the full page is built directly here rather than by
            #recursing on the same-sized data, which would never return)

            page.segments = 255
            page.segment_lengths = [255] * 255
            page.checksum = OggStreamReader.calculate_ogg_checksum(
                page, packet_data)

            return [(page, packet_data)] + \
                cls.build_pages(granule_position,
                                serial_number,
                                starting_sequence_number + 1,
                                "",
                                header_type)
|
||||
else:
|
||||
#we just need one Ogg page
|
||||
|
||||
page.segments = len(packet_data) / 255
|
||||
if ((len(packet_data) % 255) > 0):
|
||||
page.segments += 1
|
||||
|
||||
page.segment_lengths = [255] * (len(packet_data) / 255)
|
||||
if ((len(packet_data) % 255) > 0):
|
||||
page.segment_lengths += [len(packet_data) % 255]
|
||||
|
||||
page.checksum = OggStreamReader.calculate_ogg_checksum(
|
||||
page, packet_data)
|
||||
return [(page, packet_data)]
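        #for instance, a 600-byte packet fits on one page with
        #segment_lengths == [255, 255, 90], while a packet of exactly
        #255 * 255 bytes is followed by an empty page so that a lacing
        #value below 255 still terminates it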
|
||||
|
||||
|
||||
#######################
|
||||
#Vorbis File
|
||||
#######################
|
||||
|
||||
class VorbisAudio(AudioFile):
|
||||
"""An Ogg Vorbis file."""
|
||||
|
||||
SUFFIX = "ogg"
|
||||
NAME = SUFFIX
|
||||
DEFAULT_COMPRESSION = "3"
|
||||
COMPRESSION_MODES = tuple([str(i) for i in range(0, 11)])
|
||||
COMPRESSION_DESCRIPTIONS = {"0": _(u"very low quality, " +
|
||||
u"corresponds to oggenc -q 0"),
|
||||
"10": _(u"very high quality, " +
|
||||
u"corresponds to oggenc -q 10")}
|
||||
BINARIES = ("oggenc", "oggdec")
|
||||
REPLAYGAIN_BINARIES = ("vorbisgain", )
|
||||
|
||||
OGG_IDENTIFICATION = Con.Struct(
|
||||
"ogg_id",
|
||||
Con.ULInt32("vorbis_version"),
|
||||
Con.Byte("channels"),
|
||||
Con.ULInt32("sample_rate"),
|
||||
Con.ULInt32("bitrate_maximum"),
|
||||
Con.ULInt32("bitrate_nominal"),
|
||||
Con.ULInt32("bitrate_minimum"),
|
||||
Con.Embed(Con.BitStruct("flags",
|
||||
Con.Bits("blocksize_0", 4),
|
||||
Con.Bits("blocksize_1", 4))),
|
||||
Con.Byte("framing"))
|
||||
|
||||
COMMENT_HEADER = Con.Struct(
|
||||
"comment_header",
|
||||
Con.Byte("packet_type"),
|
||||
Con.String("vorbis", 6))
|
||||
|
||||
def __init__(self, filename):
|
||||
"""filename is a plain string."""
|
||||
|
||||
AudioFile.__init__(self, filename)
|
||||
try:
|
||||
self.__read_metadata__()
|
||||
except IOError, msg:
|
||||
raise InvalidVorbis(str(msg))
|
||||
|
||||
@classmethod
|
||||
def is_type(cls, file):
|
||||
"""Returns True if the given file object describes this format.
|
||||
|
||||
Takes a seekable file pointer rewound to the start of the file."""
|
||||
|
||||
header = file.read(0x23)
|
||||
|
||||
return (header.startswith('OggS') and
|
||||
header[0x1C:0x23] == '\x01vorbis')
|
||||
|
||||
def __read_metadata__(self):
|
||||
f = OggStreamReader(file(self.filename, "rb"))
|
||||
packets = f.packets()
|
||||
|
||||
try:
|
||||
#we'll assume this Vorbis file isn't interleaved
|
||||
#with any other Ogg stream
|
||||
|
||||
#the Identification packet comes first
|
||||
try:
|
||||
id_packet = packets.next()
|
||||
except StopIteration:
|
||||
raise InvalidVorbis("Vorbis identification packet not found")
|
||||
|
||||
header = VorbisAudio.COMMENT_HEADER.parse(
|
||||
id_packet[0:VorbisAudio.COMMENT_HEADER.sizeof()])
|
||||
if ((header.packet_type == 0x01) and
|
||||
(header.vorbis == 'vorbis')):
|
||||
identification = VorbisAudio.OGG_IDENTIFICATION.parse(
|
||||
id_packet[VorbisAudio.COMMENT_HEADER.sizeof():])
|
||||
self.__sample_rate__ = identification.sample_rate
|
||||
self.__channels__ = identification.channels
|
||||
else:
|
||||
raise InvalidVorbis(_(u'First packet is not Vorbis'))
|
||||
|
||||
#the Comment packet comes next
|
||||
comment_packet = packets.next()
|
||||
header = VorbisAudio.COMMENT_HEADER.parse(
|
||||
comment_packet[0:VorbisAudio.COMMENT_HEADER.sizeof()])
|
||||
if ((header.packet_type == 0x03) and
|
||||
(header.vorbis == 'vorbis')):
|
||||
self.comment = VorbisComment.VORBIS_COMMENT.parse(
|
||||
comment_packet[VorbisAudio.COMMENT_HEADER.sizeof():])
|
||||
|
||||
finally:
|
||||
del(packets)
|
||||
f.close()
|
||||
del(f)
|
||||
|
||||
def lossless(self):
|
||||
"""Returns False."""
|
||||
|
||||
return False
|
||||
|
||||
def bits_per_sample(self):
|
||||
"""Returns an integer number of bits-per-sample this track contains."""
|
||||
|
||||
return 16
|
||||
|
||||
def channels(self):
|
||||
"""Returns an integer number of channels this track contains."""
|
||||
|
||||
return self.__channels__
|
||||
|
||||
def channel_mask(self):
|
||||
"""Returns a ChannelMask object of this track's channel layout."""
|
||||
|
||||
if (self.channels() == 1):
|
||||
return ChannelMask.from_fields(
|
||||
front_center=True)
|
||||
elif (self.channels() == 2):
|
||||
return ChannelMask.from_fields(
|
||||
front_left=True, front_right=True)
|
||||
elif (self.channels() == 3):
|
||||
return ChannelMask.from_fields(
|
||||
front_left=True, front_right=True,
|
||||
front_center=True)
|
||||
elif (self.channels() == 4):
|
||||
return ChannelMask.from_fields(
|
||||
front_left=True, front_right=True,
|
||||
back_left=True, back_right=True)
|
||||
elif (self.channels() == 5):
|
||||
return ChannelMask.from_fields(
|
||||
front_left=True, front_right=True,
|
||||
front_center=True,
|
||||
back_left=True, back_right=True)
|
||||
elif (self.channels() == 6):
|
||||
return ChannelMask.from_fields(
|
||||
front_left=True, front_right=True,
|
||||
front_center=True,
|
||||
back_left=True, back_right=True,
|
||||
low_frequency=True)
|
||||
elif (self.channels() == 7):
|
||||
return ChannelMask.from_fields(
|
||||
front_left=True, front_right=True,
|
||||
front_center=True,
|
||||
side_left=True, side_right=True,
|
||||
back_center=True, low_frequency=True)
|
||||
elif (self.channels() == 8):
|
||||
return ChannelMask.from_fields(
|
||||
front_left=True, front_right=True,
|
||||
side_left=True, side_right=True,
|
||||
back_left=True, back_right=True,
|
||||
front_center=True, low_frequency=True)
|
||||
else:
|
||||
return ChannelMask(0)
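        #the masks above mirror the fixed channel orderings the Vorbis I
        #specification defines for 1 to 8 channels; beyond 8 channels the
        #mapping is undefined, hence the empty ChannelMask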
|
||||
|
||||
def total_frames(self):
|
||||
"""Returns the total PCM frames of the track as an integer."""
|
||||
|
||||
pcm_samples = 0
|
||||
f = file(self.filename, "rb")
|
||||
try:
|
||||
while (True):
|
||||
try:
|
||||
page = OggStreamReader.OGGS.parse_stream(f)
|
||||
pcm_samples = page.granule_position
|
||||
f.seek(sum(page.segment_lengths), 1)
|
||||
except Con.core.FieldError:
|
||||
break
|
||||
except Con.ConstError:
|
||||
break
|
||||
|
||||
return pcm_samples
|
||||
finally:
|
||||
f.close()
|
||||
|
||||
def sample_rate(self):
|
||||
"""Returns the rate of the track's audio as an integer number of Hz."""
|
||||
|
||||
return self.__sample_rate__
|
||||
|
||||
def to_pcm(self):
|
||||
"""Returns a PCMReader object containing the track's PCM data."""
|
||||
|
||||
sub = subprocess.Popen([BIN['oggdec'], '-Q',
|
||||
'-b', str(16),
|
||||
'-e', str(0),
|
||||
'-s', str(1),
|
||||
'-R',
|
||||
'-o', '-',
|
||||
self.filename],
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=file(os.devnull, "a"))
|
||||
|
||||
pcmreader = PCMReader(sub.stdout,
|
||||
sample_rate=self.sample_rate(),
|
||||
channels=self.channels(),
|
||||
channel_mask=int(self.channel_mask()),
|
||||
bits_per_sample=self.bits_per_sample(),
|
||||
process=sub)
|
||||
|
||||
if (self.channels() <= 2):
|
||||
return pcmreader
|
||||
elif (self.channels() <= 8):
|
||||
#these mappings transform Vorbis order into ChannelMask order
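            #for example, a 3-channel stream is front_left, front_center,
            #front_right in Vorbis order but front_left, front_right,
            #front_center in ChannelMask order, so the index list built
            #below works out to [0, 2, 1]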
|
||||
standard_channel_mask = self.channel_mask()
|
||||
vorbis_channel_mask = VorbisChannelMask(self.channel_mask())
|
||||
return ReorderedPCMReader(
|
||||
pcmreader,
|
||||
[vorbis_channel_mask.channels().index(channel) for channel in
|
||||
standard_channel_mask.channels()])
|
||||
else:
|
||||
return pcmreader
|
||||
|
||||
@classmethod
|
||||
def from_pcm(cls, filename, pcmreader, compression=None):
|
||||
"""Returns a PCMReader object containing the track's PCM data."""
|
||||
|
||||
if ((compression is None) or
|
||||
(compression not in cls.COMPRESSION_MODES)):
|
||||
compression = __default_quality__(cls.NAME)
|
||||
|
||||
devnull = file(os.devnull, 'ab')
|
||||
|
||||
sub = subprocess.Popen([BIN['oggenc'], '-Q',
|
||||
'-r',
|
||||
'-B', str(pcmreader.bits_per_sample),
|
||||
'-C', str(pcmreader.channels),
|
||||
'-R', str(pcmreader.sample_rate),
|
||||
'--raw-endianness', str(0),
|
||||
'-q', compression,
|
||||
'-o', filename, '-'],
|
||||
stdin=subprocess.PIPE,
|
||||
stdout=devnull,
|
||||
stderr=devnull,
|
||||
preexec_fn=ignore_sigint)
|
||||
|
||||
if ((pcmreader.channels <= 2) or (int(pcmreader.channel_mask) == 0)):
|
||||
try:
|
||||
transfer_framelist_data(pcmreader, sub.stdin.write)
|
||||
except (IOError, ValueError), err:
|
||||
sub.stdin.close()
|
||||
sub.wait()
|
||||
cls.__unlink__(filename)
|
||||
raise EncodingError(str(err))
|
||||
except Exception, err:
|
||||
sub.stdin.close()
|
||||
sub.wait()
|
||||
cls.__unlink__(filename)
|
||||
raise err
|
||||
|
||||
elif (pcmreader.channels <= 8):
|
||||
if (int(pcmreader.channel_mask) in
|
||||
(0x7, # FR, FC, FL
|
||||
0x33, # FR, FL, BR, BL
|
||||
0x37, # FR, FC, FL, BL, BR
|
||||
0x3f, # FR, FC, FL, BL, BR, LFE
|
||||
0x70f, # FL, FC, FR, SL, SR, BC, LFE
|
||||
0x63f)): # FL, FC, FR, SL, SR, BL, BR, LFE
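                #these masks are the conventional 3- to 8-channel layouts
                #for which Vorbis defines a fixed channel order; any other
                #mask cannot be reordered deterministically and is
                #rejected below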
|
||||
|
||||
standard_channel_mask = ChannelMask(pcmreader.channel_mask)
|
||||
vorbis_channel_mask = VorbisChannelMask(standard_channel_mask)
|
||||
else:
|
||||
raise UnsupportedChannelMask(filename,
|
||||
int(pcmreader.channel_mask))
|
||||
|
||||
try:
|
||||
transfer_framelist_data(ReorderedPCMReader(
|
||||
pcmreader,
|
||||
[standard_channel_mask.channels().index(channel)
|
||||
for channel in vorbis_channel_mask.channels()]),
|
||||
sub.stdin.write)
|
||||
except (IOError, ValueError), err:
|
||||
sub.stdin.close()
|
||||
sub.wait()
|
||||
cls.__unlink__(filename)
|
||||
raise EncodingError(str(err))
|
||||
except Exception, err:
|
||||
sub.stdin.close()
|
||||
sub.wait()
|
||||
cls.__unlink__(filename)
|
||||
raise err
|
||||
|
||||
else:
|
||||
raise UnsupportedChannelMask(filename,
|
||||
int(pcmreader.channel_mask))
|
||||
|
||||
try:
|
||||
pcmreader.close()
|
||||
except DecodingError, err:
|
||||
raise EncodingError(err.error_message)
|
||||
|
||||
sub.stdin.close()
|
||||
devnull.close()
|
||||
|
||||
if (sub.wait() == 0):
|
||||
return VorbisAudio(filename)
|
||||
else:
|
||||
raise EncodingError(u"unable to encode file with oggenc")
|
||||
|
||||
def set_metadata(self, metadata):
|
||||
"""Takes a MetaData object and sets this track's metadata.
|
||||
|
||||
This metadata includes track name, album name, and so on.
|
||||
Raises IOError if unable to write the file."""
|
||||
|
||||
metadata = VorbisComment.converted(metadata)
|
||||
|
||||
if (metadata is None):
|
||||
return
|
||||
|
||||
reader = OggStreamReader(file(self.filename, 'rb'))
|
||||
new_file = cStringIO.StringIO()
|
||||
writer = OggStreamWriter(new_file)
|
||||
current_sequence_number = 0
|
||||
|
||||
pages = reader.pages()
|
||||
|
||||
#transfer our old header
|
||||
#this must always be the first packet and the first page
|
||||
(header_page, header_data) = pages.next()
|
||||
writer.write_page(header_page, header_data)
|
||||
current_sequence_number += 1
|
||||
|
||||
#grab the current "comment" and "setup headers" packets
|
||||
#these may take one or more pages,
|
||||
#but will always end on a page boundary
|
||||
del(pages)
|
||||
packets = reader.packets(from_beginning=False)
|
||||
|
||||
comment_packet = packets.next()
|
||||
headers_packet = packets.next()
|
||||
|
||||
#write the pages for our new "comment" packet
|
||||
for (page, data) in OggStreamWriter.build_pages(
|
||||
0,
|
||||
header_page.bitstream_serial_number,
|
||||
current_sequence_number,
|
||||
VorbisAudio.COMMENT_HEADER.build(Con.Container(
|
||||
packet_type=3,
|
||||
vorbis='vorbis')) + metadata.build()):
|
||||
writer.write_page(page, data)
|
||||
current_sequence_number += 1
|
||||
|
||||
#write the pages for the old "setup headers" packet
|
||||
for (page, data) in OggStreamWriter.build_pages(
|
||||
0,
|
||||
header_page.bitstream_serial_number,
|
||||
current_sequence_number,
|
||||
headers_packet):
|
||||
writer.write_page(page, data)
|
||||
current_sequence_number += 1
|
||||
|
||||
#write the rest of the pages, re-sequenced and re-checksummed
|
||||
del(packets)
|
||||
pages = reader.pages(from_beginning=False)
|
||||
|
||||
for (i, (page, data)) in enumerate(pages):
|
||||
page.page_sequence_number = i + current_sequence_number
|
||||
page.checksum = OggStreamReader.calculate_ogg_checksum(page, data)
|
||||
writer.write_page(page, data)
|
||||
|
||||
reader.close()
|
||||
|
||||
#re-write the file with our new data in "new_file"
|
||||
f = file(self.filename, "wb")
|
||||
f.write(new_file.getvalue())
|
||||
f.close()
|
||||
writer.close()
|
||||
|
||||
self.__read_metadata__()
|
||||
|
||||
def get_metadata(self):
|
||||
"""Returns a MetaData object, or None.
|
||||
|
||||
Raises IOError if unable to read the file."""
|
||||
|
||||
self.__read_metadata__()
|
||||
data = {}
|
||||
for pair in self.comment.value:
|
||||
try:
|
||||
(key, value) = pair.split('=', 1)
|
||||
data.setdefault(key, []).append(value.decode('utf-8'))
|
||||
except ValueError:
|
||||
continue
|
||||
|
||||
return VorbisComment(data)
|
||||
|
||||
def delete_metadata(self):
|
||||
"""Deletes the track's MetaData.
|
||||
|
||||
This removes or unsets tags as necessary in order to remove all data.
|
||||
Raises IOError if unable to write the file."""
|
||||
|
||||
self.set_metadata(MetaData())
|
||||
|
||||
@classmethod
|
||||
def add_replay_gain(cls, filenames, progress=None):
|
||||
"""Adds ReplayGain values to a list of filename strings.
|
||||
|
||||
All the filenames must be of this AudioFile type.
|
||||
Raises ValueError if some problem occurs during ReplayGain application.
|
||||
"""
|
||||
|
||||
track_names = [track.filename for track in
|
||||
open_files(filenames) if
|
||||
isinstance(track, cls)]
|
||||
|
||||
if (progress is not None):
|
||||
progress(0, 1)
|
||||
|
||||
if ((len(track_names) > 0) and
|
||||
BIN.can_execute(BIN['vorbisgain'])):
|
||||
devnull = file(os.devnull, 'ab')
|
||||
|
||||
sub = subprocess.Popen([BIN['vorbisgain'],
|
||||
'-q', '-a'] + track_names,
|
||||
stdout=devnull,
|
||||
stderr=devnull)
|
||||
sub.wait()
|
||||
devnull.close()
|
||||
|
||||
if (progress is not None):
|
||||
progress(1, 1)
|
||||
|
||||
@classmethod
|
||||
def can_add_replay_gain(cls):
|
||||
"""Returns True if we have the necessary binaries to add ReplayGain."""
|
||||
|
||||
return BIN.can_execute(BIN['vorbisgain'])
|
||||
|
||||
@classmethod
|
||||
def lossless_replay_gain(cls):
|
||||
"""Returns True."""
|
||||
|
||||
return True
|
||||
|
||||
def replay_gain(self):
|
||||
"""Returns a ReplayGain object of our ReplayGain values.
|
||||
|
||||
Returns None if we have no values."""
|
||||
|
||||
vorbis_metadata = self.get_metadata()
|
||||
|
||||
if (set(['REPLAYGAIN_TRACK_PEAK', 'REPLAYGAIN_TRACK_GAIN',
|
||||
'REPLAYGAIN_ALBUM_PEAK', 'REPLAYGAIN_ALBUM_GAIN']).issubset(
|
||||
vorbis_metadata.keys())): # we have ReplayGain data
|
||||
try:
|
||||
return ReplayGain(
|
||||
vorbis_metadata['REPLAYGAIN_TRACK_GAIN'][0][0:-len(" dB")],
|
||||
vorbis_metadata['REPLAYGAIN_TRACK_PEAK'][0],
|
||||
vorbis_metadata['REPLAYGAIN_ALBUM_GAIN'][0][0:-len(" dB")],
|
||||
vorbis_metadata['REPLAYGAIN_ALBUM_PEAK'][0])
|
||||
except ValueError:
|
||||
return None
|
||||
else:
|
||||
return None
|
||||
|
||||
def verify(self, progress=None):
|
||||
"""Verifies the current file for correctness.
|
||||
|
||||
Returns True if the file is okay.
|
||||
Raises an InvalidFile with an error message if there is
|
||||
some problem with the file."""
|
||||
|
||||
#Ogg stream verification is likely to be so fast
|
||||
#that individual calls to progress() are
|
||||
#a waste of time.
|
||||
if (progress is not None):
|
||||
progress(0, 1)
|
||||
|
||||
try:
|
||||
f = open(self.filename, 'rb')
|
||||
except IOError, err:
|
||||
raise InvalidVorbis(str(err))
|
||||
try:
|
||||
try:
|
||||
result = verify_ogg_stream(f)
|
||||
if (progress is not None):
|
||||
progress(1, 1)
|
||||
return result
|
||||
except (IOError, ValueError), err:
|
||||
raise InvalidVorbis(str(err))
|
||||
finally:
|
||||
f.close()
|
||||
|
||||
|
||||
class VorbisChannelMask(ChannelMask):
|
||||
"""The Vorbis-specific channel mapping."""
|
||||
|
||||
def __repr__(self):
|
||||
return "VorbisChannelMask(%s)" % \
|
||||
",".join(["%s=%s" % (field, getattr(self, field))
|
||||
for field in self.SPEAKER_TO_MASK.keys()
|
||||
if (getattr(self, field))])
|
||||
|
||||
def channels(self):
|
||||
"""Returns a list of speaker strings this mask contains.
|
||||
|
||||
Returned in the order in which they should appear
|
||||
in the PCM stream.
|
||||
"""
|
||||
|
||||
count = len(self)
|
||||
if (count == 1):
|
||||
return ["front_center"]
|
||||
elif (count == 2):
|
||||
return ["front_left", "front_right"]
|
||||
elif (count == 3):
|
||||
return ["front_left", "front_center", "front_right"]
|
||||
elif (count == 4):
|
||||
return ["front_left", "front_right",
|
||||
"back_left", "back_right"]
|
||||
elif (count == 5):
|
||||
return ["front_left", "front_center", "front_right",
|
||||
"back_left", "back_right"]
|
||||
elif (count == 6):
|
||||
return ["front_left", "front_center", "front_right",
|
||||
"back_left", "back_right", "low_frequency"]
|
||||
elif (count == 7):
|
||||
return ["front_left", "front_center", "front_right",
|
||||
"side_left", "side_right", "back_center",
|
||||
"low_frequency"]
|
||||
elif (count == 8):
|
||||
return ["front_left", "front_center", "front_right",
|
||||
"side_left", "side_right",
|
||||
"back_left", "back_right", "low_frequency"]
|
||||
else:
|
||||
return []
|
294
Melodia/resources/audiotools/__vorbiscomment__.py
Normal file
@ -0,0 +1,294 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
#Audio Tools, a module and set of tools for manipulating audio data
|
||||
#Copyright (C) 2007-2011 Brian Langenberger
|
||||
|
||||
#This program is free software; you can redistribute it and/or modify
|
||||
#it under the terms of the GNU General Public License as published by
|
||||
#the Free Software Foundation; either version 2 of the License, or
|
||||
#(at your option) any later version.
|
||||
|
||||
#This program is distributed in the hope that it will be useful,
|
||||
#but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
#GNU General Public License for more details.
|
||||
|
||||
#You should have received a copy of the GNU General Public License
|
||||
#along with this program; if not, write to the Free Software
|
||||
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
|
||||
|
||||
from audiotools import MetaData, Con, VERSION, re
|
||||
|
||||
|
||||
class VorbisComment(MetaData, dict):
|
||||
"""A complete Vorbis Comment tag."""
|
||||
|
||||
VORBIS_COMMENT = Con.Struct(
|
||||
"vorbis_comment",
|
||||
Con.PascalString("vendor_string",
|
||||
length_field=Con.ULInt32("length")),
|
||||
Con.PrefixedArray(
|
||||
length_field=Con.ULInt32("length"),
|
||||
subcon=Con.PascalString("value",
|
||||
length_field=Con.ULInt32("length"))),
|
||||
Con.Const(Con.Byte("framing"), 1))
|
||||
|
||||
ATTRIBUTE_MAP = {'track_name': 'TITLE',
|
||||
'track_number': 'TRACKNUMBER',
|
||||
'track_total': 'TRACKTOTAL',
|
||||
'album_name': 'ALBUM',
|
||||
'artist_name': 'ARTIST',
|
||||
'performer_name': 'PERFORMER',
|
||||
'composer_name': 'COMPOSER',
|
||||
'conductor_name': 'CONDUCTOR',
|
||||
'media': 'SOURCE MEDIUM',
|
||||
'ISRC': 'ISRC',
|
||||
'catalog': 'CATALOG',
|
||||
'copyright': 'COPYRIGHT',
|
||||
'publisher': 'PUBLISHER',
|
||||
'year': 'DATE',
|
||||
'album_number': 'DISCNUMBER',
|
||||
'album_total': 'DISCTOTAL',
|
||||
'comment': 'COMMENT'}
|
||||
|
||||
ITEM_MAP = dict(map(reversed, ATTRIBUTE_MAP.items()))
|
||||
|
||||
def __init__(self, vorbis_data, vendor_string=u""):
|
||||
"""Initialized with a key->[value1,value2] dict.
|
||||
|
||||
keys are generally upper case.
|
||||
values are unicode string.
|
||||
vendor_string is an optional unicode string."""
|
||||
|
||||
dict.__init__(self, [(key.upper(), values)
|
||||
for (key, values) in vorbis_data.items()])
|
||||
self.vendor_string = vendor_string
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
dict.__setitem__(self, key.upper(), value)
|
||||
|
||||
def __getattr__(self, key):
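        #numeric tags may be stored either as a plain number ("3") or in
        #"number/total" form ("3/12"); each branch below tries the plain
        #form first, then pulls the relevant group out of the slashed
        #form, and finally falls back to 0 when neither matches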
|
||||
if (key == 'track_number'):
|
||||
match = re.match(r'^\d+$',
|
||||
self.get('TRACKNUMBER', [u''])[0])
|
||||
if (match):
|
||||
return int(match.group(0))
|
||||
else:
|
||||
match = re.match('^(\d+)/\d+$',
|
||||
self.get('TRACKNUMBER', [u''])[0])
|
||||
if (match):
|
||||
return int(match.group(1))
|
||||
else:
|
||||
return 0
|
||||
elif (key == 'track_total'):
|
||||
match = re.match(r'^\d+$',
|
||||
self.get('TRACKTOTAL', [u''])[0])
|
||||
if (match):
|
||||
return int(match.group(0))
|
||||
else:
|
||||
match = re.match('^\d+/(\d+)$',
|
||||
self.get('TRACKNUMBER', [u''])[0])
|
||||
if (match):
|
||||
return int(match.group(1))
|
||||
else:
|
||||
return 0
|
||||
elif (key == 'album_number'):
|
||||
match = re.match(r'^\d+$',
|
||||
self.get('DISCNUMBER', [u''])[0])
|
||||
if (match):
|
||||
return int(match.group(0))
|
||||
else:
|
||||
match = re.match('^(\d+)/\d+$',
|
||||
self.get('DISCNUMBER', [u''])[0])
|
||||
if (match):
|
||||
return int(match.group(1))
|
||||
else:
|
||||
return 0
|
||||
elif (key == 'album_total'):
|
||||
match = re.match(r'^\d+$',
|
||||
self.get('DISCTOTAL', [u''])[0])
|
||||
if (match):
|
||||
return int(match.group(0))
|
||||
else:
|
||||
match = re.match('^\d+/(\d+)$',
|
||||
self.get('DISCNUMBER', [u''])[0])
|
||||
if (match):
|
||||
return int(match.group(1))
|
||||
else:
|
||||
return 0
|
||||
elif (key in self.ATTRIBUTE_MAP):
|
||||
return self.get(self.ATTRIBUTE_MAP[key], [u''])[0]
|
||||
elif (key in MetaData.__FIELDS__):
|
||||
return u''
|
||||
else:
|
||||
try:
|
||||
return self.__dict__[key]
|
||||
except KeyError:
|
||||
raise AttributeError(key)
|
||||
|
||||
def __delattr__(self, key):
|
||||
if (key == 'track_number'):
|
||||
track_number = self.get('TRACKNUMBER', [u''])[0]
|
||||
if (re.match(r'^\d+$', track_number)):
|
||||
del(self['TRACKNUMBER'])
|
||||
elif (re.match('^\d+/(\d+)$', track_number)):
|
||||
self['TRACKNUMBER'] = u"0/%s" % (
|
||||
re.match('^\d+/(\d+)$', track_number).group(1))
|
||||
elif (key == 'track_total'):
|
||||
track_number = self.get('TRACKNUMBER', [u''])[0]
|
||||
if (re.match('^(\d+)/\d+$', track_number)):
|
||||
self['TRACKNUMBER'] = u"%s" % (
|
||||
re.match('^(\d+)/\d+$', track_number).group(1))
|
||||
if ('TRACKTOTAL' in self):
|
||||
del(self['TRACKTOTAL'])
|
||||
elif (key == 'album_number'):
|
||||
album_number = self.get('DISCNUMBER', [u''])[0]
|
||||
if (re.match(r'^\d+$', album_number)):
|
||||
del(self['DISCNUMBER'])
|
||||
elif (re.match('^\d+/(\d+)$', album_number)):
|
||||
self['DISCNUMBER'] = u"0/%s" % (
|
||||
re.match('^\d+/(\d+)$', album_number).group(1))
|
||||
elif (key == 'album_total'):
|
||||
album_number = self.get('DISCNUMBER', [u''])[0]
|
||||
if (re.match('^(\d+)/\d+$', album_number)):
|
||||
self['DISCNUMBER'] = u"%s" % (
|
||||
re.match('^(\d+)/\d+$', album_number).group(1))
|
||||
if ('DISCTOTAL' in self):
|
||||
del(self['DISCTOTAL'])
|
||||
elif (key in self.ATTRIBUTE_MAP):
|
||||
if (self.ATTRIBUTE_MAP[key] in self):
|
||||
del(self[self.ATTRIBUTE_MAP[key]])
|
||||
elif (key in MetaData.__FIELDS__):
|
||||
pass
|
||||
else:
|
||||
try:
|
||||
del(self.__dict__[key])
|
||||
except KeyError:
|
||||
raise AttributeError(key)
|
||||
|
||||
@classmethod
|
||||
def supports_images(cls):
|
||||
"""Returns False."""
|
||||
|
||||
#There's actually a (proposed?) standard to add embedded covers
|
||||
#to Vorbis Comments by base64 encoding them.
|
||||
#This strikes me as messy and convoluted.
|
||||
#In addition, I'd have to perform a special case of
|
||||
#image extraction and re-insertion whenever converting
|
||||
#to FlacMetaData. The whole thought gives me a headache.
|
||||
|
||||
return False
|
||||
|
||||
def images(self):
|
||||
"""Returns an empty list of Image objects."""
|
||||
|
||||
return list()
|
||||
|
||||
#if an attribute is updated (e.g. self.track_name)
|
||||
#make sure to update the corresponding dict pair
|
||||
def __setattr__(self, key, value):
|
||||
if (key in self.ATTRIBUTE_MAP):
|
||||
if (key not in MetaData.__INTEGER_FIELDS__):
|
||||
self[self.ATTRIBUTE_MAP[key]] = [value]
|
||||
else:
|
||||
self[self.ATTRIBUTE_MAP[key]] = [unicode(value)]
|
||||
else:
|
||||
self.__dict__[key] = value
|
||||
|
||||
@classmethod
|
||||
def converted(cls, metadata):
|
||||
"""Converts a MetaData object to a VorbisComment object."""
|
||||
|
||||
if ((metadata is None) or (isinstance(metadata, VorbisComment))):
|
||||
return metadata
|
||||
elif (metadata.__class__.__name__ == 'FlacMetaData'):
|
||||
return cls(vorbis_data=dict(metadata.vorbis_comment.items()),
|
||||
vendor_string=metadata.vorbis_comment.vendor_string)
|
||||
else:
|
||||
values = {}
|
||||
for key in cls.ATTRIBUTE_MAP.keys():
|
||||
if (key in cls.__INTEGER_FIELDS__):
|
||||
if (getattr(metadata, key) != 0):
|
||||
values[cls.ATTRIBUTE_MAP[key]] = \
|
||||
[unicode(getattr(metadata, key))]
|
||||
elif (getattr(metadata, key) != u""):
|
||||
values[cls.ATTRIBUTE_MAP[key]] = \
|
||||
[unicode(getattr(metadata, key))]
|
||||
|
||||
return VorbisComment(values)
|
||||
|
||||
def merge(self, metadata):
|
||||
"""Updates any currently empty entries from metadata's values."""
|
||||
|
||||
metadata = self.__class__.converted(metadata)
|
||||
if (metadata is None):
|
||||
return
|
||||
|
||||
for (key, values) in metadata.items():
|
||||
if ((len(values) > 0) and
|
||||
(len(self.get(key, [])) == 0)):
|
||||
self[key] = values
|
||||
|
||||
def __comment_name__(self):
|
||||
return u'Vorbis'
|
||||
|
||||
#takes two (key,value) vorbiscomment pairs
|
||||
#returns cmp on the weighted set of them
|
||||
#(title first, then artist, album, tracknumber, ... , replaygain)
|
||||
@classmethod
|
||||
def __by_pair__(cls, pair1, pair2):
|
||||
KEY_MAP = {"TITLE": 1,
|
||||
"ALBUM": 2,
|
||||
"TRACKNUMBER": 3,
|
||||
"TRACKTOTAL": 4,
|
||||
"DISCNUMBER": 5,
|
||||
"DISCTOTAL": 6,
|
||||
"ARTIST": 7,
|
||||
"PERFORMER": 8,
|
||||
"COMPOSER": 9,
|
||||
"CONDUCTOR": 10,
|
||||
"CATALOG": 11,
|
||||
"PUBLISHER": 12,
|
||||
"ISRC": 13,
|
||||
"SOURCE MEDIUM": 14,
|
||||
#"YEAR": 15,
|
||||
"DATE": 16,
|
||||
"COPYRIGHT": 17,
|
||||
"REPLAYGAIN_ALBUM_GAIN": 19,
|
||||
"REPLAYGAIN_ALBUM_PEAK": 19,
|
||||
"REPLAYGAIN_TRACK_GAIN": 19,
|
||||
"REPLAYGAIN_TRACK_PEAK": 19,
|
||||
"REPLAYGAIN_REFERENCE_LOUDNESS": 20}
|
||||
return cmp((KEY_MAP.get(pair1[0].upper(), 18),
|
||||
pair1[0].upper(),
|
||||
pair1[1]),
|
||||
(KEY_MAP.get(pair2[0].upper(), 18),
|
||||
pair2[0].upper(),
|
||||
pair2[1]))
|
||||
|
||||
def __comment_pairs__(self):
|
||||
pairs = []
|
||||
for (key, values) in self.items():
|
||||
for value in values:
|
||||
pairs.append((key, value))
|
||||
|
||||
pairs.sort(VorbisComment.__by_pair__)
|
||||
return pairs
|
||||
|
||||
def build(self):
|
||||
"""Returns this VorbisComment as a binary string."""
|
||||
|
||||
comment = Con.Container(vendor_string=self.vendor_string,
|
||||
framing=1,
|
||||
value=[])
|
||||
|
||||
for (key, values) in self.items():
|
||||
for value in values:
|
||||
if ((value != u"") and not
|
||||
((key in ("TRACKNUMBER", "TRACKTOTAL",
|
||||
"DISCNUMBER", "DISCTOTAL")) and
|
||||
(value == u"0"))):
|
||||
comment.value.append("%s=%s" % (key,
|
||||
value.encode('utf-8')))
|
||||
return self.VORBIS_COMMENT.build(comment)
|
1023
Melodia/resources/audiotools/__wav__.py
Normal file
File diff suppressed because it is too large
661
Melodia/resources/audiotools/__wavpack__.py
Normal file
@ -0,0 +1,661 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
#Audio Tools, a module and set of tools for manipulating audio data
|
||||
#Copyright (C) 2007-2011 Brian Langenberger
|
||||
|
||||
#This program is free software; you can redistribute it and/or modify
|
||||
#it under the terms of the GNU General Public License as published by
|
||||
#the Free Software Foundation; either version 2 of the License, or
|
||||
#(at your option) any later version.
|
||||
|
||||
#This program is distributed in the hope that it will be useful,
|
||||
#but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
#GNU General Public License for more details.
|
||||
|
||||
#You should have received a copy of the GNU General Public License
|
||||
#along with this program; if not, write to the Free Software
|
||||
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
|
||||
|
||||
from audiotools import (AudioFile, InvalidFile, Con, subprocess, BIN,
|
||||
open_files, os, ReplayGain, ignore_sigint,
|
||||
transfer_data, transfer_framelist_data,
|
||||
BufferedPCMReader, Image, MetaData, sheet_to_unicode,
|
||||
calculate_replay_gain, ApeTagItem,
|
||||
EncodingError, DecodingError, PCMReaderError,
|
||||
PCMReader, ChannelMask,
|
||||
InvalidWave, __default_quality__,
|
||||
WaveContainer, to_pcm_progress)
|
||||
from __wav__ import WaveAudio, WaveReader
|
||||
from __ape__ import ApeTaggedAudio, ApeTag, __number_pair__
|
||||
import gettext
|
||||
|
||||
gettext.install("audiotools", unicode=True)
|
||||
|
||||
|
||||
class InvalidWavPack(InvalidFile):
|
||||
pass
|
||||
|
||||
|
||||
class __24BitsLE__(Con.Adapter):
|
||||
def _encode(self, value, context):
|
||||
return chr(value & 0x0000FF) + \
|
||||
chr((value & 0x00FF00) >> 8) + \
|
||||
chr((value & 0xFF0000) >> 16)
|
||||
|
||||
def _decode(self, obj, context):
|
||||
return (ord(obj[2]) << 16) | (ord(obj[1]) << 8) | ord(obj[0])
|
||||
|
||||
|
||||
def ULInt24(name):
|
||||
return __24BitsLE__(Con.Bytes(name, 3))
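#a quick sanity sketch of the adapter above: ULInt24("x").build(0x123456)
#yields '\x56\x34\x12' (least-significant byte first) and parsing that
#string returns 0x123456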
|
||||
|
||||
|
||||
def __riff_chunk_ids__(data):
|
||||
import cStringIO
|
||||
|
||||
total_size = len(data)
|
||||
data = cStringIO.StringIO(data)
|
||||
header = WaveAudio.WAVE_HEADER.parse_stream(data)
|
||||
|
||||
while (data.tell() < total_size):
|
||||
chunk_header = WaveAudio.CHUNK_HEADER.parse_stream(data)
|
||||
chunk_size = chunk_header.chunk_length
|
||||
if ((chunk_size & 1) == 1):
|
||||
chunk_size += 1
|
||||
data.seek(chunk_size, 1)
|
||||
yield chunk_header.chunk_id
|
||||
|
||||
|
||||
#######################
|
||||
#WavPack APEv2
|
||||
#######################
|
||||
|
||||
|
||||
class WavPackAPEv2(ApeTag):
|
||||
"""A WavPack-specific APEv2 implementation with minor differences."""
|
||||
|
||||
def __init__(self, tags, tag_length=None, frame_count=0):
|
||||
"""Constructs an ApeTag from a list of ApeTagItem objects.
|
||||
|
||||
tag_length is an optional total length integer.
|
||||
frame_count is an optional number of PCM frames
|
||||
to be used by cuesheets."""
|
||||
|
||||
ApeTag.__init__(self, tags=tags, tag_length=tag_length)
|
||||
self.frame_count = frame_count
|
||||
|
||||
def __comment_pairs__(self):
|
||||
return filter(lambda pair: pair[0] != 'Cuesheet',
|
||||
ApeTag.__comment_pairs__(self))
|
||||
|
||||
def __unicode__(self):
|
||||
if ('Cuesheet' not in self.keys()):
|
||||
return ApeTag.__unicode__(self)
|
||||
else:
|
||||
import cue
|
||||
|
||||
try:
|
||||
return u"%s%sCuesheet:\n%s" % \
|
||||
(MetaData.__unicode__(self),
|
||||
os.linesep * 2,
|
||||
sheet_to_unicode(
|
||||
cue.parse(
|
||||
cue.tokens(unicode(self['Cuesheet']).encode(
|
||||
'ascii', 'replace'))),
|
||||
self.frame_count))
|
||||
except cue.CueException:
|
||||
return ApeTag.__unicode__(self)
|
||||
|
||||
@classmethod
|
||||
def converted(cls, metadata):
|
||||
"""Converts a MetaData object to a WavPackAPEv2 object."""
|
||||
|
||||
if ((metadata is None) or (isinstance(metadata, WavPackAPEv2))):
|
||||
return metadata
|
||||
elif (isinstance(metadata, ApeTag)):
|
||||
return WavPackAPEv2(metadata.tags)
|
||||
else:
|
||||
return WavPackAPEv2(ApeTag.converted(metadata).tags)
|
||||
|
||||
WavePackAPEv2 = WavPackAPEv2
|
||||
|
||||
#######################
|
||||
#WavPack
|
||||
#######################
|
||||
|
||||
|
||||
class WavPackAudio(ApeTaggedAudio, WaveContainer):
|
||||
"""A WavPack audio file."""
|
||||
|
||||
SUFFIX = "wv"
|
||||
NAME = SUFFIX
|
||||
DEFAULT_COMPRESSION = "standard"
|
||||
COMPRESSION_MODES = ("veryfast", "fast", "standard", "high", "veryhigh")
|
||||
COMPRESSION_DESCRIPTIONS = {"veryfast": _(u"fastest encode/decode, " +
|
||||
u"worst compression"),
|
||||
"veryhigh": _(u"slowest encode/decode, " +
|
||||
u"best compression")}
|
||||
|
||||
APE_TAG_CLASS = WavPackAPEv2
|
||||
|
||||
HEADER = Con.Struct("wavpackheader",
|
||||
Con.Const(Con.String("id", 4), 'wvpk'),
|
||||
Con.ULInt32("block_size"),
|
||||
Con.ULInt16("version"),
|
||||
Con.ULInt8("track_number"),
|
||||
Con.ULInt8("index_number"),
|
||||
Con.ULInt32("total_samples"),
|
||||
Con.ULInt32("block_index"),
|
||||
Con.ULInt32("block_samples"),
|
||||
Con.Embed(
|
||||
Con.BitStruct("flags",
|
||||
Con.Flag("floating_point_data"),
|
||||
Con.Flag("hybrid_noise_shaping"),
|
||||
Con.Flag("cross_channel_decorrelation"),
|
||||
Con.Flag("joint_stereo"),
|
||||
Con.Flag("hybrid_mode"),
|
||||
Con.Flag("mono_output"),
|
||||
Con.Bits("bits_per_sample", 2),
|
||||
|
||||
Con.Bits("left_shift_data_low", 3),
|
||||
Con.Flag("final_block_in_sequence"),
|
||||
Con.Flag("initial_block_in_sequence"),
|
||||
Con.Flag("hybrid_noise_balanced"),
|
||||
Con.Flag("hybrid_mode_control_bitrate"),
|
||||
Con.Flag("extended_size_integers"),
|
||||
|
||||
Con.Bit("sampling_rate_low"),
|
||||
Con.Bits("maximum_magnitude", 5),
|
||||
Con.Bits("left_shift_data_high", 2),
|
||||
|
||||
Con.Flag("reserved2"),
|
||||
Con.Flag("false_stereo"),
|
||||
Con.Flag("use_IIR"),
|
||||
Con.Bits("reserved1", 2),
|
||||
Con.Bits("sampling_rate_high", 3))),
|
||||
Con.ULInt32("crc"))
|
||||
|
||||
SUB_HEADER = Con.Struct("wavpacksubheader",
|
||||
Con.Embed(
|
||||
Con.BitStruct("flags",
|
||||
Con.Flag("large_block"),
|
||||
Con.Flag("actual_size_1_less"),
|
||||
Con.Flag("nondecoder_data"),
|
||||
Con.Bits("metadata_function", 5))),
|
||||
Con.IfThenElse('size',
|
||||
lambda ctx: ctx['large_block'],
|
||||
ULInt24('s'),
|
||||
Con.Byte('s')))
|
||||
|
||||
BITS_PER_SAMPLE = (8, 16, 24, 32)
|
||||
SAMPLING_RATE = (6000, 8000, 9600, 11025,
|
||||
12000, 16000, 22050, 24000,
|
||||
32000, 44100, 48000, 64000,
|
||||
88200, 96000, 192000, 0)
|
||||
|
||||
__options__ = {"veryfast": {"block_size": 44100,
|
||||
"joint_stereo": True,
|
||||
"false_stereo": True,
|
||||
"wasted_bits": True,
|
||||
"decorrelation_passes": 1},
|
||||
"fast": {"block_size": 44100,
|
||||
"joint_stereo": True,
|
||||
"false_stereo": True,
|
||||
"wasted_bits": True,
|
||||
"decorrelation_passes": 2},
|
||||
"standard": {"block_size": 44100,
|
||||
"joint_stereo": True,
|
||||
"false_stereo": True,
|
||||
"wasted_bits": True,
|
||||
"decorrelation_passes": 5},
|
||||
"high": {"block_size": 44100,
|
||||
"joint_stereo": True,
|
||||
"false_stereo": True,
|
||||
"wasted_bits": True,
|
||||
"decorrelation_passes": 10},
|
||||
"veryhigh": {"block_size": 44100,
|
||||
"joint_stereo": True,
|
||||
"false_stereo": True,
|
||||
"wasted_bits": True,
|
||||
"decorrelation_passes": 16}}
|
||||
|
||||
def __init__(self, filename):
|
||||
"""filename is a plain string."""
|
||||
|
||||
self.filename = filename
|
||||
self.__samplerate__ = 0
|
||||
self.__channels__ = 0
|
||||
self.__bitspersample__ = 0
|
||||
self.__total_frames__ = 0
|
||||
|
||||
try:
|
||||
self.__read_info__()
|
||||
except IOError, msg:
|
||||
raise InvalidWavPack(str(msg))
|
||||
|
||||
@classmethod
|
||||
def is_type(cls, file):
|
||||
"""Returns True if the given file object describes this format.
|
||||
|
||||
Takes a seekable file pointer rewound to the start of the file."""
|
||||
|
||||
return file.read(4) == 'wvpk'
|
||||
|
||||
def lossless(self):
|
||||
"""Returns True."""
|
||||
|
||||
return True
|
||||
|
||||
def channel_mask(self):
|
||||
"""Returns a ChannelMask object of this track's channel layout."""
|
||||
|
||||
if ((self.__channels__ == 1) or (self.__channels__ == 2)):
|
||||
return ChannelMask.from_channels(self.__channels__)
|
||||
else:
|
||||
for (block_id, nondecoder, data) in self.sub_frames():
|
||||
if ((block_id == 0xD) and not nondecoder):
|
||||
mask = 0
|
||||
for byte in reversed(map(ord, data[1:])):
|
||||
mask = (mask << 8) | byte
|
||||
return ChannelMask(mask)
|
||||
else:
|
||||
return ChannelMask(0)
|
||||
|
||||
def get_metadata(self):
|
||||
"""Returns a MetaData object, or None.
|
||||
|
||||
Raises IOError if unable to read the file."""
|
||||
|
||||
metadata = ApeTaggedAudio.get_metadata(self)
|
||||
if (metadata is not None):
|
||||
metadata.frame_count = self.total_frames()
|
||||
return metadata
|
||||
|
||||
def has_foreign_riff_chunks(self):
|
||||
"""Returns True if the audio file contains non-audio RIFF chunks.
|
||||
|
||||
During transcoding, if the source audio file has foreign RIFF chunks
|
||||
and the target audio format supports foreign RIFF chunks,
|
||||
conversion should be routed through .wav conversion
|
||||
to avoid losing those chunks."""
|
||||
|
||||
for (sub_header, nondecoder, data) in self.sub_frames():
|
||||
if ((sub_header == 1) and nondecoder):
|
||||
if (set(__riff_chunk_ids__(data)) != set(['fmt ', 'data'])):
|
||||
return True
|
||||
elif ((sub_header == 2) and nondecoder):
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def frames(self):
|
||||
"""Yields (header, data) tuples of WavPack frames.
|
||||
|
||||
header is a Container parsed from WavPackAudio.HEADER.
|
||||
data is a binary string.
|
||||
"""
|
||||
|
||||
f = file(self.filename)
|
||||
total_size = os.path.getsize(self.filename)
|
||||
try:
|
||||
while (f.tell() < total_size):
|
||||
try:
|
||||
header = WavPackAudio.HEADER.parse(f.read(
|
||||
WavPackAudio.HEADER.sizeof()))
|
||||
except Con.ConstError:
|
||||
break
|
||||
|
||||
data = f.read(header.block_size - 24)
|
||||
|
||||
yield (header, data)
|
||||
finally:
|
||||
f.close()
|
||||
|
||||
def sub_frames(self):
|
||||
"""Yields (function,nondecoder,data) tuples.
|
||||
|
||||
function is an integer.
|
||||
nondecoder is a boolean indicating non-decoder data.
|
||||
data is a binary string.
|
||||
"""
|
||||
|
||||
import cStringIO
|
||||
|
||||
for (header, data) in self.frames():
|
||||
total_size = len(data)
|
||||
data = cStringIO.StringIO(data)
|
||||
while (data.tell() < total_size):
|
||||
sub_header = WavPackAudio.SUB_HEADER.parse_stream(data)
|
||||
if (sub_header.actual_size_1_less):
|
||||
yield (sub_header.metadata_function,
|
||||
sub_header.nondecoder_data,
|
||||
data.read((sub_header.size * 2) - 1))
|
||||
data.read(1)
|
||||
else:
|
||||
yield (sub_header.metadata_function,
|
||||
sub_header.nondecoder_data,
|
||||
data.read(sub_header.size * 2))
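        #sub-block sizes are stored as a count of 16-bit words; when
        #"actual_size_1_less" is set the final byte is padding, so one
        #byte fewer is returned and the pad byte is skipped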
|
||||
|
||||
def __read_info__(self):
|
||||
f = file(self.filename)
|
||||
try:
|
||||
try:
|
||||
header = WavPackAudio.HEADER.parse(f.read(
|
||||
WavPackAudio.HEADER.sizeof()))
|
||||
except Con.ConstError:
|
||||
raise InvalidWavPack(_(u'WavPack header ID invalid'))
|
||||
except Con.FieldError:
|
||||
raise InvalidWavPack(_(u'WavPack header ID invalid'))
|
||||
|
||||
self.__samplerate__ = WavPackAudio.SAMPLING_RATE[
|
||||
(header.sampling_rate_high << 1) |
|
||||
header.sampling_rate_low]
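            #the sample rate is stored as a 4-bit index (three high bits
            #plus one low bit) into the 16-entry SAMPLING_RATE table;
            #index 15 maps to 0, meaning a non-standard rate, which the
            #RIFF fmt fallback below handles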
|
||||
|
||||
if (self.__samplerate__ == 0):
|
||||
#if unknown, pull from the RIFF WAVE header
|
||||
for (function, nondecoder, data) in self.sub_frames():
|
||||
if ((function == 1) and nondecoder):
|
||||
#fmt chunk must be in the header
|
||||
#since it must come before the data chunk
|
||||
|
||||
import cStringIO
|
||||
|
||||
chunks = cStringIO.StringIO(data[12:-8])
|
||||
try:
|
||||
while (True):
|
||||
chunk_header = \
|
||||
WaveAudio.CHUNK_HEADER.parse_stream(
|
||||
chunks)
|
||||
chunk_data = chunks.read(
|
||||
chunk_header.chunk_length)
|
||||
if (chunk_header.chunk_id == 'fmt '):
|
||||
self.__samplerate__ = \
|
||||
WaveAudio.FMT_CHUNK.parse(
|
||||
chunk_data).sample_rate
|
||||
except Con.FieldError:
|
||||
pass # finished with chunks
|
||||
|
||||
self.__bitspersample__ = WavPackAudio.BITS_PER_SAMPLE[
|
||||
header.bits_per_sample]
|
||||
self.__total_frames__ = header.total_samples
|
||||
|
||||
self.__channels__ = 0
|
||||
|
||||
#go through as many headers as necessary
|
||||
#to count the number of channels
|
||||
if (header.mono_output):
|
||||
self.__channels__ += 1
|
||||
else:
|
||||
self.__channels__ += 2
|
||||
|
||||
while (not header.final_block_in_sequence):
|
||||
f.seek(header.block_size - 24, 1)
|
||||
header = WavPackAudio.HEADER.parse(f.read(
|
||||
WavPackAudio.HEADER.sizeof()))
|
||||
if (header.mono_output):
|
||||
self.__channels__ += 1
|
||||
else:
|
||||
self.__channels__ += 2
|
||||
finally:
|
||||
f.close()
|
||||
|
||||
def bits_per_sample(self):
|
||||
"""Returns an integer number of bits-per-sample this track contains."""
|
||||
|
||||
return self.__bitspersample__
|
||||
|
||||
def channels(self):
|
||||
"""Returns an integer number of channels this track contains."""
|
||||
|
||||
return self.__channels__
|
||||
|
||||
def total_frames(self):
|
||||
"""Returns the total PCM frames of the track as an integer."""
|
||||
|
||||
return self.__total_frames__
|
||||
|
||||
def sample_rate(self):
|
||||
"""Returns the rate of the track's audio as an integer number of Hz."""
|
||||
|
||||
return self.__samplerate__
|
||||
|
||||
@classmethod
|
||||
def from_pcm(cls, filename, pcmreader, compression=None):
|
||||
"""Encodes a new file from PCM data.
|
||||
|
||||
Takes a filename string, PCMReader object
|
||||
and optional compression level string.
|
||||
Encodes a new audio file from pcmreader's data
|
||||
at the given filename with the specified compression level
|
||||
and returns a new WavPackAudio object."""
|
||||
|
||||
from . import encoders
|
||||
|
||||
if ((compression is None) or
|
||||
(compression not in cls.COMPRESSION_MODES)):
|
||||
compression = __default_quality__(cls.NAME)
|
||||
|
||||
try:
|
||||
encoders.encode_wavpack(filename,
|
||||
BufferedPCMReader(pcmreader),
|
||||
**cls.__options__[compression])
|
||||
|
||||
return cls(filename)
|
||||
except (ValueError, IOError), msg:
|
||||
cls.__unlink__(filename)
|
||||
raise EncodingError(str(msg))
|
||||
except Exception, err:
|
||||
cls.__unlink__(filename)
|
||||
raise err
|
||||
|
||||
def to_wave(self, wave_filename, progress=None):
|
||||
"""Writes the contents of this file to the given .wav filename string.
|
||||
|
||||
Raises EncodingError if some error occurs during decoding."""
|
||||
|
||||
from . import decoders
|
||||
|
||||
try:
|
||||
f = open(wave_filename, 'wb')
|
||||
except IOError, msg:
|
||||
raise EncodingError(str(msg))
|
||||
|
||||
(head, tail) = self.pcm_split()
|
||||
|
||||
try:
|
||||
f.write(head)
|
||||
total_frames = self.total_frames()
|
||||
current_frames = 0
|
||||
decoder = decoders.WavPackDecoder(self.filename)
|
||||
frame = decoder.read(4096)
|
||||
while (len(frame) > 0):
|
||||
f.write(frame.to_bytes(False, self.bits_per_sample() > 8))
|
||||
current_frames += frame.frames
|
||||
if (progress is not None):
|
||||
progress(current_frames, total_frames)
|
||||
frame = decoder.read(4096)
|
||||
f.write(tail)
|
||||
f.close()
|
||||
except IOError, msg:
|
||||
self.__unlink__(wave_filename)
|
||||
raise EncodingError(str(msg))
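# Illustrative usage sketch (not part of the original source; the file names
# below are hypothetical):
#
#   track = WavPackAudio("song.wv")
#   track.to_wave("song.wav", progress=lambda cur, total: None)
#
# which decodes the PCM data and restores the original RIFF WAVE header and
# footer around it.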
|
||||
|
||||
def to_pcm(self):
|
||||
"""Returns a PCMReader object containing the track's PCM data."""
|
||||
|
||||
from . import decoders
|
||||
|
||||
try:
|
||||
return decoders.WavPackDecoder(self.filename,
|
||||
self.__samplerate__)
|
||||
except (IOError, ValueError), msg:
|
||||
return PCMReaderError(error_message=str(msg),
|
||||
sample_rate=self.__samplerate__,
|
||||
channels=self.__channels__,
|
||||
channel_mask=int(self.channel_mask()),
|
||||
bits_per_sample=self.__bitspersample__)
|
||||
|
||||
@classmethod
|
||||
def from_wave(cls, filename, wave_filename, compression=None,
|
||||
progress=None):
|
||||
"""Encodes a new AudioFile from an existing .wav file.
|
||||
|
||||
Takes a filename string, wave_filename string
|
||||
of an existing WaveAudio file
|
||||
and an optional compression level string.
|
||||
Encodes a new audio file from the wave's data
|
||||
at the given filename with the specified compression level
|
||||
and returns a new WavPackAudio object."""
|
||||
|
||||
from . import encoders
|
||||
|
||||
if ((compression is None) or
|
||||
(compression not in cls.COMPRESSION_MODES)):
|
||||
compression = __default_quality__(cls.NAME)
|
||||
|
||||
wave = WaveAudio(wave_filename)
|
||||
|
||||
(head, tail) = wave.pcm_split()
|
||||
|
||||
try:
|
||||
encoders.encode_wavpack(filename,
|
||||
to_pcm_progress(wave, progress),
|
||||
wave_header=head,
|
||||
wave_footer=tail,
|
||||
**cls.__options__[compression])
|
||||
|
||||
return cls(filename)
|
||||
except (ValueError, IOError), msg:
|
||||
cls.__unlink__(filename)
|
||||
raise EncodingError(str(msg))
|
||||
except Exception, err:
|
||||
cls.__unlink__(filename)
|
||||
raise err
|
||||
|
||||
def pcm_split(self):
|
||||
"""Returns a pair of data strings before and after PCM data."""
|
||||
|
||||
head = ""
|
||||
tail = ""
|
||||
|
||||
for (sub_block_id, nondecoder, data) in self.sub_frames():
|
||||
if ((sub_block_id == 1) and nondecoder):
|
||||
head = data
|
||||
elif ((sub_block_id == 2) and nondecoder):
|
||||
tail = data
|
||||
|
||||
return (head, tail)
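# Illustrative note (not part of the original source): sub-block ID 1 carries
# the original RIFF WAVE header and ID 2 the footer, which is what to_wave()
# wraps around the decoded PCM, e.g. (file name hypothetical):
#
#   (head, tail) = WavPackAudio("song.wv").pcm_split()
#   # head starts with "RIFF....WAVE"; tail holds any trailing chunks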
|
||||
|
||||
@classmethod
|
||||
def add_replay_gain(cls, filenames, progress=None):
|
||||
"""Adds ReplayGain values to a list of filename strings.
|
||||
|
||||
All the filenames must be of this AudioFile type.
|
||||
Raises ValueError if some problem occurs during ReplayGain application.
|
||||
"""
|
||||
|
||||
tracks = [track for track in open_files(filenames) if
|
||||
isinstance(track, cls)]
|
||||
|
||||
if (len(tracks) > 0):
|
||||
for (track,
|
||||
track_gain,
|
||||
track_peak,
|
||||
album_gain,
|
||||
album_peak) in calculate_replay_gain(tracks, progress):
|
||||
metadata = track.get_metadata()
|
||||
if (metadata is None):
|
||||
metadata = WavPackAPEv2([])
|
||||
metadata["replaygain_track_gain"] = ApeTagItem.string(
|
||||
"replaygain_track_gain",
|
||||
u"%+1.2f dB" % (track_gain))
|
||||
metadata["replaygain_track_peak"] = ApeTagItem.string(
|
||||
"replaygain_track_peak",
|
||||
u"%1.6f" % (track_peak))
|
||||
metadata["replaygain_album_gain"] = ApeTagItem.string(
|
||||
"replaygain_album_gain",
|
||||
u"%+1.2f dB" % (album_gain))
|
||||
metadata["replaygain_album_peak"] = ApeTagItem.string(
|
||||
"replaygain_album_peak",
|
||||
u"%1.6f" % (album_peak))
|
||||
track.set_metadata(metadata)
|
||||
|
||||
@classmethod
|
||||
def can_add_replay_gain(cls):
|
||||
"""Returns True."""
|
||||
|
||||
return True
|
||||
|
||||
@classmethod
|
||||
def lossless_replay_gain(cls):
|
||||
"""Returns True."""
|
||||
|
||||
return True
|
||||
|
||||
def replay_gain(self):
|
||||
"""Returns a ReplayGain object of our ReplayGain values.
|
||||
|
||||
Returns None if we have no values."""
|
||||
|
||||
metadata = self.get_metadata()
|
||||
if (metadata is None):
|
||||
return None
|
||||
|
||||
if (set(['replaygain_track_gain', 'replaygain_track_peak',
|
||||
'replaygain_album_gain', 'replaygain_album_peak']).issubset(
|
||||
metadata.keys())): # we have ReplayGain data
|
||||
try:
|
||||
return ReplayGain(
|
||||
unicode(metadata['replaygain_track_gain'])[0:-len(" dB")],
|
||||
unicode(metadata['replaygain_track_peak']),
|
||||
unicode(metadata['replaygain_album_gain'])[0:-len(" dB")],
|
||||
unicode(metadata['replaygain_album_peak']))
|
||||
except ValueError:
|
||||
return None
|
||||
else:
|
||||
return None
|
||||
|
||||
def get_cuesheet(self):
|
||||
"""Returns the embedded Cuesheet-compatible object, or None.
|
||||
|
||||
Raises IOError if a problem occurs when reading the file."""
|
||||
|
||||
import cue
|
||||
|
||||
metadata = self.get_metadata()
|
||||
|
||||
if ((metadata is not None) and ('Cuesheet' in metadata.keys())):
|
||||
try:
|
||||
return cue.parse(cue.tokens(
|
||||
unicode(metadata['Cuesheet']).encode('utf-8',
|
||||
'replace')))
|
||||
except cue.CueException:
|
||||
#unlike FLAC, just because a cuesheet is embedded
|
||||
#does not mean it is compliant
|
||||
return None
|
||||
else:
|
||||
return None
|
||||
|
||||
def set_cuesheet(self, cuesheet):
|
||||
"""Imports cuesheet data from a Cuesheet-compatible object.
|
||||
|
||||
These are objects with catalog(), ISRCs(), indexes(), and pcm_lengths()
|
||||
methods. Raises IOError if an error occurs setting the cuesheet."""
|
||||
|
||||
import os.path
|
||||
import cue
|
||||
|
||||
if (cuesheet is None):
|
||||
return
|
||||
|
||||
metadata = self.get_metadata()
|
||||
if (metadata is None):
|
||||
metadata = WavPackAPEv2.converted(MetaData())
|
||||
|
||||
metadata['Cuesheet'] = WavPackAPEv2.ITEM.string('Cuesheet',
|
||||
cue.Cuesheet.file(
|
||||
cuesheet,
|
||||
os.path.basename(self.filename)).decode('ascii', 'replace'))
|
||||
self.set_metadata(metadata)
|
BIN
Melodia/resources/audiotools/cdio.so
Executable file
Binary file not shown.
115
Melodia/resources/audiotools/construct/__init__.py
Normal file
@ -0,0 +1,115 @@
|
||||
"""
|
||||
. #### ####
|
||||
## #### ## ## #### ###### ##### ## ## #### ###### ## ##
|
||||
## ## ## ### ## ## ## ## ## ## ## ## ## #### ##
|
||||
## ## ## ###### ### ## ##### ## ## ## ## ##
|
||||
## ## ## ## ### ## ## ## ## ## ## ## ## ##
|
||||
#### #### ## ## #### ## ## ## ##### #### ## ######
|
||||
|
||||
Parsing made even more fun (and faster too)
|
||||
|
||||
Homepage:
|
||||
http://construct.wikispaces.com (including online tutorial)
|
||||
|
||||
Typical usage:
|
||||
>>> from construct import *
|
||||
|
||||
Hands-on example:
|
||||
>>> from construct import *
|
||||
>>>
|
||||
>>> s = Struct("foo",
|
||||
... UBInt8("a"),
|
||||
... UBInt16("b"),
|
||||
... )
|
||||
>>>
|
||||
>>> s.parse("\\x01\\x02\\x03")
|
||||
Container(a = 1, b = 515)
|
||||
>>>
|
||||
>>> print s.parse("\\x01\\x02\\x03")
|
||||
Container:
|
||||
a = 1
|
||||
b = 515
|
||||
>>>
|
||||
>>> s.build(Container(a = 1, b = 0x0203))
|
||||
"\\x01\\x02\\x03"
|
||||
"""
|
||||
from core import *
|
||||
from adapters import *
|
||||
from macros import *
|
||||
from debug import Probe, Debugger
|
||||
|
||||
|
||||
#===============================================================================
|
||||
# meta data
|
||||
#===============================================================================
|
||||
__author__ = "tomer filiba (tomerfiliba [at] gmail.com)"
|
||||
__version__ = "2.04"
|
||||
|
||||
#===============================================================================
|
||||
# shorthands
|
||||
#===============================================================================
|
||||
Bits = BitField
|
||||
Byte = UBInt8
|
||||
Bytes = Field
|
||||
Const = ConstAdapter
|
||||
Tunnel = TunnelAdapter
|
||||
Embed = Embedded
|
||||
|
||||
#===============================================================================
|
||||
# deprecated names (kept for backward compatibility with RC1)
|
||||
#===============================================================================
|
||||
MetaField = Field
|
||||
MetaBytes = Field
|
||||
GreedyRepeater = GreedyRange
|
||||
OptionalGreedyRepeater = OptionalGreedyRange
|
||||
Repeater = Range
|
||||
StrictRepeater = Array
|
||||
MetaRepeater = Array
|
||||
OneOfValidator = OneOf
|
||||
NoneOfValidator = NoneOf
|
||||
|
||||
#===============================================================================
|
||||
# exposed names
|
||||
#===============================================================================
|
||||
__all__ = [
|
||||
'AdaptationError', 'Adapter', 'Alias', 'Aligned', 'AlignedStruct',
|
||||
'Anchor', 'Array', 'ArrayError', 'BFloat32', 'BFloat64', 'Bit', 'BitField',
|
||||
'BitIntegerAdapter', 'BitIntegerError', 'BitStruct', 'Bits', 'Bitwise',
|
||||
'Buffered', 'Byte', 'Bytes', 'CString', 'CStringAdapter', 'Const',
|
||||
'ConstAdapter', 'ConstError', 'Construct', 'ConstructError', 'Container',
|
||||
'Debugger', 'Embed', 'Embedded', 'EmbeddedBitStruct', 'Enum', 'ExprAdapter',
|
||||
'Field', 'FieldError', 'Flag', 'FlagsAdapter', 'FlagsContainer',
|
||||
'FlagsEnum', 'FormatField', 'GreedyRange', 'GreedyRepeater',
|
||||
'HexDumpAdapter', 'If', 'IfThenElse', 'IndexingAdapter', 'LFloat32',
|
||||
'LFloat64', 'LazyBound', 'LengthValueAdapter', 'ListContainer',
|
||||
'MappingAdapter', 'MappingError', 'MetaArray', 'MetaBytes', 'MetaField',
|
||||
'MetaRepeater', 'NFloat32', 'NFloat64', 'Nibble', 'NoneOf',
|
||||
'NoneOfValidator', 'Octet', 'OnDemand', 'OnDemandPointer', 'OneOf',
|
||||
'OneOfValidator', 'OpenRange', 'Optional', 'OptionalGreedyRange',
|
||||
'OptionalGreedyRepeater', 'PaddedStringAdapter', 'Padding',
|
||||
'PaddingAdapter', 'PaddingError', 'PascalString', 'Pass', 'Peek',
|
||||
'Pointer', 'PrefixedArray', 'Probe', 'Range', 'RangeError', 'Reconfig',
|
||||
'Rename', 'RepeatUntil', 'Repeater', 'Restream', 'SBInt16', 'SBInt32',
|
||||
'SBInt64', 'SBInt8', 'SLInt16', 'SLInt32', 'SLInt64', 'SLInt8', 'SNInt16',
|
||||
'SNInt32', 'SNInt64', 'SNInt8', 'Select', 'SelectError', 'Sequence',
|
||||
'SizeofError', 'SlicingAdapter', 'StaticField', 'StrictRepeater', 'String',
|
||||
'StringAdapter', 'Struct', 'Subconstruct', 'Switch', 'SwitchError',
|
||||
'SymmetricMapping', 'Terminator', 'TerminatorError', 'Tunnel',
|
||||
'TunnelAdapter', 'UBInt16', 'UBInt32', 'UBInt64', 'UBInt8', 'ULInt16',
|
||||
'ULInt32', 'ULInt64', 'ULInt8', 'UNInt16', 'UNInt32', 'UNInt64', 'UNInt8',
|
||||
'Union', 'ValidationError', 'Validator', 'Value', "Magic",
|
||||
]
482
Melodia/resources/audiotools/construct/adapters.py
Normal file
@ -0,0 +1,482 @@
|
||||
from core import Adapter, AdaptationError, Pass
|
||||
from lib import int_to_bin, bin_to_int, swap_bytes, StringIO
|
||||
from lib import FlagsContainer, HexString
|
||||
|
||||
|
||||
#===============================================================================
|
||||
# exceptions
|
||||
#===============================================================================
|
||||
class BitIntegerError(AdaptationError):
|
||||
__slots__ = []
|
||||
class MappingError(AdaptationError):
|
||||
__slots__ = []
|
||||
class ConstError(AdaptationError):
|
||||
__slots__ = []
|
||||
class ValidationError(AdaptationError):
|
||||
__slots__ = []
|
||||
class PaddingError(AdaptationError):
|
||||
__slots__ = []
|
||||
|
||||
#===============================================================================
|
||||
# adapters
|
||||
#===============================================================================
|
||||
class BitIntegerAdapter(Adapter):
|
||||
"""
|
||||
Adapter for bit-integers (converts bitstrings to integers, and vice versa).
|
||||
See BitField.
|
||||
|
||||
Parameters:
|
||||
* subcon - the subcon to adapt
|
||||
* width - the size of the subcon, in bits
|
||||
* swapped - whether to swap byte order (little endian/big endian).
|
||||
default is False (big endian)
|
||||
* signed - whether the value is signed (two's complement). the default
|
||||
is False (unsigned)
|
||||
* bytesize - number of bits per byte, used for byte-swapping (if swapped).
|
||||
default is 8.
|
||||
"""
|
||||
__slots__ = ["width", "swapped", "signed", "bytesize"]
|
||||
def __init__(self, subcon, width, swapped = False, signed = False,
|
||||
bytesize = 8):
|
||||
Adapter.__init__(self, subcon)
|
||||
self.width = width
|
||||
self.swapped = swapped
|
||||
self.signed = signed
|
||||
self.bytesize = bytesize
|
||||
def _encode(self, obj, context):
|
||||
if obj < 0 and not self.signed:
|
||||
raise BitIntegerError("object is negative, but field is not signed",
|
||||
obj)
|
||||
obj2 = int_to_bin(obj, width = self.width)
|
||||
if self.swapped:
|
||||
obj2 = swap_bytes(obj2, bytesize = self.bytesize)
|
||||
return obj2
|
||||
def _decode(self, obj, context):
|
||||
if self.swapped:
|
||||
obj = swap_bytes(obj, bytesize = self.bytesize)
|
||||
return bin_to_int(obj, signed = self.signed)
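# Illustrative sketch (not part of the original source): BitIntegerAdapter is
# normally reached through the BitField macro inside a BitStruct, e.g.
#
#   >>> s = BitStruct("flags", BitField("version", 4), BitField("layer", 4))
#   >>> s.parse("\x2b")
#   Container(layer = 11, version = 2)
#
# i.e. it converts between "\x00"/"\x01" bit-strings and Python integers.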
|
||||
|
||||
class MappingAdapter(Adapter):
|
||||
"""
|
||||
Adapter that maps objects to other objects.
|
||||
See SymmetricMapping and Enum.
|
||||
|
||||
Parameters:
|
||||
* subcon - the subcon to map
|
||||
* decoding - the decoding (parsing) mapping (a dict)
|
||||
* encoding - the encoding (building) mapping (a dict)
|
||||
* decdefault - the default return value when the object is not found
|
||||
in the decoding mapping. if no object is given, an exception is raised.
|
||||
if `Pass` is used, the unmapped object will be passed as-is
|
||||
* encdefault - the default return value when the object is not found
|
||||
in the encoding mapping. if no object is given, an exception is raised.
|
||||
if `Pass` is used, the unmapped object will be passed as-is
|
||||
"""
|
||||
__slots__ = ["encoding", "decoding", "encdefault", "decdefault"]
|
||||
def __init__(self, subcon, decoding, encoding,
|
||||
decdefault = NotImplemented, encdefault = NotImplemented):
|
||||
Adapter.__init__(self, subcon)
|
||||
self.decoding = decoding
|
||||
self.encoding = encoding
|
||||
self.decdefault = decdefault
|
||||
self.encdefault = encdefault
|
||||
def _encode(self, obj, context):
|
||||
try:
|
||||
return self.encoding[obj]
|
||||
except (KeyError, TypeError):
|
||||
if self.encdefault is NotImplemented:
|
||||
raise MappingError("no encoding mapping for %r" % (obj,))
|
||||
if self.encdefault is Pass:
|
||||
return obj
|
||||
return self.encdefault
|
||||
def _decode(self, obj, context):
|
||||
try:
|
||||
return self.decoding[obj]
|
||||
except (KeyError, TypeError):
|
||||
if self.decdefault is NotImplemented:
|
||||
raise MappingError("no decoding mapping for %r" % (obj,))
|
||||
if self.decdefault is Pass:
|
||||
return obj
|
||||
return self.decdefault
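# Illustrative sketch (not part of the original source): MappingAdapter is
# usually created through the Enum macro, e.g.
#
#   >>> status = Enum(UBInt8("status"), OK = 0, FAIL = 1, _default_ = Pass)
#   >>> status.parse("\x00")
#   'OK'
#   >>> status.parse("\x07")   # unmapped values fall through via Pass
#   7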
|
||||
|
||||
class FlagsAdapter(Adapter):
|
||||
"""
|
||||
Adapter for flag fields. Each flag is extracted from the number, resulting
|
||||
in a FlagsContainer object. Not intended for direct usage.
|
||||
See FlagsEnum.
|
||||
|
||||
Parameters
|
||||
* subcon - the subcon to extract
|
||||
* flags - a dictionary mapping flag-names to their value
|
||||
"""
|
||||
__slots__ = ["flags"]
|
||||
def __init__(self, subcon, flags):
|
||||
Adapter.__init__(self, subcon)
|
||||
self.flags = flags
|
||||
def _encode(self, obj, context):
|
||||
flags = 0
|
||||
for name, value in self.flags.iteritems():
|
||||
if getattr(obj, name, False):
|
||||
flags |= value
|
||||
return flags
|
||||
def _decode(self, obj, context):
|
||||
obj2 = FlagsContainer()
|
||||
for name, value in self.flags.iteritems():
|
||||
setattr(obj2, name, bool(obj & value))
|
||||
return obj2
|
||||
|
||||
class StringAdapter(Adapter):
|
||||
"""
|
||||
Adapter for strings. Converts a sequence of characters into a python
|
||||
string, and optionally handles character encoding.
|
||||
See String.
|
||||
|
||||
Parameters:
|
||||
* subcon - the subcon to convert
|
||||
* encoding - the character encoding name (e.g., "utf8"), or None to
|
||||
return raw bytes (usually 8-bit ASCII).
|
||||
"""
|
||||
__slots__ = ["encoding"]
|
||||
def __init__(self, subcon, encoding = None):
|
||||
Adapter.__init__(self, subcon)
|
||||
self.encoding = encoding
|
||||
def _encode(self, obj, context):
|
||||
if self.encoding:
|
||||
obj = obj.encode(self.encoding)
|
||||
return obj
|
||||
def _decode(self, obj, context):
|
||||
obj = "".join(obj)
|
||||
if self.encoding:
|
||||
obj = obj.decode(self.encoding)
|
||||
return obj
|
||||
|
||||
class PaddedStringAdapter(Adapter):
|
||||
r"""
|
||||
Adapter for padded strings.
|
||||
See String.
|
||||
|
||||
Parameters:
|
||||
* subcon - the subcon to adapt
|
||||
* padchar - the padding character. default is "\x00".
|
||||
* paddir - the direction where padding is placed ("right", "left", or
|
||||
"center"). the default is "right".
|
||||
* trimdir - the direction where trimming will take place ("right" or
|
||||
"left"). the default is "right". trimming is only meaningful for
|
||||
building, when the given string is too long.
|
||||
"""
|
||||
__slots__ = ["padchar", "paddir", "trimdir"]
|
||||
def __init__(self, subcon, padchar = "\x00", paddir = "right",
|
||||
trimdir = "right"):
|
||||
if paddir not in ("right", "left", "center"):
|
||||
raise ValueError("paddir must be 'right', 'left' or 'center'",
|
||||
paddir)
|
||||
if trimdir not in ("right", "left"):
|
||||
raise ValueError("trimdir must be 'right' or 'left'", trimdir)
|
||||
Adapter.__init__(self, subcon)
|
||||
self.padchar = padchar
|
||||
self.paddir = paddir
|
||||
self.trimdir = trimdir
|
||||
def _decode(self, obj, context):
|
||||
if self.paddir == "right":
|
||||
obj = obj.rstrip(self.padchar)
|
||||
elif self.paddir == "left":
|
||||
obj = obj.lstrip(self.padchar)
|
||||
else:
|
||||
obj = obj.strip(self.padchar)
|
||||
return obj
|
||||
def _encode(self, obj, context):
|
||||
size = self._sizeof(context)
|
||||
if self.paddir == "right":
|
||||
obj = obj.ljust(size, self.padchar)
|
||||
elif self.paddir == "left":
|
||||
obj = obj.rjust(size, self.padchar)
|
||||
else:
|
||||
obj = obj.center(size, self.padchar)
|
||||
if len(obj) > size:
|
||||
if self.trimdir == "right":
|
||||
obj = obj[:size]
|
||||
else:
|
||||
obj = obj[-size:]
|
||||
return obj
|
||||
|
||||
class LengthValueAdapter(Adapter):
|
||||
"""
|
||||
Adapter for length-value pairs. It extracts only the value from the
|
||||
pair, and calculates the length based on the value.
|
||||
See PrefixedArray and PascalString.
|
||||
|
||||
Parameters:
|
||||
* subcon - the subcon returning a length-value pair
|
||||
"""
|
||||
__slots__ = []
|
||||
def _encode(self, obj, context):
|
||||
return (len(obj), obj)
|
||||
def _decode(self, obj, context):
|
||||
return obj[1]
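# Illustrative sketch (not part of the original source): the PascalString and
# PrefixedArray macros are built on this adapter, e.g.
#
#   >>> PascalString("name").parse("\x05hello")
#   'hello'
#   >>> PascalString("name").build("hi")
#   '\x02hi'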
|
||||
|
||||
class CStringAdapter(StringAdapter):
|
||||
r"""
|
||||
Adapter for C-style strings (strings terminated by a terminator char).
|
||||
|
||||
Parameters:
|
||||
* subcon - the subcon to convert
|
||||
* terminators - a sequence of terminator chars. default is "\x00".
|
||||
* encoding - the character encoding to use (e.g., "utf8"), or None to
|
||||
return raw-bytes. the terminator characters are not affected by the
|
||||
encoding.
|
||||
"""
|
||||
__slots__ = ["terminators"]
|
||||
def __init__(self, subcon, terminators = "\x00", encoding = None):
|
||||
StringAdapter.__init__(self, subcon, encoding = encoding)
|
||||
self.terminators = terminators
|
||||
def _encode(self, obj, context):
|
||||
return StringAdapter._encode(self, obj, context) + self.terminators[0]
|
||||
def _decode(self, obj, context):
|
||||
return StringAdapter._decode(self, obj[:-1], context)
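# Illustrative sketch (not part of the original source): this adapter backs
# the CString macro, e.g.
#
#   >>> CString("s").parse("abc\x00")
#   'abc'
#   >>> CString("s").build("abc")
#   'abc\x00'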
|
||||
|
||||
class TunnelAdapter(Adapter):
|
||||
"""
|
||||
Adapter for tunneling (as in protocol tunneling). A tunnel is a construct
|
||||
nested upon another (layering). For parsing, the lower layer first parses
|
||||
the data (note: it must return a string!), then the upper layer is called
|
||||
to parse that data (bottom-up). For building it works in a top-down manner;
|
||||
first the upper layer builds the data, then the lower layer takes it and
|
||||
writes it to the stream.
|
||||
|
||||
Parameters:
|
||||
* subcon - the lower layer subcon
|
||||
* inner_subcon - the upper layer (tunneled/nested) subcon
|
||||
|
||||
Example:
|
||||
# a pascal string containing compressed data (zlib encoding), so first
|
||||
# the string is read, decompressed, and finally re-parsed as an array
|
||||
# of UBInt16
|
||||
TunnelAdapter(
|
||||
PascalString("data", encoding = "zlib"),
|
||||
GreedyRange(UBInt16("elements"))
|
||||
)
|
||||
"""
|
||||
__slots__ = ["inner_subcon"]
|
||||
def __init__(self, subcon, inner_subcon):
|
||||
Adapter.__init__(self, subcon)
|
||||
self.inner_subcon = inner_subcon
|
||||
def _decode(self, obj, context):
|
||||
return self.inner_subcon._parse(StringIO(obj), context)
|
||||
def _encode(self, obj, context):
|
||||
stream = StringIO()
|
||||
self.inner_subcon._build(obj, stream, context)
|
||||
return stream.getvalue()
|
||||
|
||||
class ExprAdapter(Adapter):
|
||||
"""
|
||||
A generic adapter that accepts 'encoder' and 'decoder' as parameters. You
|
||||
can use ExprAdapter instead of writing a full-blown class when only a
|
||||
simple expression is needed.
|
||||
|
||||
Parameters:
|
||||
* subcon - the subcon to adapt
|
||||
* encoder - a function that takes (obj, context) and returns an encoded
|
||||
version of obj
|
||||
* decoder - a function that takes (obj, context) and returns a decoded
|
||||
version of obj
|
||||
|
||||
Example:
|
||||
ExprAdapter(UBInt8("foo"),
|
||||
encoder = lambda obj, ctx: obj / 4,
|
||||
decoder = lambda obj, ctx: obj * 4,
|
||||
)
|
||||
"""
|
||||
__slots__ = ["_encode", "_decode"]
|
||||
def __init__(self, subcon, encoder, decoder):
|
||||
Adapter.__init__(self, subcon)
|
||||
self._encode = encoder
|
||||
self._decode = decoder
|
||||
|
||||
class HexDumpAdapter(Adapter):
|
||||
"""
|
||||
Adapter for hex-dumping strings. It returns a HexString, which is a string
that is hex-dumped when pretty-printed (via __pretty_str__).
|
||||
"""
|
||||
__slots__ = ["linesize"]
|
||||
def __init__(self, subcon, linesize = 16):
|
||||
Adapter.__init__(self, subcon)
|
||||
self.linesize = linesize
|
||||
def _encode(self, obj, context):
|
||||
return obj
|
||||
def _decode(self, obj, context):
|
||||
return HexString(obj, linesize = self.linesize)
|
||||
|
||||
class ConstAdapter(Adapter):
|
||||
"""
|
||||
Adapter for enforcing a constant value ("magic numbers"). When decoding,
|
||||
the return value is checked; when building, the value is substituted in.
|
||||
|
||||
Parameters:
|
||||
* subcon - the subcon to validate
|
||||
* value - the expected value
|
||||
|
||||
Example:
|
||||
Const(Field("signature", 2), "MZ")
|
||||
"""
|
||||
__slots__ = ["value"]
|
||||
def __init__(self, subcon, value):
|
||||
Adapter.__init__(self, subcon)
|
||||
self.value = value
|
||||
def _encode(self, obj, context):
|
||||
if obj is None or obj == self.value:
|
||||
return self.value
|
||||
else:
|
||||
raise ConstError("expected %r, found %r" % (self.value, obj))
|
||||
def _decode(self, obj, context):
|
||||
if obj != self.value:
|
||||
raise ConstError("expected %r, found %r" % (self.value, obj))
|
||||
return obj
|
||||
|
||||
class SlicingAdapter(Adapter):
|
||||
"""
|
||||
Adapter for slicing a list (getting a slice from that list)
|
||||
|
||||
Parameters:
|
||||
* subcon - the subcon to slice
|
||||
* start - start index
|
||||
* stop - stop index (or None for up-to-end)
|
||||
* step - step (or None for every element)
|
||||
"""
|
||||
__slots__ = ["start", "stop", "step"]
|
||||
def __init__(self, subcon, start, stop = None):
|
||||
Adapter.__init__(self, subcon)
|
||||
self.start = start
|
||||
self.stop = stop
|
||||
def _encode(self, obj, context):
|
||||
if self.start is None:
|
||||
return obj
|
||||
return [None] * self.start + obj
|
||||
def _decode(self, obj, context):
|
||||
return obj[self.start:self.stop]
|
||||
|
||||
class IndexingAdapter(Adapter):
|
||||
"""
|
||||
Adapter for indexing a list (getting a single item from that list)
|
||||
|
||||
Parameters:
|
||||
* subcon - the subcon to index
|
||||
* index - the index of the list to get
|
||||
"""
|
||||
__slots__ = ["index"]
|
||||
def __init__(self, subcon, index):
|
||||
Adapter.__init__(self, subcon)
|
||||
if type(index) is not int:
|
||||
raise TypeError("index must be an integer", type(index))
|
||||
self.index = index
|
||||
def _encode(self, obj, context):
|
||||
return [None] * self.index + [obj]
|
||||
def _decode(self, obj, context):
|
||||
return obj[self.index]
|
||||
|
||||
class PaddingAdapter(Adapter):
|
||||
r"""
|
||||
Adapter for padding.
|
||||
|
||||
Parameters:
|
||||
* subcon - the subcon to pad
|
||||
* pattern - the padding pattern (character). default is "\x00"
|
||||
* strict - whether or not to verify, during parsing, that the given
|
||||
padding matches the padding pattern. default is False (unstrict)
|
||||
"""
|
||||
__slots__ = ["pattern", "strict"]
|
||||
def __init__(self, subcon, pattern = "\x00", strict = False):
|
||||
Adapter.__init__(self, subcon)
|
||||
self.pattern = pattern
|
||||
self.strict = strict
|
||||
def _encode(self, obj, context):
|
||||
return self._sizeof(context) * self.pattern
|
||||
def _decode(self, obj, context):
|
||||
if self.strict:
|
||||
expected = self._sizeof(context) * self.pattern
|
||||
if obj != expected:
|
||||
raise PaddingError("expected %r, found %r" % (expected, obj))
|
||||
return obj
|
||||
|
||||
|
||||
#===============================================================================
|
||||
# validators
|
||||
#===============================================================================
|
||||
class Validator(Adapter):
|
||||
"""
|
||||
Abstract class: validates a condition on the encoded/decoded object.
|
||||
Override _validate(obj, context) in deriving classes.
|
||||
|
||||
Parameters:
|
||||
* subcon - the subcon to validate
|
||||
"""
|
||||
__slots__ = []
|
||||
def _decode(self, obj, context):
|
||||
if not self._validate(obj, context):
|
||||
raise ValidationError("invalid object", obj)
|
||||
return obj
|
||||
def _encode(self, obj, context):
|
||||
return self._decode(obj, context)
|
||||
def _validate(self, obj, context):
|
||||
raise NotImplementedError()
|
||||
|
||||
class OneOf(Validator):
|
||||
"""
|
||||
Validates that the value is one of the listed values
|
||||
|
||||
Parameters:
|
||||
* subcon - the subcon to validate
|
||||
* valids - a set of valid values
|
||||
"""
|
||||
__slots__ = ["valids"]
|
||||
def __init__(self, subcon, valids):
|
||||
Validator.__init__(self, subcon)
|
||||
self.valids = valids
|
||||
def _validate(self, obj, context):
|
||||
return obj in self.valids
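# Illustrative sketch (not part of the original source):
#
#   >>> OneOf(UBInt8("foo"), [4, 5, 6]).parse("\x05")
#   5
#   >>> OneOf(UBInt8("foo"), [4, 5, 6]).parse("\x07")   # raises ValidationError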
|
||||
|
||||
class NoneOf(Validator):
|
||||
"""
|
||||
Validates that the value is none of the listed values
|
||||
|
||||
Parameters:
|
||||
* subcon - the subcon to validate
|
||||
* invalids - a set of invalid values
|
||||
"""
|
||||
__slots__ = ["invalids"]
|
||||
def __init__(self, subcon, invalids):
|
||||
Validator.__init__(self, subcon)
|
||||
self.invalids = invalids
|
||||
def _validate(self, obj, context):
|
||||
return obj not in self.invalids
1249
Melodia/resources/audiotools/construct/core.py
Normal file
File diff suppressed because it is too large
160
Melodia/resources/audiotools/construct/debug.py
Normal file
@ -0,0 +1,160 @@
|
||||
"""
|
||||
Debugging utilities for constructs
|
||||
"""
|
||||
import sys
|
||||
import traceback
|
||||
import pdb
|
||||
import inspect
|
||||
from core import Construct, Subconstruct
|
||||
from lib import HexString, Container, ListContainer, AttrDict
|
||||
|
||||
|
||||
class Probe(Construct):
|
||||
"""
|
||||
A probe: dumps the context, stack frames, and stream content to the screen
|
||||
to aid the debugging process.
|
||||
See also Debugger.
|
||||
|
||||
Parameters:
|
||||
* name - the display name
|
||||
* show_stream - whether or not to show stream contents. default is True.
|
||||
the stream must be seekable.
|
||||
* show_context - whether or not to show the context. default is True.
|
||||
* show_stack - whether or not to show the upper stack frames. default
|
||||
is True.
|
||||
* stream_lookahead - the number of bytes to dump when show_stream is set.
|
||||
default is 100.
|
||||
|
||||
Example:
|
||||
Struct("foo",
|
||||
UBInt8("a"),
|
||||
Probe("between a and b"),
|
||||
UBInt8("b"),
|
||||
)
|
||||
"""
|
||||
__slots__ = [
|
||||
"printname", "show_stream", "show_context", "show_stack",
|
||||
"stream_lookahead"
|
||||
]
|
||||
counter = 0
|
||||
|
||||
def __init__(self, name = None, show_stream = True,
|
||||
show_context = True, show_stack = True,
|
||||
stream_lookahead = 100):
|
||||
Construct.__init__(self, None)
|
||||
if name is None:
|
||||
Probe.counter += 1
|
||||
name = "<unnamed %d>" % (Probe.counter,)
|
||||
self.printname = name
|
||||
self.show_stream = show_stream
|
||||
self.show_context = show_context
|
||||
self.show_stack = show_stack
|
||||
self.stream_lookahead = stream_lookahead
|
||||
def __repr__(self):
|
||||
return "%s(%r)" % (self.__class__.__name__, self.printname)
|
||||
def _parse(self, stream, context):
|
||||
self.printout(stream, context)
|
||||
def _build(self, obj, stream, context):
|
||||
self.printout(stream, context)
|
||||
def _sizeof(self, context):
|
||||
return 0
|
||||
|
||||
def printout(self, stream, context):
|
||||
obj = Container()
|
||||
if self.show_stream:
|
||||
obj.stream_position = stream.tell()
|
||||
follows = stream.read(self.stream_lookahead)
|
||||
if not follows:
|
||||
obj.following_stream_data = "EOF reached"
|
||||
else:
|
||||
stream.seek(-len(follows), 1)
|
||||
obj.following_stream_data = HexString(follows)
|
||||
print
|
||||
|
||||
if self.show_context:
|
||||
obj.context = context
|
||||
|
||||
if self.show_stack:
|
||||
obj.stack = ListContainer()
|
||||
frames = [s[0] for s in inspect.stack()][1:-1]
|
||||
frames.reverse()
|
||||
for f in frames:
|
||||
a = AttrDict()
|
||||
a.__update__(f.f_locals)
|
||||
obj.stack.append(a)
|
||||
|
||||
print "=" * 80
|
||||
print "Probe", self.printname
|
||||
print obj
|
||||
print "=" * 80
|
||||
|
||||
class Debugger(Subconstruct):
|
||||
"""
|
||||
A pdb-based debugger. When an exception occurs in the subcon, a debugger
|
||||
will appear and allow you to debug the error (and even fix on-the-fly).
|
||||
|
||||
Parameters:
|
||||
* subcon - the subcon to debug
|
||||
|
||||
Example:
|
||||
Debugger(
|
||||
Enum(UBInt8("foo"),
|
||||
a = 1,
|
||||
b = 2,
|
||||
c = 3
|
||||
)
|
||||
)
|
||||
"""
|
||||
__slots__ = ["retval"]
|
||||
def _parse(self, stream, context):
|
||||
try:
|
||||
return self.subcon._parse(stream, context)
|
||||
except Exception:
|
||||
self.retval = NotImplemented
|
||||
self.handle_exc("(you can set the value of 'self.retval', "
|
||||
"which will be returned)")
|
||||
if self.retval is NotImplemented:
|
||||
raise
|
||||
else:
|
||||
return self.retval
|
||||
def _build(self, obj, stream, context):
|
||||
try:
|
||||
self.subcon._build(obj, stream, context)
|
||||
except Exception:
|
||||
self.handle_exc()
|
||||
def handle_exc(self, msg = None):
|
||||
print "=" * 80
|
||||
print "Debugging exception of %s:" % (self.subcon,)
|
||||
print "".join(traceback.format_exception(*sys.exc_info())[1:])
|
||||
if msg:
|
||||
print msg
|
||||
pdb.post_mortem(sys.exc_info()[2])
|
||||
print "=" * 80
10
Melodia/resources/audiotools/construct/lib/__init__.py
Normal file
@ -0,0 +1,10 @@
|
||||
from binary import int_to_bin, bin_to_int, swap_bytes, encode_bin, decode_bin
|
||||
from bitstream import BitStreamReader, BitStreamWriter
|
||||
from container import (Container, AttrDict, FlagsContainer,
|
||||
ListContainer, LazyContainer)
|
||||
from hex import HexString, hexdump
|
||||
from utils import Packer, StringIO
|
||||
from path import drill
61
Melodia/resources/audiotools/construct/lib/binary.py
Normal file
@ -0,0 +1,61 @@
|
||||
def int_to_bin(number, width = 32):
|
||||
if number < 0:
|
||||
number += 1 << width
|
||||
i = width - 1
|
||||
bits = ["\x00"] * width
|
||||
while number and i >= 0:
|
||||
bits[i] = "\x00\x01"[number & 1]
|
||||
number >>= 1
|
||||
i -= 1
|
||||
return "".join(bits)
|
||||
|
||||
_bit_values = {"\x00" : 0, "\x01" : 1, "0" : 0, "1" : 1}
|
||||
def bin_to_int(bits, signed = False):
|
||||
number = 0
|
||||
bias = 0
|
||||
if signed and _bit_values[bits[0]] == 1:
|
||||
bits = bits[1:]
|
||||
bias = 1 << len(bits)
|
||||
for b in bits:
|
||||
number <<= 1
|
||||
number |= _bit_values[b]
|
||||
return number - bias
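# Illustrative round trip (not part of the original source):
#
#   >>> int_to_bin(19, width = 8)
#   '\x00\x00\x00\x01\x00\x00\x01\x01'
#   >>> bin_to_int(int_to_bin(19, width = 8))
#   19
#   >>> bin_to_int(int_to_bin(-3, width = 8), signed = True)
#   -3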
|
||||
|
||||
def swap_bytes(bits, bytesize = 8):
|
||||
i = 0
|
||||
l = len(bits)
|
||||
output = [""] * ((l // bytesize) + 1)
|
||||
j = len(output) - 1
|
||||
while i < l:
|
||||
output[j] = bits[i : i + bytesize]
|
||||
i += bytesize
|
||||
j -= 1
|
||||
return "".join(output)
|
||||
|
||||
_char_to_bin = {}
|
||||
_bin_to_char = {}
|
||||
for i in range(256):
|
||||
ch = chr(i)
|
||||
bin = int_to_bin(i, 8)
|
||||
_char_to_bin[ch] = bin
|
||||
_bin_to_char[bin] = ch
|
||||
|
||||
def encode_bin(data):
|
||||
return "".join(_char_to_bin[ch] for ch in data)
|
||||
|
||||
def decode_bin(data):
|
||||
assert len(data) & 7 == 0, "data length must be a multiple of 8"
|
||||
i = 0
|
||||
j = 0
|
||||
l = len(data) // 8
|
||||
chars = [""] * l
|
||||
while j < l:
|
||||
chars[j] = _bin_to_char[data[i:i+8]]
|
||||
i += 8
|
||||
j += 1
|
||||
return "".join(chars)
80
Melodia/resources/audiotools/construct/lib/bitstream.py
Normal file
@ -0,0 +1,80 @@
|
||||
from binary import encode_bin, decode_bin
|
||||
|
||||
|
||||
class BitStreamReader(object):
|
||||
__slots__ = ["substream", "buffer", "total_size"]
|
||||
def __init__(self, substream):
|
||||
self.substream = substream
|
||||
self.total_size = 0
|
||||
self.buffer = ""
|
||||
def close(self):
|
||||
if self.total_size % 8 != 0:
|
||||
raise ValueError("total size of read data must be a multiple of 8",
|
||||
self.total_size)
|
||||
def tell(self):
|
||||
return self.substream.tell()
|
||||
def seek(self, pos, whence = 0):
|
||||
self.buffer = ""
|
||||
self.total_size = 0
|
||||
self.substream.seek(pos, whence)
|
||||
def read(self, count):
|
||||
assert count >= 0
|
||||
l = len(self.buffer)
|
||||
if count == 0:
|
||||
data = ""
|
||||
elif count <= l:
|
||||
data = self.buffer[:count]
|
||||
self.buffer = self.buffer[count:]
|
||||
else:
|
||||
data = self.buffer
|
||||
count -= l
|
||||
bytes = count // 8
|
||||
if count & 7:
|
||||
bytes += 1
|
||||
buf = encode_bin(self.substream.read(bytes))
|
||||
data += buf[:count]
|
||||
self.buffer = buf[count:]
|
||||
self.total_size += len(data)
|
||||
return data
|
||||
|
||||
|
||||
class BitStreamWriter(object):
|
||||
__slots__ = ["substream", "buffer", "pos"]
|
||||
def __init__(self, substream):
|
||||
self.substream = substream
|
||||
self.buffer = []
|
||||
self.pos = 0
|
||||
def close(self):
|
||||
self.flush()
|
||||
def flush(self):
|
||||
bytes = decode_bin("".join(self.buffer))
|
||||
self.substream.write(bytes)
|
||||
self.buffer = []
|
||||
self.pos = 0
|
||||
def tell(self):
|
||||
return self.substream.tell() + self.pos // 8
|
||||
def seek(self, pos, whence = 0):
|
||||
self.flush()
|
||||
self.substream.seek(pos, whence)
|
||||
def write(self, data):
|
||||
if not data:
|
||||
return
|
||||
if type(data) is not str:
|
||||
raise TypeError("data must be a string, not %r" % (type(data),))
|
||||
self.buffer.append(data)
275
Melodia/resources/audiotools/construct/lib/container.py
Normal file
@ -0,0 +1,275 @@
|
||||
def recursion_lock(retval, lock_name = "__recursion_lock__"):
|
||||
def decorator(func):
|
||||
def wrapper(self, *args, **kw):
|
||||
if getattr(self, lock_name, False):
|
||||
return retval
|
||||
setattr(self, lock_name, True)
|
||||
try:
|
||||
return func(self, *args, **kw)
|
||||
finally:
|
||||
setattr(self, lock_name, False)
|
||||
wrapper.__name__ = func.__name__
|
||||
return wrapper
|
||||
return decorator
|
||||
|
||||
class Container(object):
|
||||
"""
|
||||
A generic container of attributes
|
||||
"""
|
||||
__slots__ = ["__dict__", "__attrs__"]
|
||||
def __init__(self, **kw):
|
||||
self.__dict__.update(kw)
|
||||
attrs = []
|
||||
attrs.extend(kw.keys())
|
||||
object.__setattr__(self, "__attrs__", attrs)
|
||||
|
||||
def __eq__(self, other):
|
||||
try:
|
||||
return self.__dict__ == other.__dict__
|
||||
except AttributeError:
|
||||
return False
|
||||
def __ne__(self, other):
|
||||
return not (self == other)
|
||||
|
||||
def __delattr__(self, name):
|
||||
object.__delattr__(self, name)
|
||||
self.__attrs__.remove(name)
|
||||
def __setattr__(self, name, value):
|
||||
d = self.__dict__
|
||||
if name not in d and not name.startswith("__"):
|
||||
self.__attrs__.append(name)
|
||||
d[name] = value
|
||||
def __getitem__(self, name):
|
||||
return self.__dict__[name]
|
||||
def __delitem__(self, name):
|
||||
self.__delattr__(name)
|
||||
def __setitem__(self, name, value):
|
||||
self.__setattr__(name, value)
|
||||
def __update__(self, obj):
|
||||
for name in obj.__attrs__:
|
||||
self[name] = obj[name]
|
||||
def __copy__(self):
|
||||
new = self.__class__()
|
||||
new.__attrs__ = self.__attrs__[:]
|
||||
new.__dict__ = self.__dict__.copy()
|
||||
return new
|
||||
def __iter__(self):
|
||||
for name in self.__attrs__:
|
||||
yield name, self.__dict__[name]
|
||||
|
||||
@recursion_lock("<...>")
|
||||
def __repr__(self):
|
||||
attrs = sorted("%s = %s" % (k, repr(v))
|
||||
for k, v in self.__dict__.iteritems()
|
||||
if not k.startswith("_"))
|
||||
return "%s(%s)" % (self.__class__.__name__, ", ".join(attrs))
|
||||
def __str__(self):
|
||||
return self.__pretty_str__()
|
||||
@recursion_lock("<...>")
|
||||
def __pretty_str__(self, nesting = 1, indentation = " "):
|
||||
attrs = []
|
||||
ind = indentation * nesting
|
||||
for k, v in self:
|
||||
if not k.startswith("_"):
|
||||
text = [ind, k, " = "]
|
||||
if hasattr(v, "__pretty_str__"):
|
||||
text.append(v.__pretty_str__(nesting + 1, indentation))
|
||||
else:
|
||||
text.append(repr(v))
|
||||
attrs.append("".join(text))
|
||||
if not attrs:
|
||||
return "%s()" % (self.__class__.__name__,)
|
||||
attrs.insert(0, self.__class__.__name__ + ":")
|
||||
return "\n".join(attrs)
|
||||
|
||||
def __introspect__(self):
|
||||
for k in self.__attrs__:
|
||||
v = self.__dict__[k]
|
||||
if not k.startswith("_"):
|
||||
yield "kv", (k, v)
|
||||
|
||||
|
||||
class FlagsContainer(Container):
|
||||
"""
|
||||
A container providing pretty-printing for flags. Only set flags are
|
||||
displayed.
|
||||
"""
|
||||
def __inspect__(self):
|
||||
for k in self.__attrs__:
|
||||
v = self.__dict__[k]
|
||||
if not k.startswith("_") and v:
|
||||
yield "kv", (k, v)
|
||||
|
||||
def __pretty_str__(self, nesting = 1, indentation = " "):
|
||||
attrs = []
|
||||
ind = indentation * nesting
|
||||
for k in self.__attrs__:
|
||||
v = self.__dict__[k]
|
||||
if not k.startswith("_") and v:
|
||||
attrs.append(ind + k)
|
||||
if not attrs:
|
||||
return "%s()" % (self.__class__.__name__,)
|
||||
attrs.insert(0, self.__class__.__name__+ ":")
|
||||
return "\n".join(attrs)
|
||||
|
||||
class ListContainer(list):
|
||||
"""
|
||||
A container for lists
|
||||
"""
|
||||
__slots__ = ["__recursion_lock__"]
|
||||
def __str__(self):
|
||||
return self.__pretty_str__()
|
||||
@recursion_lock("[...]")
|
||||
def __pretty_str__(self, nesting = 1, indentation = " "):
|
||||
if not self:
|
||||
return "[]"
|
||||
ind = indentation * nesting
|
||||
lines = ["["]
|
||||
for elem in self:
|
||||
lines.append("\n")
|
||||
lines.append(ind)
|
||||
if hasattr(elem, "__pretty_str__"):
|
||||
lines.append(elem.__pretty_str__(nesting + 1, indentation))
|
||||
else:
|
||||
lines.append(repr(elem))
|
||||
lines.append("\n")
|
||||
lines.append(indentation * (nesting - 1))
|
||||
lines.append("]")
|
||||
return "".join(lines)
|
||||
|
||||
class AttrDict(object):
|
||||
"""
|
||||
A dictionary that can be accessed both using indexing and attributes,
|
||||
i.e.,
|
||||
x = AttrDict()
|
||||
x.foo = 5
|
||||
print x["foo"]
|
||||
"""
|
||||
__slots__ = ["__dict__"]
|
||||
def __init__(self, **kw):
|
||||
self.__dict__ = kw
|
||||
def __contains__(self, key):
|
||||
return key in self.__dict__
|
||||
def __nonzero__(self):
|
||||
return bool(self.__dict__)
|
||||
def __repr__(self):
|
||||
return repr(self.__dict__)
|
||||
def __str__(self):
|
||||
return self.__pretty_str__()
|
||||
def __pretty_str__(self, nesting = 1, indentation = " "):
|
||||
if not self:
|
||||
return "{}"
|
||||
text = ["{\n"]
|
||||
ind = nesting * indentation
|
||||
for k in sorted(self.__dict__.keys()):
|
||||
v = self.__dict__[k]
|
||||
text.append(ind)
|
||||
text.append(repr(k))
|
||||
text.append(" : ")
|
||||
if hasattr(v, "__pretty_str__"):
|
||||
try:
|
||||
text.append(v.__pretty_str__(nesting+1, indentation))
|
||||
except Exception:
|
||||
text.append(repr(v))
|
||||
else:
|
||||
text.append(repr(v))
|
||||
text.append("\n")
|
||||
text.append((nesting-1) * indentation)
|
||||
text.append("}")
|
||||
return "".join(text)
|
||||
def __delitem__(self, key):
|
||||
del self.__dict__[key]
|
||||
def __getitem__(self, key):
|
||||
return self.__dict__[key]
|
||||
def __setitem__(self, key, value):
|
||||
self.__dict__[key] = value
|
||||
def __copy__(self):
|
||||
new = self.__class__()
|
||||
new.__dict__ = self.__dict__.copy()
|
||||
return new
|
||||
def __update__(self, other):
|
||||
if isinstance(other, dict):
|
||||
self.__dict__.update(other)
|
||||
else:
|
||||
self.__dict__.update(other.__dict__)
|
||||
|
||||
class LazyContainer(object):
|
||||
__slots__ = ["subcon", "stream", "pos", "context", "_value"]
|
||||
def __init__(self, subcon, stream, pos, context):
|
||||
self.subcon = subcon
|
||||
self.stream = stream
|
||||
self.pos = pos
|
||||
self.context = context
|
||||
self._value = NotImplemented
|
||||
def __eq__(self, other):
|
||||
try:
|
||||
return self._value == other._value
|
||||
except AttributeError:
|
||||
return False
|
||||
def __ne__(self, other):
|
||||
return not (self == other)
|
||||
def __str__(self):
|
||||
return self.__pretty_str__()
|
||||
def __pretty_str__(self, nesting = 1, indentation = " "):
|
||||
if self._value is NotImplemented:
|
||||
text = "<unread>"
|
||||
elif hasattr(self._value, "__pretty_str__"):
|
||||
text = self._value.__pretty_str__(nesting, indentation)
|
||||
else:
|
||||
text = repr(self._value)
|
||||
return "%s: %s" % (self.__class__.__name__, text)
|
||||
def read(self):
|
||||
self.stream.seek(self.pos)
|
||||
return self.subcon._parse(self.stream, self.context)
|
||||
def dispose(self):
|
||||
self.subcon = None
|
||||
self.stream = None
|
||||
self.context = None
|
||||
self.pos = None
|
||||
def _get_value(self):
|
||||
if self._value is NotImplemented:
|
||||
self._value = self.read()
|
||||
return self._value
|
||||
value = property(_get_value)
|
||||
has_value = property(lambda self: self._value is not NotImplemented)
36
Melodia/resources/audiotools/construct/lib/hex.py
Normal file
@ -0,0 +1,36 @@
|
||||
_printable = dict((chr(i), ".") for i in range(256))
|
||||
_printable.update((chr(i), chr(i)) for i in range(32, 128))
|
||||
|
||||
def hexdump(data, linesize = 16):
|
||||
prettylines = []
|
||||
if len(data) < 65536:
|
||||
fmt = "%%04X %%-%ds %%s"
|
||||
else:
|
||||
fmt = "%%08X %%-%ds %%s"
|
||||
fmt = fmt % (3 * linesize - 1,)
|
||||
for i in xrange(0, len(data), linesize):
|
||||
line = data[i : i + linesize]
|
||||
hextext = " ".join(b.encode("hex") for b in line)
|
||||
rawtext = "".join(_printable[b] for b in line)
|
||||
prettylines.append(fmt % (i, hextext, rawtext))
|
||||
return prettylines
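# Illustrative example (not part of the original source): each returned line
# pairs the offset, the hex bytes and their printable form, roughly:
#
#   >>> hexdump("hello\x00world")[0]
#   '0000   68 65 6c 6c 6f 00 77 6f 72 6c 64   hello.world'
#
# (exact column spacing depends on the format string built above).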
|
||||
|
||||
class HexString(str):
|
||||
"""
|
||||
represents a string that will be hex-dumped (only via __pretty_str__).
|
||||
this class derives from str, and behaves just like a normal string in all
|
||||
other contexts.
|
||||
"""
|
||||
def __init__(self, data, linesize = 16):
|
||||
str.__init__(self, data)
|
||||
self.linesize = linesize
|
||||
def __new__(cls, data, *args, **kwargs):
|
||||
return str.__new__(cls, data)
|
||||
def __pretty_str__(self, nesting = 1, indentation = " "):
|
||||
if not self:
|
||||
return "''"
|
||||
sep = "\n" + indentation * nesting
|
||||
return sep + sep.join(hexdump(self))
151
Melodia/resources/audiotools/construct/lib/path.py
Normal file
@ -0,0 +1,151 @@
|
||||
from container import Container
|
||||
|
||||
|
||||
def drill(obj, root = "", levels = -1):
|
||||
if levels == 0:
|
||||
yield root, obj
|
||||
return
|
||||
levels -= 1
|
||||
if isinstance(obj, Container):
|
||||
for k, v in obj:
|
||||
r = "%s.%s" % (root, k)
|
||||
if levels:
|
||||
for r2, v2 in drill(v, r, levels):
|
||||
yield r2, v2
|
||||
else:
|
||||
yield r, v
|
||||
elif isinstance(obj, list):
|
||||
for i, item in enumerate(obj):
|
||||
r = "%s[%d]" % (root, i)
|
||||
if levels:
|
||||
for r2, v2 in drill(item, r, levels):
|
||||
yield r2, v2
|
||||
else:
|
||||
yield r, item
|
||||
else:
|
||||
yield root, obj
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
from construct import *
|
||||
|
||||
c = Struct("foo",
|
||||
Byte("a"),
|
||||
Struct("b",
|
||||
Byte("c"),
|
||||
UBInt16("d"),
|
||||
),
|
||||
Byte("e"),
|
||||
Array(4,
|
||||
Struct("f",
|
||||
Byte("x"),
|
||||
Byte("y"),
|
||||
),
|
||||
),
|
||||
Byte("g"),
|
||||
)
|
||||
o = c.parse("acddexyxyxyxyg")
|
||||
|
||||
for lvl in range(4):
|
||||
for path, value in drill(o, levels = lvl):
|
||||
print path, value
|
||||
print "---"
|
||||
|
||||
output = """
|
||||
Container:
|
||||
a = 97
|
||||
b = Container:
|
||||
c = 99
|
||||
d = 25700
|
||||
e = 101
|
||||
f = [
|
||||
Container:
|
||||
x = 120
|
||||
y = 121
|
||||
Container:
|
||||
x = 120
|
||||
y = 121
|
||||
Container:
|
||||
x = 120
|
||||
y = 121
|
||||
Container:
|
||||
x = 120
|
||||
y = 121
|
||||
]
|
||||
g = 103
|
||||
---
|
||||
.a 97
|
||||
.b Container:
|
||||
c = 99
|
||||
d = 25700
|
||||
.e 101
|
||||
.f [
|
||||
Container:
|
||||
x = 120
|
||||
y = 121
|
||||
Container:
|
||||
x = 120
|
||||
y = 121
|
||||
Container:
|
||||
x = 120
|
||||
y = 121
|
||||
Container:
|
||||
x = 120
|
||||
y = 121
|
||||
]
|
||||
.g 103
|
||||
---
|
||||
.a 97
|
||||
.b.c 99
|
||||
.b.d 25700
|
||||
.e 101
|
||||
.f[0] Container:
|
||||
x = 120
|
||||
y = 121
|
||||
.f[1] Container:
|
||||
x = 120
|
||||
y = 121
|
||||
.f[2] Container:
|
||||
x = 120
|
||||
y = 121
|
||||
.f[3] Container:
|
||||
x = 120
|
||||
y = 121
|
||||
.g 103
|
||||
---
|
||||
.a 97
|
||||
.b.c 99
|
||||
.b.d 25700
|
||||
.e 101
|
||||
.f[0].x 120
|
||||
.f[0].y 121
|
||||
.f[1].x 120
|
||||
.f[1].y 121
|
||||
.f[2].x 120
|
||||
.f[2].y 121
|
||||
.f[3].x 120
|
||||
.f[3].y 121
|
||||
.g 103
|
||||
---
|
||||
"""
22
Melodia/resources/audiotools/construct/lib/utils.py
Normal file
@ -0,0 +1,22 @@
|
||||
try:
|
||||
from cStringIO import StringIO
|
||||
except ImportError:
|
||||
from StringIO import StringIO
|
||||
|
||||
|
||||
try:
|
||||
from struct import Struct as Packer
|
||||
except ImportError:
|
||||
from struct import pack, unpack, calcsize
|
||||
class Packer(object):
|
||||
__slots__ = ["format", "size"]
|
||||
def __init__(self, format):
|
||||
self.format = format
|
||||
self.size = calcsize(format)
|
||||
def pack(self, *args):
|
||||
return pack(self.format, *args)
|
||||
def unpack(self, data):
|
||||
return unpack(self.format, data)
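# Illustrative example (not part of the original source): either Packer
# behaves like struct.Struct, e.g.
#
#   >>> p = Packer(">H")
#   >>> p.pack(1025)
#   '\x04\x01'
#   >>> p.unpack('\x04\x01')
#   (1025,)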
628
Melodia/resources/audiotools/construct/macros.py
Normal file
@ -0,0 +1,628 @@
|
||||
from lib import BitStreamReader, BitStreamWriter, encode_bin, decode_bin
|
||||
from core import *
|
||||
from adapters import *
|
||||
|
||||
|
||||
#===============================================================================
|
||||
# fields
|
||||
#===============================================================================
|
||||
def Field(name, length):
|
||||
"""a field
|
||||
* name - the name of the field
|
||||
* length - the length of the field. the length can be either an integer
|
||||
(StaticField), or a function that takes the context as an argument and
|
||||
returns the length (MetaField)
|
||||
"""
|
||||
if callable(length):
|
||||
return MetaField(name, length)
|
||||
else:
|
||||
return StaticField(name, length)
|
||||
|
||||
def BitField(name, length, swapped = False, signed = False, bytesize = 8):
|
||||
"""a bit field; must be enclosed in a BitStruct
|
||||
* name - the name of the field
|
||||
* length - the length of the field in bits. the length can be either
|
||||
an integer, or a function that takes the context as an argument and
|
||||
returns the length
|
||||
* swapped - whether the value is byte-swapped (little endian). the
|
||||
default is False.
|
||||
* signed - whether the value of the bitfield is a signed integer. the
|
||||
default is False.
|
||||
* bytesize - the number of bits in a byte (used for byte-swapping). the
|
||||
default is 8.
|
||||
"""
|
||||
return BitIntegerAdapter(Field(name, length),
|
||||
length,
|
||||
swapped = swapped,
|
||||
signed = signed,
|
||||
bytesize = bytesize
|
||||
)
|
||||
|
||||
def Padding(length, pattern = "\x00", strict = False):
|
||||
r"""a padding field (value is discarded)
|
||||
* length - the length of the field. the length can be either an integer,
|
||||
or a function that takes the context as an argument and returns the
|
||||
length
|
||||
* pattern - the padding pattern (character) to use. default is "\x00"
|
||||
* strict - whether or not to raise an exception if the actual padding
pattern mismatches the desired pattern. default is False.
|
||||
"""
|
||||
return PaddingAdapter(Field(None, length),
|
||||
pattern = pattern,
|
||||
strict = strict,
|
||||
)
|
||||
|
||||
def Flag(name, truth = 1, falsehood = 0, default = False):
|
||||
"""a flag field (True or False)
|
||||
* name - the name of the field
|
||||
* truth - the numeric value of truth. the default is 1.
|
||||
* falsehood - the numeric value of falsehood. the default is 0.
|
||||
* default - the default value to assume, when the value is neither
|
||||
`truth` nor `falsehood`. the default is False.
|
||||
"""
|
||||
return SymmetricMapping(Field(name, 1),
|
||||
{True : chr(truth), False : chr(falsehood)},
|
||||
default = default,
|
||||
)
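# Illustrative example (not part of the original source):
#
#   >>> Flag("enabled").parse("\x01")
#   True
#   >>> Flag("enabled").build(False)
#   '\x00'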
|
||||
|
||||
#===============================================================================
|
||||
# field shortcuts
|
||||
#===============================================================================
|
||||
def Bit(name):
|
||||
"""a 1-bit BitField; must be enclosed in a BitStruct"""
|
||||
return BitField(name, 1)
|
||||
def Nibble(name):
|
||||
"""a 4-bit BitField; must be enclosed in a BitStruct"""
|
||||
return BitField(name, 4)
|
||||
def Octet(name):
|
||||
"""an 8-bit BitField; must be enclosed in a BitStruct"""
|
||||
return BitField(name, 8)
|
||||
|
||||
def UBInt8(name):
|
||||
"""unsigned, big endian 8-bit integer"""
|
||||
return FormatField(name, ">", "B")
|
||||
def UBInt16(name):
|
||||
"""unsigned, big endian 16-bit integer"""
|
||||
return FormatField(name, ">", "H")
|
||||
def UBInt32(name):
|
||||
"""unsigned, big endian 32-bit integer"""
|
||||
return FormatField(name, ">", "L")
|
||||
def UBInt64(name):
|
||||
"""unsigned, big endian 64-bit integer"""
|
||||
return FormatField(name, ">", "Q")
|
||||
|
||||
def SBInt8(name):
|
||||
"""signed, big endian 8-bit integer"""
|
||||
return FormatField(name, ">", "b")
|
||||
def SBInt16(name):
|
||||
"""signed, big endian 16-bit integer"""
|
||||
return FormatField(name, ">", "h")
|
||||
def SBInt32(name):
|
||||
"""signed, big endian 32-bit integer"""
|
||||
return FormatField(name, ">", "l")
|
||||
def SBInt64(name):
|
||||
"""signed, big endian 64-bit integer"""
|
||||
return FormatField(name, ">", "q")
|
||||
|
||||
def ULInt8(name):
|
||||
"""unsigned, little endian 8-bit integer"""
|
||||
return FormatField(name, "<", "B")
|
||||
def ULInt16(name):
|
||||
"""unsigned, little endian 16-bit integer"""
|
||||
return FormatField(name, "<", "H")
|
||||
def ULInt32(name):
|
||||
"""unsigned, little endian 32-bit integer"""
|
||||
return FormatField(name, "<", "L")
|
||||
def ULInt64(name):
|
||||
"""unsigned, little endian 64-bit integer"""
|
||||
return FormatField(name, "<", "Q")
|
||||
|
||||
def SLInt8(name):
|
||||
"""signed, little endian 8-bit integer"""
|
||||
return FormatField(name, "<", "b")
|
||||
def SLInt16(name):
|
||||
"""signed, little endian 16-bit integer"""
|
||||
return FormatField(name, "<", "h")
|
||||
def SLInt32(name):
|
||||
"""signed, little endian 32-bit integer"""
|
||||
return FormatField(name, "<", "l")
|
||||
def SLInt64(name):
|
||||
"""signed, little endian 64-bit integer"""
|
||||
return FormatField(name, "<", "q")
|
||||
|
||||
def UNInt8(name):
|
||||
"""unsigned, native endianity 8-bit integer"""
|
||||
return FormatField(name, "=", "B")
|
||||
def UNInt16(name):
|
||||
"""unsigned, native endianity 16-bit integer"""
|
||||
return FormatField(name, "=", "H")
|
||||
def UNInt32(name):
|
||||
"""unsigned, native endianity 32-bit integer"""
|
||||
return FormatField(name, "=", "L")
|
||||
def UNInt64(name):
|
||||
"""unsigned, native endianity 64-bit integer"""
|
||||
return FormatField(name, "=", "Q")
|
||||
|
||||
def SNInt8(name):
|
||||
"""signed, native endianity 8-bit integer"""
|
||||
return FormatField(name, "=", "b")
|
||||
def SNInt16(name):
|
||||
"""signed, native endianity 16-bit integer"""
|
||||
return FormatField(name, "=", "h")
|
||||
def SNInt32(name):
|
||||
"""signed, native endianity 32-bit integer"""
|
||||
return FormatField(name, "=", "l")
|
||||
def SNInt64(name):
|
||||
"""signed, native endianity 64-bit integer"""
|
||||
return FormatField(name, "=", "q")
|
||||
|
||||
def BFloat32(name):
|
||||
"""big endian, 32-bit IEEE floating point number"""
|
||||
return FormatField(name, ">", "f")
|
||||
def LFloat32(name):
|
||||
"""little endian, 32-bit IEEE floating point number"""
|
||||
return FormatField(name, "<", "f")
|
||||
def NFloat32(name):
|
||||
"""native endianity, 32-bit IEEE floating point number"""
|
||||
return FormatField(name, "=", "f")
|
||||
|
||||
def BFloat64(name):
|
||||
"""big endian, 64-bit IEEE floating point number"""
|
||||
return FormatField(name, ">", "d")
|
||||
def LFloat64(name):
|
||||
"""little endian, 64-bit IEEE floating point number"""
|
||||
return FormatField(name, "<", "d")
|
||||
def NFloat64(name):
|
||||
"""native endianity, 64-bit IEEE floating point number"""
|
||||
return FormatField(name, "=", "d")
|
||||
|
||||
|
||||
#===============================================================================
|
||||
# arrays
|
||||
#===============================================================================
|
||||
def Array(count, subcon):
|
||||
"""
|
||||
Repeats the given unit a fixed number of times.
|
||||
|
||||
:param int count: number of times to repeat
|
||||
:param ``Construct`` subcon: construct to repeat
|
||||
|
||||
>>> c = StrictRepeater(4, UBInt8("foo"))
|
||||
>>> c
|
||||
<Repeater('foo')>
|
||||
>>> c.parse("\\x01\\x02\\x03\\x04")
|
||||
[1, 2, 3, 4]
|
||||
>>> c.parse("\\x01\\x02\\x03\\x04\\x05\\x06")
|
||||
[1, 2, 3, 4]
|
||||
>>> c.build([5,6,7,8])
|
||||
'\\x05\\x06\\x07\\x08'
|
||||
>>> c.build([5,6,7,8,9])
|
||||
Traceback (most recent call last):
|
||||
...
|
||||
construct.core.RepeaterError: expected 4..4, found 5
|
||||
"""
|
||||
|
||||
if callable(count):
|
||||
con = MetaArray(count, subcon)
|
||||
else:
|
||||
con = MetaArray(lambda ctx: count, subcon)
|
||||
con._clear_flag(con.FLAG_DYNAMIC)
|
||||
return con
|
||||
|
||||
def PrefixedArray(subcon, length_field = UBInt8("length")):
|
||||
"""an array prefixed by a length field.
|
||||
* subcon - the subcon to be repeated
|
||||
* length_field - a construct returning an integer
|
||||
"""
|
||||
return LengthValueAdapter(
|
||||
Sequence(subcon.name,
|
||||
length_field,
|
||||
Array(lambda ctx: ctx[length_field.name], subcon),
|
||||
nested = False
|
||||
)
|
||||
)
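# Usage sketch, assuming the default one-byte length prefix:
#
#   >>> PrefixedArray(UBInt8("item")).parse("\x03\x01\x02\x03")
#   [1, 2, 3]
#   >>> PrefixedArray(UBInt8("item")).build([1, 2, 3])
#   '\x03\x01\x02\x03'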
|
||||
|
||||
def OpenRange(mincount, subcon):
|
||||
from sys import maxint
|
||||
return Range(mincount, maxint, subcon)
|
||||
|
||||
def GreedyRange(subcon):
|
||||
"""
|
||||
Repeats the given unit one or more times.
|
||||
|
||||
:param ``Construct`` subcon: construct to repeat
|
||||
|
||||
>>> from construct import GreedyRepeater, UBInt8
|
||||
>>> c = GreedyRepeater(UBInt8("foo"))
|
||||
>>> c.parse("\\x01")
|
||||
[1]
|
||||
>>> c.parse("\\x01\\x02\\x03")
|
||||
[1, 2, 3]
|
||||
>>> c.parse("\\x01\\x02\\x03\\x04\\x05\\x06")
|
||||
[1, 2, 3, 4, 5, 6]
|
||||
>>> c.parse("")
|
||||
Traceback (most recent call last):
|
||||
...
|
||||
construct.core.RepeaterError: expected 1..2147483647, found 0
|
||||
>>> c.build([1,2])
|
||||
'\\x01\\x02'
|
||||
>>> c.build([])
|
||||
Traceback (most recent call last):
|
||||
...
|
||||
construct.core.RepeaterError: expected 1..2147483647, found 0
|
||||
"""
|
||||
|
||||
return OpenRange(1, subcon)
|
||||
|
||||
def OptionalGreedyRange(subcon):
|
||||
"""
|
||||
Repeats the given unit zero or more times. This repeater can't
|
||||
fail, as it accepts lists of any length.
|
||||
|
||||
:param ``Construct`` subcon: construct to repeat
|
||||
|
||||
>>> from construct import OptionalGreedyRepeater, UBInt8
|
||||
>>> c = OptionalGreedyRepeater(UBInt8("foo"))
|
||||
>>> c.parse("")
|
||||
[]
|
||||
>>> c.parse("\\x01\\x02")
|
||||
[1, 2]
|
||||
>>> c.build([])
|
||||
''
|
||||
>>> c.build([1,2])
|
||||
'\\x01\\x02'
|
||||
"""
|
||||
|
||||
return OpenRange(0, subcon)
|
||||
|
||||
|
||||
#===============================================================================
|
||||
# subconstructs
|
||||
#===============================================================================
|
||||
def Optional(subcon):
|
||||
"""an optional construct. if parsing fails, returns None.
|
||||
* subcon - the subcon to optionally parse or build
|
||||
"""
|
||||
return Select(subcon.name, subcon, Pass)
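# Usage sketch (behaviour follows from Select and Pass; illustrative only):
#
#   >>> Optional(UBInt8("x")).parse("\x05")
#   5
#   >>> Optional(UBInt8("x")).parse("") is None   # parse failure falls back to Pass
#   True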
|
||||
|
||||
def Bitwise(subcon):
|
||||
"""converts the stream to bits, and passes the bitstream to subcon
|
||||
* subcon - a bitwise construct (usually BitField)
|
||||
"""
|
||||
# subcons larger than MAX_BUFFER will be wrapped by Restream instead
|
||||
# of Buffered. implementation details, don't stick your nose in :)
|
||||
MAX_BUFFER = 1024 * 8
|
||||
def resizer(length):
|
||||
if length & 7:
|
||||
raise SizeofError("size must be a multiple of 8", length)
|
||||
return length >> 3
|
||||
if not subcon._is_flag(subcon.FLAG_DYNAMIC) and subcon.sizeof() < MAX_BUFFER:
|
||||
con = Buffered(subcon,
|
||||
encoder = decode_bin,
|
||||
decoder = encode_bin,
|
||||
resizer = resizer
|
||||
)
|
||||
else:
|
||||
con = Restream(subcon,
|
||||
stream_reader = BitStreamReader,
|
||||
stream_writer = BitStreamWriter,
|
||||
resizer = resizer)
|
||||
return con
|
||||
|
||||
def Aligned(subcon, modulus = 4, pattern = "\x00"):
|
||||
r"""aligns subcon to modulus boundary using padding pattern
|
||||
* subcon - the subcon to align
|
||||
* modulus - the modulus boundary (default is 4)
|
||||
* pattern - the padding pattern (default is \x00)
|
||||
"""
|
||||
if modulus < 2:
|
||||
raise ValueError("modulus must be >= 2", modulus)
|
||||
if modulus in (2, 4, 8, 16, 32, 64, 128, 256, 512, 1024):
|
||||
def padlength(ctx):
|
||||
m1 = modulus - 1
|
||||
return (modulus - (subcon._sizeof(ctx) & m1)) & m1
|
||||
else:
|
||||
def padlength(ctx):
|
||||
return (modulus - (subcon._sizeof(ctx) % modulus)) % modulus
|
||||
return SeqOfOne(subcon.name,
|
||||
subcon,
|
||||
                    # pad the subcon out to the next modulus boundary
|
||||
Padding(padlength, pattern = pattern),
|
||||
nested = False,
|
||||
)
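# Usage sketch with the default 4-byte modulus:
#
#   >>> Aligned(UBInt8("x")).build(5)
#   '\x05\x00\x00\x00'
#   >>> Aligned(UBInt8("x")).parse("\x05\x00\x00\x00")
#   5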
|
||||
|
||||
def SeqOfOne(name, *args, **kw):
|
||||
"""a sequence of one element. only the first element is meaningful, the
|
||||
rest are discarded
|
||||
* name - the name of the sequence
|
||||
* args - subconstructs
|
||||
* kw - any keyword arguments to Sequence
|
||||
"""
|
||||
return IndexingAdapter(Sequence(name, *args, **kw), index = 0)
|
||||
|
||||
def Embedded(subcon):
|
||||
"""embeds a struct into the enclosing struct.
|
||||
* subcon - the struct to embed
|
||||
"""
|
||||
return Reconfig(subcon.name, subcon, subcon.FLAG_EMBED)
|
||||
|
||||
def Rename(newname, subcon):
|
||||
"""renames an existing construct
|
||||
* newname - the new name
|
||||
* subcon - the subcon to rename
|
||||
"""
|
||||
return Reconfig(newname, subcon)
|
||||
|
||||
def Alias(newname, oldname):
|
||||
"""creates an alias for an existing element in a struct
|
||||
* newname - the new name
|
||||
* oldname - the name of an existing element
|
||||
"""
|
||||
return Value(newname, lambda ctx: ctx[oldname])
|
||||
|
||||
|
||||
#===============================================================================
|
||||
# mapping
|
||||
#===============================================================================
|
||||
def SymmetricMapping(subcon, mapping, default = NotImplemented):
|
||||
"""defines a symmetrical mapping: a->b, b->a.
|
||||
* subcon - the subcon to map
|
||||
* mapping - the encoding mapping (a dict); the decoding mapping is
|
||||
achieved by reversing this mapping
|
||||
* default - the default value to use when no mapping is found. if no
|
||||
default value is given, an exception is raised. setting to Pass would
|
||||
return the value "as is" (unmapped)
|
||||
"""
|
||||
reversed_mapping = dict((v, k) for k, v in mapping.iteritems())
|
||||
return MappingAdapter(subcon,
|
||||
encoding = mapping,
|
||||
decoding = reversed_mapping,
|
||||
encdefault = default,
|
||||
decdefault = default,
|
||||
)
|
||||
|
||||
def Enum(subcon, **kw):
|
||||
"""a set of named values mapping.
|
||||
* subcon - the subcon to map
|
||||
* kw - keyword arguments which serve as the encoding mapping
|
||||
* _default_ - an optional, keyword-only argument that specifies the
|
||||
default value to use when the mapping is undefined. if not given,
|
||||
an exception is raised when the mapping is undefined. use `Pass` to
|
||||
pass the unmapped value as-is
|
||||
"""
|
||||
return SymmetricMapping(subcon, kw, kw.pop("_default_", NotImplemented))
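# Usage sketch:
#
#   >>> code = Enum(UBInt8("code"), STOP = 0, GO = 1)
#   >>> code.parse("\x01")
#   'GO'
#   >>> code.build("STOP")
#   '\x00'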
|
||||
|
||||
def FlagsEnum(subcon, **kw):
|
||||
"""a set of flag values mapping.
|
||||
* subcon - the subcon to map
|
||||
* kw - keyword arguments which serve as the encoding mapping
|
||||
"""
|
||||
return FlagsAdapter(subcon, kw)
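# Usage sketch (each named bit decodes to a boolean attribute; illustrative):
#
#   >>> perms = FlagsEnum(UBInt8("perm"), read = 4, write = 2, execute = 1)
#   >>> parsed = perms.parse("\x05")
#   >>> parsed.read, parsed.write, parsed.execute
#   (True, False, True)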
|
||||
|
||||
|
||||
#===============================================================================
|
||||
# structs
|
||||
#===============================================================================
|
||||
def AlignedStruct(name, *subcons, **kw):
|
||||
"""a struct of aligned fields
|
||||
* name - the name of the struct
|
||||
* subcons - the subcons that make up this structure
|
||||
* kw - keyword arguments to pass to Aligned: 'modulus' and 'pattern'
|
||||
"""
|
||||
return Struct(name, *(Aligned(sc, **kw) for sc in subcons))
|
||||
|
||||
def BitStruct(name, *subcons):
|
||||
"""a struct of bitwise fields
|
||||
* name - the name of the struct
|
||||
* subcons - the subcons that make up this structure
|
||||
"""
|
||||
return Bitwise(Struct(name, *subcons))
|
||||
|
||||
def EmbeddedBitStruct(*subcons):
|
||||
"""an embedded BitStruct. no name is necessary.
|
||||
* subcons - the subcons that make up this structure
|
||||
"""
|
||||
return Bitwise(Embedded(Struct(None, *subcons)))
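# Usage sketch; the enclosed fields must total a whole number of bytes
# (here 1 + 4 + 3 = 8 bits):
#
#   >>> header = BitStruct("header",
#   ...                    Bit("flag"),
#   ...                    Nibble("channel"),
#   ...                    BitField("value", 3))
#   >>> parsed = header.parse("\xff")
#   >>> parsed.flag, parsed.channel, parsed.value
#   (1, 15, 7)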
|
||||
|
||||
#===============================================================================
|
||||
# strings
|
||||
#===============================================================================
|
||||
def String(name, length, encoding=None, padchar=None, paddir="right",
|
||||
trimdir="right"):
|
||||
"""
|
||||
A configurable, fixed-length string field.
|
||||
|
||||
The padding character must be specified for padding and trimming to work.
|
||||
|
||||
:param str name: name
|
||||
:param int length: length, in bytes
|
||||
:param str encoding: encoding (e.g. "utf8") or None for no encoding
|
||||
:param str padchar: optional character to pad out strings
|
||||
:param str paddir: direction to pad out strings; one of "right", "left",
|
||||
or "both"
|
||||
:param str trimdir: direction to trim strings; one of "right", "left"
|
||||
|
||||
>>> from construct import String
|
||||
>>> String("foo", 5).parse("hello")
|
||||
'hello'
|
||||
>>>
|
||||
>>> String("foo", 12, encoding = "utf8").parse("hello joh\\xd4\\x83n")
|
||||
u'hello joh\\u0503n'
|
||||
>>>
|
||||
>>> foo = String("foo", 10, padchar = "X", paddir = "right")
|
||||
>>> foo.parse("helloXXXXX")
|
||||
'hello'
|
||||
>>> foo.build("hello")
|
||||
'helloXXXXX'
|
||||
"""
|
||||
|
||||
con = StringAdapter(Field(name, length), encoding=encoding)
|
||||
if padchar is not None:
|
||||
con = PaddedStringAdapter(con, padchar=padchar, paddir=paddir,
|
||||
trimdir=trimdir)
|
||||
return con
|
||||
|
||||
def PascalString(name, length_field=UBInt8("length"), encoding=None):
|
||||
"""
|
||||
A length-prefixed string.
|
||||
|
||||
``PascalString`` is named after the string types of Pascal, which are
|
||||
length-prefixed. Lisp strings also follow this convention.
|
||||
|
||||
The length field will appear in the same ``Container`` as the
|
||||
``PascalString``, with the given name.
|
||||
|
||||
:param str name: name
|
||||
:param ``Construct`` length_field: a field which will store the length of
|
||||
the string
|
||||
:param str encoding: encoding (e.g. "utf8") or None for no encoding
|
||||
|
||||
>>> foo = PascalString("foo")
|
||||
>>> foo.parse("\\x05hello")
|
||||
'hello'
|
||||
>>> foo.build("hello world")
|
||||
'\\x0bhello world'
|
||||
>>>
|
||||
>>> foo = PascalString("foo", length_field = UBInt16("length"))
|
||||
>>> foo.parse("\\x00\\x05hello")
|
||||
'hello'
|
||||
>>> foo.build("hello")
|
||||
'\\x00\\x05hello'
|
||||
"""
|
||||
|
||||
return StringAdapter(
|
||||
LengthValueAdapter(
|
||||
Sequence(name,
|
||||
length_field,
|
||||
Field("data", lambda ctx: ctx[length_field.name]),
|
||||
)
|
||||
),
|
||||
encoding=encoding,
|
||||
)
|
||||
|
||||
def CString(name, terminators="\x00", encoding=None,
|
||||
char_field=Field(None, 1)):
|
||||
"""
|
||||
A string ending in a terminator.
|
||||
|
||||
``CString`` is similar to the strings of C, C++, and other related
|
||||
programming languages.
|
||||
|
||||
By default, the terminator is the NULL byte (0x00).
|
||||
|
||||
:param str name: name
|
||||
:param iterable terminators: sequence of valid terminators, in order of
|
||||
preference
|
||||
:param str encoding: encoding (e.g. "utf8") or None for no encoding
|
||||
:param ``Construct`` char_field: construct representing a single character
|
||||
|
||||
>>> foo = CString("foo")
|
||||
>>>
|
||||
>>> foo.parse("hello\\x00")
|
||||
'hello'
|
||||
>>> foo.build("hello")
|
||||
'hello\\x00'
|
||||
>>>
|
||||
>>> foo = CString("foo", terminators = "XYZ")
|
||||
>>>
|
||||
>>> foo.parse("helloX")
|
||||
'hello'
|
||||
>>> foo.parse("helloY")
|
||||
'hello'
|
||||
>>> foo.parse("helloZ")
|
||||
'hello'
|
||||
>>> foo.build("hello")
|
||||
'helloX'
|
||||
"""
|
||||
return Rename(name,
|
||||
CStringAdapter(
|
||||
RepeatUntil(lambda obj, ctx: obj in terminators,
|
||||
char_field,
|
||||
),
|
||||
terminators=terminators,
|
||||
encoding=encoding,
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
#===============================================================================
|
||||
# conditional
|
||||
#===============================================================================
|
||||
def IfThenElse(name, predicate, then_subcon, else_subcon):
|
||||
"""an if-then-else conditional construct: if the predicate indicates True,
|
||||
`then_subcon` will be used; otherwise `else_subcon`
|
||||
* name - the name of the construct
|
||||
* predicate - a function taking the context as an argument and returning
|
||||
True or False
|
||||
* then_subcon - the subcon that will be used if the predicate returns True
|
||||
* else_subcon - the subcon that will be used if the predicate returns False
|
||||
"""
|
||||
return Switch(name, lambda ctx: bool(predicate(ctx)),
|
||||
{
|
||||
True : then_subcon,
|
||||
False : else_subcon,
|
||||
}
|
||||
)
|
||||
|
||||
def If(predicate, subcon, elsevalue = None):
|
||||
"""an if-then conditional construct: if the predicate indicates True,
|
||||
subcon will be used; otherwise, `elsevalue` will be returned instead.
|
||||
* predicate - a function taking the context as an argument and returning
|
||||
True or False
|
||||
* subcon - the subcon that will be used if the predicate returns True
|
||||
* elsevalue - the value that will be used should the predicate return False.
|
||||
by default this value is None.
|
||||
"""
|
||||
return IfThenElse(subcon.name,
|
||||
predicate,
|
||||
subcon,
|
||||
Value("elsevalue", lambda ctx: elsevalue)
|
||||
)
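# Usage sketch (the field names are made up for illustration):
#
#   >>> record = Struct("record",
#   ...                 Flag("has_extra"),
#   ...                 If(lambda ctx: ctx["has_extra"], UBInt8("extra")))
#   >>> record.parse("\x01\x07").extra
#   7
#   >>> print record.parse("\x00").extra
#   None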
|
||||
|
||||
|
||||
#===============================================================================
|
||||
# misc
|
||||
#===============================================================================
|
||||
def OnDemandPointer(offsetfunc, subcon, force_build = True):
|
||||
"""an on-demand pointer.
|
||||
* offsetfunc - a function taking the context as an argument and returning
|
||||
the absolute stream position
|
||||
* subcon - the subcon that will be parsed from the `offsetfunc()` stream
|
||||
position on demand
|
||||
* force_build - see OnDemand. by default True.
|
||||
"""
|
||||
return OnDemand(Pointer(offsetfunc, subcon),
|
||||
advance_stream = False,
|
||||
force_build = force_build
|
||||
)
|
||||
|
||||
def Magic(data):
|
||||
return ConstAdapter(Field(None, len(data)), data)
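# Usage sketch: Magic matches (and emits) a fixed byte string, raising an
# error on any mismatch.
#
#   >>> Magic("fLaC").parse("fLaC")
#   'fLaC'
#   >>> Magic("fLaC").build(None)
#   'fLaC'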
|
||||
418
Melodia/resources/audiotools/cue.py
Normal file
418
Melodia/resources/audiotools/cue.py
Normal file
@ -0,0 +1,418 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
#Audio Tools, a module and set of tools for manipulating audio data
|
||||
#Copyright (C) 2008-2011 Brian Langenberger
|
||||
|
||||
#This program is free software; you can redistribute it and/or modify
|
||||
#it under the terms of the GNU General Public License as published by
|
||||
#the Free Software Foundation; either version 2 of the License, or
|
||||
#(at your option) any later version.
|
||||
|
||||
#This program is distributed in the hope that it will be useful,
|
||||
#but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
#GNU General Public License for more details.
|
||||
|
||||
#You should have received a copy of the GNU General Public License
|
||||
#along with this program; if not, write to the Free Software
|
||||
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
|
||||
"""The cuesheet handling module."""
|
||||
|
||||
import re
|
||||
from audiotools import SheetException, parse_timestamp, build_timestamp
|
||||
import gettext
|
||||
|
||||
gettext.install("audiotools", unicode=True)
|
||||
|
||||
###################
|
||||
#Cue Sheet Parsing
|
||||
###################
|
||||
|
||||
#This method of cuesheet reading involves a tokenizer and parser,
|
||||
#analogous to lex/yacc.
|
||||
#It might be easier to use a line-by-line ad-hoc method for parsing,
|
||||
#but this brute-force approach should be a bit more thorough.
|
||||
|
||||
SPACE = 0x0
|
||||
TAG = 0x1
|
||||
NUMBER = 0x2
|
||||
EOL = 0x4
|
||||
STRING = 0x8
|
||||
ISRC = 0x10
|
||||
TIMESTAMP = 0x20
|
||||
|
||||
|
||||
class CueException(SheetException):
|
||||
"""Raised by cuesheet parsing errors."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
def tokens(cuedata):
|
||||
"""Yields (text, token, line) tuples from cuedata stream.
|
||||
|
||||
text is a plain string.
|
||||
token is an integer such as TAG or NUMBER.
|
||||
line is a line number integer."""
|
||||
|
||||
full_length = len(cuedata)
|
||||
cuedata = cuedata.lstrip('efbbbf'.decode('hex'))
|
||||
line_number = 1
|
||||
|
||||
#This isn't completely accurate since the whitespace requirements
|
||||
#between tokens aren't enforced.
|
||||
TOKENS = [(re.compile("^(%s)" % (s)), element) for (s, element) in
|
||||
[(r'[A-Z]{2}[A-Za-z0-9]{3}[0-9]{7}', ISRC),
|
||||
(r'[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}', TIMESTAMP),
|
||||
(r'[0-9]+', NUMBER),
|
||||
(r'[\r\n]+', EOL),
|
||||
(r'".+?"', STRING),
|
||||
(r'\S+', STRING),
|
||||
(r'[ ]+', SPACE)]]
|
||||
|
||||
TAGMATCH = re.compile(r'^[A-Z]+$')
|
||||
|
||||
while (True):
|
||||
for (token, element) in TOKENS:
|
||||
t = token.search(cuedata)
|
||||
if (t is not None):
|
||||
cuedata = cuedata[len(t.group()):]
|
||||
if (element == SPACE):
|
||||
break
|
||||
elif (element == NUMBER):
|
||||
yield (int(t.group()), element, line_number)
|
||||
elif (element == EOL):
|
||||
line_number += 1
|
||||
yield (t.group(), element, line_number)
|
||||
elif (element == STRING):
|
||||
if (TAGMATCH.match(t.group())):
|
||||
yield (t.group(), TAG, line_number)
|
||||
else:
|
||||
yield (t.group().strip('"'), element, line_number)
|
||||
elif (element == TIMESTAMP):
|
||||
(m, s, f) = map(int, t.group().split(":"))
|
||||
yield (((m * 60 * 75) + (s * 75) + f),
|
||||
element, line_number)
|
||||
else:
|
||||
yield (t.group(), element, line_number)
|
||||
break
|
||||
else:
|
||||
break
|
||||
|
||||
if (len(cuedata) > 0):
|
||||
raise CueException(_(u"Invalid token at char %d") % \
|
||||
(full_length - len(cuedata)))
|
||||
|
||||
|
||||
def get_value(tokens, accept, error):
|
||||
"""Retrieves a specific token from the stream of tokens.
|
||||
|
||||
tokens - the token iterator
|
||||
accept - an "or"ed list of all the tokens we'll accept
|
||||
error - the string to prepend to the error message
|
||||
|
||||
Returns the gotten value which matches one of the accepted tokens
|
||||
or raises ValueError if the token matches none of them.
|
||||
"""
|
||||
|
||||
(token, element, line_number) = tokens.next()
|
||||
if ((element & accept) != 0):
|
||||
return token
|
||||
else:
|
||||
raise CueException(_(u"%(error)s at line %(line)d") % \
|
||||
{"error": error,
|
||||
"line": line_number})
|
||||
|
||||
|
||||
def parse(tokens):
|
||||
"""Returns a Cuesheet object from the token iterator stream.
|
||||
|
||||
Raises CueException if a parsing error occurs.
|
||||
"""
|
||||
|
||||
def skip_to_eol(tokens):
|
||||
(token, element, line_number) = tokens.next()
|
||||
while (element != EOL):
|
||||
(token, element, line_number) = tokens.next()
|
||||
|
||||
cuesheet = Cuesheet()
|
||||
track = None
|
||||
|
||||
try:
|
||||
while (True):
|
||||
(token, element, line_number) = tokens.next()
|
||||
if (element == TAG):
|
||||
|
||||
#ignore comment lines
|
||||
if (token == "REM"):
|
||||
skip_to_eol(tokens)
|
||||
|
||||
#we're moving to a new track
|
||||
elif (token == 'TRACK'):
|
||||
if (track is not None):
|
||||
cuesheet.tracks[track.number] = track
|
||||
|
||||
track = Track(get_value(tokens, NUMBER,
|
||||
_(u"Invalid track number")),
|
||||
get_value(tokens, TAG | STRING,
|
||||
_(u"Invalid track type")))
|
||||
|
||||
get_value(tokens, EOL, "Excess data")
|
||||
|
||||
#if we haven't started on track data yet,
|
||||
#add attributes to the main cue sheet
|
||||
elif (track is None):
|
||||
if (token in ('CATALOG', 'CDTEXTFILE',
|
||||
'PERFORMER', 'SONGWRITER',
|
||||
'TITLE')):
|
||||
cuesheet.attribs[token] = get_value(
|
||||
tokens,
|
||||
STRING | TAG | NUMBER | ISRC,
|
||||
_(u"Missing value"))
|
||||
|
||||
get_value(tokens, EOL, _(u"Excess data"))
|
||||
|
||||
elif (token == 'FILE'):
|
||||
filename = get_value(tokens, STRING,
|
||||
_(u"Missing filename"))
|
||||
filetype = get_value(tokens, STRING | TAG,
|
||||
_(u"Missing file type"))
|
||||
|
||||
cuesheet.attribs[token] = (filename, filetype)
|
||||
|
||||
get_value(tokens, EOL, _(u"Excess data"))
|
||||
|
||||
else:
|
||||
raise CueException(
|
||||
_(u"Invalid tag %(tag)s at line %(line)d") % \
|
||||
{"tag": token,
|
||||
"line": line_number})
|
||||
#otherwise, we're adding data to the current track
|
||||
else:
|
||||
if (token in ('ISRC', 'PERFORMER',
|
||||
'SONGWRITER', 'TITLE')):
|
||||
track.attribs[token] = get_value(
|
||||
tokens,
|
||||
STRING | TAG | NUMBER | ISRC,
|
||||
"Missing value")
|
||||
|
||||
get_value(tokens, EOL, _(u"Invalid data"))
|
||||
|
||||
elif (token == 'FLAGS'):
|
||||
flags = []
|
||||
s = get_value(tokens, STRING | TAG | EOL,
|
||||
_(u"Invalid flag"))
|
||||
while (('\n' not in s) and ('\r' not in s)):
|
||||
flags.append(s)
|
||||
s = get_value(tokens, STRING | TAG | EOL,
|
||||
_(u"Invalid flag"))
|
||||
track.attribs[token] = ",".join(flags)
|
||||
|
||||
elif (token in ('POSTGAP', 'PREGAP')):
|
||||
track.attribs[token] = get_value(
|
||||
tokens, TIMESTAMP,
|
||||
_(u"Invalid timestamp"))
|
||||
get_value(tokens, EOL, _(u"Excess data"))
|
||||
|
||||
elif (token == 'INDEX'):
|
||||
index_number = get_value(tokens, NUMBER,
|
||||
_(u"Invalid index number"))
|
||||
index_timestamp = get_value(tokens, TIMESTAMP,
|
||||
_(u"Invalid timestamp"))
|
||||
track.indexes[index_number] = index_timestamp
|
||||
|
||||
get_value(tokens, EOL, _(u"Excess data"))
|
||||
|
||||
elif (token in ('FILE',)):
|
||||
skip_to_eol(tokens)
|
||||
|
||||
else:
|
||||
raise CueException(
|
||||
_(u"Invalid tag %(tag)s at line %(line)d") % \
|
||||
{"tag": token,
|
||||
"line": line_number})
|
||||
|
||||
else:
|
||||
raise CueException(_(u"Missing tag at line %d") % (
|
||||
line_number))
|
||||
except StopIteration:
|
||||
if (track is not None):
|
||||
cuesheet.tracks[track.number] = track
|
||||
return cuesheet
|
||||
|
||||
|
||||
def __attrib_str__(attrib):
|
||||
if (isinstance(attrib, tuple)):
|
||||
return " ".join([__attrib_str__(a) for a in attrib])
|
||||
elif (re.match(r'^[A-Z]+$', attrib) is not None):
|
||||
return attrib
|
||||
else:
|
||||
return "\"%s\"" % (attrib)
|
||||
|
||||
|
||||
class Cuesheet:
|
||||
"""An object representing a cuesheet file."""
|
||||
|
||||
def __init__(self):
|
||||
self.attribs = {}
|
||||
self.tracks = {}
|
||||
|
||||
def __repr__(self):
|
||||
return "Cuesheet(attribs=%s,tracks=%s)" % \
|
||||
(repr(self.attribs), repr(self.tracks))
|
||||
|
||||
def __str__(self):
|
||||
return "\r\n".join(["%s %s" % (key, __attrib_str__(value))
|
||||
for key, value in self.attribs.items()] + \
|
||||
[str(track) for track in
|
||||
sorted(self.tracks.values())])
|
||||
|
||||
def catalog(self):
|
||||
"""Returns the cuesheet's CATALOG number as a plain string, or None.
|
||||
|
||||
If present, this value is typically a CD's UPC code."""
|
||||
|
||||
if ('CATALOG' in self.attribs):
|
||||
return str(self.attribs['CATALOG'])
|
||||
else:
|
||||
return None
|
||||
|
||||
def single_file_type(self):
|
||||
"""Returns True if this cuesheet is formatted for a single file."""
|
||||
|
||||
previous = -1
|
||||
for t in self.indexes():
|
||||
for index in t:
|
||||
if (index <= previous):
|
||||
return False
|
||||
else:
|
||||
previous = index
|
||||
else:
|
||||
return True
|
||||
|
||||
def indexes(self):
|
||||
"""Yields a set of index lists, one for each track in the file."""
|
||||
|
||||
for key in sorted(self.tracks.keys()):
|
||||
yield tuple(
|
||||
[self.tracks[key].indexes[k]
|
||||
for k in sorted(self.tracks[key].indexes.keys())])
|
||||
|
||||
def pcm_lengths(self, total_length):
|
||||
"""Yields a list of PCM lengths for all audio tracks within the file.
|
||||
|
||||
total_length is the length of the entire file in PCM frames."""
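        # Index positions are stored in CD frames (75 per second), so at
        # 44100Hz each frame corresponds to 44100 / 75 = 588 PCM frames.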
|
||||
|
||||
previous = None
|
||||
|
||||
for key in sorted(self.tracks.keys()):
|
||||
current = self.tracks[key].indexes
|
||||
if (previous is None):
|
||||
previous = current
|
||||
else:
|
||||
track_length = (current[max(current.keys())] -
|
||||
previous[max(previous.keys())]) * (44100 / 75)
|
||||
total_length -= track_length
|
||||
yield track_length
|
||||
previous = current
|
||||
|
||||
yield total_length
|
||||
|
||||
def ISRCs(self):
|
||||
"""Returns a track_number->ISRC dict of all non-empty tracks."""
|
||||
|
||||
return dict([(track.number, track.ISRC()) for track in
|
||||
self.tracks.values() if track.ISRC() is not None])
|
||||
|
||||
@classmethod
|
||||
def file(cls, sheet, filename):
|
||||
"""Constructs a new cuesheet string from a compatible object.
|
||||
|
||||
sheet must have catalog(), indexes() and ISRCs() methods.
|
||||
filename is the filename string the cuesheet is created for.
|
||||
Although we don't care whether the filename points to a real file,
|
||||
other tools sometimes do.
|
||||
"""
|
||||
|
||||
import cStringIO
|
||||
|
||||
catalog = sheet.catalog() # a catalog string, or None
|
||||
indexes = list(sheet.indexes()) # a list of index tuples
|
||||
ISRCs = sheet.ISRCs() # a track_number->ISRC dict
|
||||
|
||||
data = cStringIO.StringIO()
|
||||
|
||||
if (catalog is not None):
|
||||
data.write("CATALOG %s\r\n" % (catalog))
|
||||
data.write("FILE \"%s\" WAVE\r\n" % (filename))
|
||||
|
||||
for (i, current) in enumerate(indexes):
|
||||
tracknum = i + 1
|
||||
|
||||
data.write(" TRACK %2.2d AUDIO\r\n" % (tracknum))
|
||||
|
||||
if (tracknum in ISRCs.keys()):
|
||||
data.write(" ISRC %s\r\n" % (ISRCs[tracknum]))
|
||||
|
||||
for (j, index) in enumerate(current):
|
||||
data.write(" INDEX %2.2d %s\r\n" % (j,
|
||||
build_timestamp(index)))
|
||||
|
||||
return data.getvalue()
|
||||
|
||||
|
||||
class Track:
|
||||
"""A track inside a Cuesheet object."""
|
||||
|
||||
def __init__(self, number, type):
|
||||
"""number is the track's number on disc, type is a string."""
|
||||
|
||||
self.number = number
|
||||
self.type = type
|
||||
self.attribs = {}
|
||||
self.indexes = {}
|
||||
|
||||
def __cmp__(self, t):
|
||||
return cmp(self.number, t.number)
|
||||
|
||||
def __repr__(self):
|
||||
return "Track(%s,%s,attribs=%s,indexes=%s)" % \
|
||||
(repr(self.number), repr(self.type),
|
||||
repr(self.attribs), repr(self.indexes))
|
||||
|
||||
def __str__(self):
|
||||
return (" TRACK %2.2d %s\r\n" % (self.number, self.type)) + \
|
||||
"\r\n".join([" %s %s" % (key, __attrib_str__(value))
|
||||
for key, value in self.attribs.items()] + \
|
||||
[" INDEX %2.2d %2.2d:%2.2d:%2.2d" % \
|
||||
(k, v / 75 / 60, v / 75 % 60, v % 75)
|
||||
for (k, v) in sorted(self.indexes.items())])
|
||||
|
||||
def ISRC(self):
|
||||
"""Returns the track's ISRC value, or None."""
|
||||
|
||||
if ('ISRC' in self.attribs.keys()):
|
||||
return str(self.attribs['ISRC'])
|
||||
else:
|
||||
return None
|
||||
|
||||
|
||||
def read_cuesheet(filename):
|
||||
"""Returns a Cuesheet from a cuesheet filename on disk.
|
||||
|
||||
Raises CueException if some error occurs reading or parsing the file.
|
||||
"""
|
||||
|
||||
try:
|
||||
f = open(filename, 'r')
|
||||
except IOError, msg:
|
||||
raise CueException(unicode(_(u"Unable to read cuesheet")))
|
||||
try:
|
||||
sheet = parse(tokens(f.read()))
|
||||
if (not sheet.single_file_type()):
|
||||
raise CueException(_(u"Cuesheet not formatted for disc images"))
|
||||
else:
|
||||
return sheet
|
||||
finally:
|
||||
f.close()
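# Usage sketch (the path is hypothetical):
#
#   >>> sheet = read_cuesheet("album.cue")
#   >>> sheet.catalog()        # UPC string, or None
#   >>> sheet.ISRCs()          # {track_number: ISRC}
#   >>> list(sheet.indexes())  # per-track index tuples, in CD frames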
|
BIN
Melodia/resources/audiotools/decoders.so
Executable file
BIN
Melodia/resources/audiotools/decoders.so
Executable file
Binary file not shown.
277
Melodia/resources/audiotools/delta.py
Normal file
277
Melodia/resources/audiotools/delta.py
Normal file
@ -0,0 +1,277 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
#Audio Tools, a module and set of tools for manipulating audio data
|
||||
#Copyright (C) 2008-2011 Brian Langenberger
|
||||
|
||||
#This program is free software; you can redistribute it and/or modify
|
||||
#it under the terms of the GNU General Public License as published by
|
||||
#the Free Software Foundation; either version 2 of the License, or
|
||||
#(at your option) any later version.
|
||||
|
||||
#This program is distributed in the hope that it will be useful,
|
||||
#but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
#GNU General Public License for more details.
|
||||
|
||||
#You should have received a copy of the GNU General Public License
|
||||
#along with this program; if not, write to the Free Software
|
||||
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
|
||||
import sys
|
||||
from itertools import izip
|
||||
import bz2
|
||||
import sqlite3
|
||||
from hashlib import sha1
|
||||
import base64
|
||||
import anydbm
|
||||
import subprocess
|
||||
import tempfile
|
||||
import whichdb
|
||||
from audiotools import BIN, transfer_data
|
||||
import cStringIO
|
||||
|
||||
|
||||
class UndoDB:
|
||||
"""A class for performing undo operations on files.
|
||||
|
||||
This stores an undo/redo patch for transforming a file
|
||||
back to its original value, or forward again to its modified form."""
|
||||
|
||||
def __init__(self, filename):
|
||||
"""filename is the location on disk for this undo database."""
|
||||
|
||||
self.db = sqlite3.connect(filename)
|
||||
self.cursor = self.db.cursor()
|
||||
|
||||
self.cursor.execute("""CREATE TABLE IF NOT EXISTS patch (
|
||||
patch_id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
patch_data BLOB NOT NULL
|
||||
)""")
|
||||
|
||||
self.cursor.execute("""CREATE TABLE IF NOT EXISTS source_file (
|
||||
source_checksum CHAR(40) PRIMARY KEY,
|
||||
source_size INTEGER NOT NULL,
|
||||
target_size INTEGER NOT NULL,
|
||||
patch_id INTEGER,
|
||||
FOREIGN KEY (patch_id) REFERENCES patch (patch_id) ON DELETE CASCADE
|
||||
)""")
|
||||
|
||||
def close(self):
|
||||
"""Closes any open database handles."""
|
||||
|
||||
self.cursor.close()
|
||||
self.db.close()
|
||||
|
||||
@classmethod
|
||||
def build_patch(cls, s1, s2):
|
||||
"""Given two strings, returns a transformation patch.
|
||||
|
||||
This function presumes the two strings will be largely
|
||||
equal and similar in length. It operates by performing an
|
||||
XOR operation across both and BZ2 compressing the result."""
|
||||
|
||||
if (len(s1) < len(s2)):
|
||||
s1 += (chr(0) * (len(s2) - len(s1)))
|
||||
elif (len(s2) < len(s1)):
|
||||
s2 += (chr(0) * (len(s1) - len(s2)))
|
||||
|
||||
patch = bz2.compress("".join([chr(ord(x) ^ ord(y)) for (x, y) in
|
||||
izip(s1, s2)]))
|
||||
return patch
|
||||
|
||||
@classmethod
|
||||
def apply_patch(cls, s, patch, new_length):
|
||||
"""Given a string, patch and new length, restores string.
|
||||
|
||||
patch is the same BZ2 compressed output from build_patch().
|
||||
new_length is the size of the string originally,
|
||||
which must be stored externally from the patch itself."""
|
||||
|
||||
if (len(s) > new_length):
|
||||
s = s[0:new_length]
|
||||
elif (len(s) < new_length):
|
||||
s += (chr(0) * (new_length - len(s)))
|
||||
|
||||
return "".join([chr(ord(x) ^ ord(y)) for (x, y) in
|
||||
izip(s, bz2.decompress(patch))])
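    # Round-trip sketch (the strings are made up for illustration):
    #
    #   >>> old, new = "hello world", "hello there!"
    #   >>> patch = UndoDB.build_patch(old, new)
    #   >>> UndoDB.apply_patch(new, patch, len(old)) == old
    #   True
    #   >>> UndoDB.apply_patch(old, patch, len(new)) == new
    #   True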
|
||||
|
||||
def __add__(self, file_data1, file_data2):
|
||||
#file_data1's target is file_data2 and
|
||||
#file_data2's target is file_data1
|
||||
|
||||
self.cursor.execute(
|
||||
"INSERT INTO patch (patch_id, patch_data) VALUES (?, ?)",
|
||||
[None,
|
||||
base64.b64encode(
|
||||
UndoDB.build_patch(file_data1,
|
||||
file_data2)).decode('ascii')])
|
||||
patch_id = self.cursor.lastrowid
|
||||
try:
|
||||
self.cursor.execute("""INSERT INTO source_file (
|
||||
source_checksum, source_size, target_size, patch_id) values (?, ?, ?, ?)""",
|
||||
[sha1(file_data1).hexdigest().decode('ascii'),
|
||||
len(file_data1),
|
||||
len(file_data2),
|
||||
patch_id])
|
||||
self.cursor.execute("""INSERT INTO source_file (
|
||||
source_checksum, source_size, target_size, patch_id) values (?, ?, ?, ?)""",
|
||||
[sha1(file_data2).hexdigest().decode('ascii'),
|
||||
len(file_data2),
|
||||
len(file_data1),
|
||||
patch_id])
|
||||
self.db.commit()
|
||||
except sqlite3.IntegrityError:
|
||||
self.db.rollback()
|
||||
|
||||
def __undo__(self, file_data):
|
||||
self.cursor.execute("""SELECT target_size, patch_data FROM
|
||||
source_file, patch WHERE ((source_checksum = ?) AND
|
||||
(source_size = ?) AND
|
||||
(source_file.patch_id = patch.patch_id))""",
|
||||
[sha1(file_data).hexdigest().decode('ascii'),
|
||||
len(file_data)])
|
||||
row = self.cursor.fetchone()
|
||||
if (row is not None):
|
||||
(target_size, patch) = row
|
||||
return UndoDB.apply_patch(
|
||||
file_data,
|
||||
base64.b64decode(patch.encode('ascii')),
|
||||
target_size)
|
||||
else:
|
||||
return None
|
||||
|
||||
def add(self, old_file, new_file):
|
||||
"""Adds an undo entry for transforming new_file to old_file.
|
||||
|
||||
Both are filename strings."""
|
||||
|
||||
old_f = open(old_file, 'rb')
|
||||
new_f = open(new_file, 'rb')
|
||||
try:
|
||||
self.__add__(old_f.read(), new_f.read())
|
||||
finally:
|
||||
old_f.close()
|
||||
new_f.close()
|
||||
|
||||
def undo(self, new_file):
|
||||
"""Updates new_file to its original state,
|
||||
if present in the undo database.
|
||||
|
||||
Returns True if undo performed, False if not."""
|
||||
|
||||
new_f = open(new_file, 'rb')
|
||||
try:
|
||||
old_data = self.__undo__(new_f.read())
|
||||
finally:
|
||||
new_f.close()
|
||||
if (old_data is not None):
|
||||
old_f = open(new_file, 'wb')
|
||||
old_f.write(old_data)
|
||||
old_f.close()
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
class OldUndoDB:
|
||||
"""A class for performing legacy undo operations on files.
|
||||
|
||||
This implementation is based on xdelta and requires it to be
|
||||
installed to function.
|
||||
"""
|
||||
|
||||
def __init__(self, filename):
|
||||
"""filename is the location on disk for this undo database."""
|
||||
|
||||
self.db = anydbm.open(filename, 'c')
|
||||
|
||||
def close(self):
|
||||
"""Closes any open database handles."""
|
||||
|
||||
self.db.close()
|
||||
|
||||
@classmethod
|
||||
def checksum(cls, filename):
|
||||
"""Returns the SHA1 checksum of the filename's contents."""
|
||||
|
||||
f = open(filename, "rb")
|
||||
c = sha1("")
|
||||
try:
|
||||
transfer_data(f.read, c.update)
|
||||
return c.hexdigest()
|
||||
finally:
|
||||
f.close()
|
||||
|
||||
def add(self, old_file, new_file):
|
||||
"""Adds an undo entry for transforming new_file to old_file.
|
||||
|
||||
Both are filename strings."""
|
||||
|
||||
#perform xdelta between old and new track to temporary file
|
||||
delta_f = tempfile.NamedTemporaryFile(suffix=".delta")
|
||||
|
||||
try:
|
||||
if (subprocess.call([BIN["xdelta"],
|
||||
"delta",
|
||||
new_file, old_file, delta_f.name]) != 2):
|
||||
#store the xdelta in our internal db
|
||||
f = open(delta_f.name, 'rb')
|
||||
data = cStringIO.StringIO()
|
||||
transfer_data(f.read, data.write)
|
||||
f.close()
|
||||
|
||||
self.db[OldUndoDB.checksum(new_file)] = data.getvalue()
|
||||
else:
|
||||
raise IOError("error performing xdelta operation")
|
||||
finally:
|
||||
delta_f.close()
|
||||
|
||||
def undo(self, new_file):
|
||||
"""Updates new_file to its original state,
|
||||
if present in the undo database."""
|
||||
|
||||
undo_checksum = OldUndoDB.checksum(new_file)
|
||||
if (undo_checksum in self.db.keys()):
|
||||
#copy the xdelta to a temporary file
|
||||
xdelta_f = tempfile.NamedTemporaryFile(suffix=".delta")
|
||||
xdelta_f.write(self.db[undo_checksum])
|
||||
xdelta_f.flush()
|
||||
|
||||
#patch the existing track to a temporary track
|
||||
old_track = tempfile.NamedTemporaryFile()
|
||||
try:
|
||||
if (subprocess.call([BIN["xdelta"],
|
||||
"patch",
|
||||
xdelta_f.name,
|
||||
new_file,
|
||||
old_track.name]) == 0):
|
||||
#copy the temporary track over the existing file
|
||||
f1 = open(old_track.name, 'rb')
|
||||
f2 = open(new_file, 'wb')
|
||||
transfer_data(f1.read, f2.write)
|
||||
f1.close()
|
||||
f2.close()
|
||||
return True
|
||||
else:
|
||||
raise IOError("error performing xdelta operation")
|
||||
finally:
|
||||
old_track.close()
|
||||
xdelta_f.close()
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
def open_db(filename):
|
||||
"""Given a filename string, returns UndoDB or OldUndoDB.
|
||||
|
||||
If the file doesn't exist, this uses UndoDB by default.
|
||||
Otherwise, OldUndoDB is used when xdelta is installed and the file is a recognized dbm database."""
|
||||
|
||||
if (BIN.can_execute(BIN["xdelta"])):
|
||||
db = whichdb.whichdb(filename)
|
||||
if ((db is not None) and (db != '')):
|
||||
return OldUndoDB(filename)
|
||||
else:
|
||||
return UndoDB(filename)
|
||||
else:
|
||||
return UndoDB(filename)
|
BIN
Melodia/resources/audiotools/encoders.so
Executable file
BIN
Melodia/resources/audiotools/encoders.so
Executable file
Binary file not shown.
715
Melodia/resources/audiotools/flac.py
Normal file
715
Melodia/resources/audiotools/flac.py
Normal file
@ -0,0 +1,715 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
#Audio Tools, a module and set of tools for manipulating audio data
|
||||
#Copyright (C) 2007-2011 Brian Langenberger
|
||||
|
||||
#This program is free software; you can redistribute it and/or modify
|
||||
#it under the terms of the GNU General Public License as published by
|
||||
#the Free Software Foundation; either version 2 of the License, or
|
||||
#(at your option) any later version.
|
||||
|
||||
#This program is distributed in the hope that it will be useful,
|
||||
#but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
#GNU General Public License for more details.
|
||||
|
||||
#You should have received a copy of the GNU General Public License
|
||||
#along with this program; if not, write to the Free Software
|
||||
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
|
||||
import array
|
||||
import audiotools
|
||||
import sys,cStringIO
|
||||
|
||||
Con = audiotools.Con
|
||||
|
||||
class UTF8(Con.Struct):
|
||||
@classmethod
|
||||
def __total_utf8_bytes__(cls, header):
|
||||
total = 0
|
||||
for b in header:
|
||||
if b == '\x01':
|
||||
total += 1
|
||||
else:
|
||||
break
|
||||
return max(1,total)
|
||||
|
||||
@classmethod
|
||||
def __calculate_utf8_value__(cls, ctx):
|
||||
import operator
|
||||
|
||||
return Con.lib.bin_to_int(ctx.header[ctx.header.index('\x00') + 1:] + \
|
||||
reduce(operator.concat,
|
||||
[s[2:] for s in ctx['sub_byte']],
|
||||
''))
|
||||
|
||||
def __init__(self, name):
|
||||
Con.Struct.__init__(
|
||||
self,name,
|
||||
Con.Bytes('header',8),
|
||||
Con.Value('total_bytes',
|
||||
lambda ctx: self.__total_utf8_bytes__(ctx['header'])),
|
||||
Con.MetaRepeater(
|
||||
lambda ctx: self.__total_utf8_bytes__(ctx['header']) - 1,
|
||||
Con.Bytes('sub_byte',8)),
|
||||
Con.Value('value',
|
||||
lambda ctx: self.__calculate_utf8_value__(ctx)))
|
||||
|
||||
class Unary(Con.Adapter):
|
||||
def __init__(self, name):
|
||||
Con.Adapter.__init__(
|
||||
self,
|
||||
Con.RepeatUntil(lambda obj,ctx: obj == 1,
|
||||
Con.Byte(name)))
|
||||
|
||||
def _encode(self, value, context):
|
||||
if (value > 0):
|
||||
return ([0] * (value)) + [1]
|
||||
else:
|
||||
return [1]
|
||||
|
||||
def _decode(self, obj, context):
|
||||
return len(obj) - 1
|
||||
|
||||
class PlusOne(Con.Adapter):
|
||||
def _encode(self, value, context):
|
||||
return value - 1
|
||||
|
||||
def _decode(self, obj, context):
|
||||
return obj + 1
|
||||
|
||||
class FlacStreamException(Exception): pass
|
||||
|
||||
class FlacReader:
|
||||
FRAME_HEADER = Con.Struct('frame_header',
|
||||
Con.Bits('sync',14),
|
||||
Con.Bits('reserved',2),
|
||||
Con.Bits('block_size',4),
|
||||
Con.Bits('sample_rate',4),
|
||||
Con.Bits('channel_assignment',4),
|
||||
Con.Bits('bits_per_sample',3),
|
||||
Con.Padding(1),
|
||||
Con.IfThenElse(
|
||||
'total_channels',
|
||||
lambda ctx1: ctx1['channel_assignment'] <= 7,
|
||||
Con.Value('c',lambda ctx2: ctx2['channel_assignment'] + 1),
|
||||
Con.Value('c',lambda ctx3: 2)),
|
||||
|
||||
UTF8('frame_number'),
|
||||
|
||||
Con.IfThenElse(
|
||||
'extended_block_size',
|
||||
lambda ctx1: ctx1['block_size'] == 6,
|
||||
Con.Bits('b',8),
|
||||
Con.If(lambda ctx2: ctx2['block_size'] == 7,
|
||||
Con.Bits('b',16))),
|
||||
|
||||
Con.IfThenElse(
|
||||
'extended_sample_rate',
|
||||
lambda ctx1: ctx1['sample_rate'] == 12,
|
||||
Con.Bits('s',8),
|
||||
Con.If(lambda ctx2: ctx2['sample_rate'] in (13,14),
|
||||
Con.Bits('s',16))),
|
||||
|
||||
Con.Bits('crc8',8))
|
||||
|
||||
UNARY = Con.Struct('unary',
|
||||
Con.RepeatUntil(
|
||||
lambda obj,ctx: obj == '\x01',
|
||||
Con.Field('bytes',1)),
|
||||
Con.Value('value',
|
||||
lambda ctx: len(ctx['bytes']) - 1)
|
||||
)
|
||||
|
||||
SUBFRAME_HEADER = Con.Struct('subframe_header',
|
||||
Con.Padding(1),
|
||||
Con.Bits('subframe_type',6),
|
||||
Con.Flag('has_wasted_bits_per_sample'),
|
||||
Con.IfThenElse(
|
||||
'wasted_bits_per_sample',
|
||||
lambda ctx: ctx['has_wasted_bits_per_sample'],
|
||||
PlusOne(Unary('value')),
|
||||
Con.Value('value',lambda ctx2: 0)))
|
||||
|
||||
|
||||
GET_BLOCKSIZE_FROM_STREAMINFO = -1
|
||||
GET_8BIT_BLOCKSIZE_FROM_END_OF_HEADER = -2
|
||||
GET_16BIT_BLOCKSIZE_FROM_END_OF_HEADER = -3
|
||||
|
||||
BLOCK_SIZE = (GET_BLOCKSIZE_FROM_STREAMINFO,
|
||||
192,
|
||||
576,1152,2304,4608,
|
||||
GET_8BIT_BLOCKSIZE_FROM_END_OF_HEADER,
|
||||
GET_16BIT_BLOCKSIZE_FROM_END_OF_HEADER,
|
||||
256,512,1024,2048,4096,8192,16384,32768)
|
||||
|
||||
GET_SAMPLE_SIZE_FROM_STREAMINFO = -1
|
||||
SAMPLE_SIZE = (GET_SAMPLE_SIZE_FROM_STREAMINFO,
|
||||
8,12,None,16,20,24,None)
|
||||
|
||||
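#FIXED0-FIXED4 are FLAC's fixed linear predictors of orders 0 through 4:
#each reconstructs sample i from previously decoded samples plus the
#residual at that position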
def FIXED0(subframe,residual,i):
|
||||
subframe.insert(i,
|
||||
residual[i])
|
||||
|
||||
def FIXED1(subframe,residual,i):
|
||||
subframe.insert(i,
|
||||
subframe[i - 1] + residual[i])
|
||||
|
||||
def FIXED2(subframe,residual,i):
|
||||
subframe.insert(i,
|
||||
((2 * subframe[i - 1]) - subframe[i - 2] + \
|
||||
residual[i]))
|
||||
|
||||
def FIXED3(subframe,residual,i):
|
||||
subframe.insert(i,
|
||||
((3 * subframe[i - 1]) - (3 * subframe[i - 2]) + \
|
||||
subframe[i - 3] + residual[i]))
|
||||
|
||||
def FIXED4(subframe,residual,i):
|
||||
subframe.insert(i,
|
||||
((4 * subframe[i - 1]) - (6 * subframe[i - 2]) + \
|
||||
(4 * subframe[i - 3]) - subframe[i - 4] + residual[i]))
|
||||
|
||||
#iterates over all of the channels, in order
|
||||
def MERGE_INDEPENDENT(channel_list):
|
||||
channel_data = [iter(c) for c in channel_list]
|
||||
|
||||
while (True):
|
||||
for channel in channel_data:
|
||||
yield channel.next()
|
||||
|
||||
def MERGE_LEFT(channel_list):
|
||||
channel_left = iter(channel_list[0])
|
||||
channel_side = iter(channel_list[1])
|
||||
|
||||
while (True):
|
||||
left = channel_left.next()
|
||||
side = channel_side.next()
|
||||
|
||||
yield left
|
||||
yield left - side
|
||||
|
||||
|
||||
def MERGE_RIGHT(channel_list):
|
||||
channel_side = iter(channel_list[0])
|
||||
channel_right = iter(channel_list[1])
|
||||
|
||||
while (True):
|
||||
side = channel_side.next()
|
||||
right = channel_right.next()
|
||||
|
||||
yield side + right
|
||||
yield right
|
||||
|
||||
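#mid/side decorrelation: the mid channel is doubled and the side channel's
#low bit folded back in, so (mid + side) >> 1 and (mid - side) >> 1
#recover the left and right samples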
def MERGE_MID(channel_list):
|
||||
channel_mid = iter(channel_list[0])
|
||||
channel_side = iter(channel_list[1])
|
||||
|
||||
while (True):
|
||||
mid = channel_mid.next()
|
||||
side = channel_side.next()
|
||||
|
||||
mid = mid << 1
|
||||
mid |= (side & 0x1)
|
||||
|
||||
yield (mid + side) >> 1
|
||||
yield (mid - side) >> 1
|
||||
|
||||
|
||||
CHANNEL_FUNCTIONS = (MERGE_INDEPENDENT,
|
||||
MERGE_INDEPENDENT,
|
||||
MERGE_INDEPENDENT,
|
||||
MERGE_INDEPENDENT,
|
||||
MERGE_INDEPENDENT,
|
||||
MERGE_INDEPENDENT,
|
||||
MERGE_INDEPENDENT,
|
||||
MERGE_INDEPENDENT,
|
||||
MERGE_LEFT,
|
||||
MERGE_RIGHT,
|
||||
MERGE_MID)
|
||||
|
||||
FIXED_FUNCTIONS = (FIXED0,FIXED1,FIXED2,FIXED3,FIXED4)
|
||||
|
||||
def __init__(self, flac_stream):
|
||||
self.stream = BufferedStream(flac_stream)
|
||||
self.streaminfo = None
|
||||
self.bitstream = None
|
||||
|
||||
#ensure the file starts with 'fLaC'
|
||||
self.read_stream_marker()
|
||||
|
||||
#initialize self.bitstream
|
||||
self.begin_bitstream()
|
||||
|
||||
#find self.streaminfo in case we need it
|
||||
self.read_metadata_blocks()
|
||||
|
||||
def close(self):
|
||||
if (self.bitstream != None):
|
||||
self.bitstream.close()
|
||||
else:
|
||||
self.stream.close()
|
||||
|
||||
|
||||
def read_stream_marker(self):
|
||||
if (self.stream.read(4) != 'fLaC'):
|
||||
raise FlacStreamException('invalid stream marker')
|
||||
|
||||
def read_metadata_blocks(self):
|
||||
block = audiotools.FlacAudio.METADATA_BLOCK_HEADER.parse_stream(self.stream)
|
||||
while (block.last_block == 0):
|
||||
if (block.block_type == 0):
|
||||
self.streaminfo = audiotools.FlacAudio.STREAMINFO.parse_stream(self.stream)
|
||||
else:
|
||||
self.stream.seek(block.block_length,1)
|
||||
|
||||
block = audiotools.FlacAudio.METADATA_BLOCK_HEADER.parse_stream(self.stream)
|
||||
self.stream.seek(block.block_length,1)
|
||||
|
||||
def begin_bitstream(self):
|
||||
import bitstream
|
||||
|
||||
#self.bitstream = Con.BitStreamReader(self.stream)
|
||||
self.bitstream = bitstream.BitStreamReader(self.stream)
|
||||
|
||||
def read_frame(self):
|
||||
self.stream.reset_buffer()
|
||||
|
||||
try:
|
||||
header = FlacReader.FRAME_HEADER.parse_stream(self.bitstream)
|
||||
except Con.core.FieldError:
|
||||
return ""
|
||||
|
||||
if (header.sync != 0x3FFE):
|
||||
raise FlacStreamException('invalid sync')
|
||||
|
||||
if (crc8(self.stream.getvalue()[0:-1]) != header.crc8):
|
||||
raise FlacStreamException('crc8 checksum failed')
|
||||
|
||||
|
||||
#block_size tells us how many samples we need from each subframe
|
||||
block_size = FlacReader.BLOCK_SIZE[header.block_size]
|
||||
if (block_size == self.GET_BLOCKSIZE_FROM_STREAMINFO):
|
||||
block_size = self.streaminfo.maximum_blocksize
|
||||
|
||||
elif ((block_size == self.GET_8BIT_BLOCKSIZE_FROM_END_OF_HEADER) or
|
||||
(block_size == self.GET_16BIT_BLOCKSIZE_FROM_END_OF_HEADER)):
|
||||
block_size = header.extended_block_size + 1
|
||||
|
||||
|
||||
#grab subframe data as 32-bit array objects
|
||||
subframe_data = []
|
||||
|
||||
for channel_number in xrange(header.total_channels):
|
||||
subframe_data.append(
|
||||
self.read_subframe(header, block_size, channel_number))
|
||||
|
||||
crc16sum = crc16(self.stream.getvalue())
|
||||
|
||||
|
||||
#try to byte-align the stream
|
||||
if (len(self.bitstream.buffer) > 0):
|
||||
self.bitstream.read(len(self.bitstream.buffer))
|
||||
|
||||
|
||||
if (crc16sum != Con.Bits('crc16',16).parse_stream(self.bitstream)):
|
||||
raise FlacStreamException('crc16 checksum failed')
|
||||
|
||||
|
||||
#convert our list of subframe data arrays into
|
||||
#a string of sample data
|
||||
if (FlacReader.SAMPLE_SIZE[header.bits_per_sample] == 16):
|
||||
merged_frames = array.array('h',
|
||||
FlacReader.CHANNEL_FUNCTIONS[
|
||||
header.channel_assignment](subframe_data))
|
||||
|
||||
if (audiotools.BIG_ENDIAN):
|
||||
merged_frames.byteswap()
|
||||
|
||||
return merged_frames.tostring()
|
||||
|
||||
elif (FlacReader.SAMPLE_SIZE[header.bits_per_sample] == 8):
|
||||
merged_frames = array.array('b',
|
||||
FlacReader.CHANNEL_FUNCTIONS[
|
||||
header.channel_assignment](subframe_data))
|
||||
|
||||
return merged_frames.tostring()
|
||||
|
||||
else:
|
||||
if (FlacReader.SAMPLE_SIZE[header.bits_per_sample] == \
|
||||
self.GET_SAMPLE_SIZE_FROM_STREAMINFO):
|
||||
bits_per_sample = self.streaminfo.bits_per_sample + 1
|
||||
|
||||
elif (FlacReader.SAMPLE_SIZE[header.bits_per_sample] == None):
|
||||
raise FlacStreamException('invalid bits per sample')
|
||||
|
||||
else:
|
||||
bits_per_sample = FlacReader.SAMPLE_SIZE[header.bits_per_sample]
|
||||
|
||||
stream = Con.GreedyRepeater(
|
||||
Con.BitStruct('bits',
|
||||
Con.Bits('value',bits_per_sample,
|
||||
swapped=True,signed=True)))
|
||||
|
||||
return stream.build(
|
||||
[Con.Container(value=v) for v in
|
||||
FlacReader.CHANNEL_FUNCTIONS[header.channel_assignment](
|
||||
subframe_data)])
|
||||
|
||||
|
||||
|
||||
def read_subframe(self, frame_header, block_size, channel_number):
|
||||
subframe_header = \
|
||||
FlacReader.SUBFRAME_HEADER.parse_stream(self.bitstream)
|
||||
|
||||
#figure out the bits-per-sample of this subframe
|
||||
if ((frame_header.channel_assignment == 8) and
|
||||
(channel_number == 1)):
|
||||
#if channel is stored as left+difference
|
||||
#and this is the difference, add 1 bit
|
||||
bits_per_sample = FlacReader.SAMPLE_SIZE[
|
||||
frame_header.bits_per_sample] + 1
|
||||
|
||||
elif ((frame_header.channel_assignment == 9) and
|
||||
(channel_number == 0)):
|
||||
#if channel is stored as difference+right
|
||||
#and this is the difference, add 1 bit
|
||||
bits_per_sample = FlacReader.SAMPLE_SIZE[
|
||||
frame_header.bits_per_sample] + 1
|
||||
|
||||
elif ((frame_header.channel_assignment == 10) and
|
||||
(channel_number == 1)):
|
||||
#if channel is stored as average+difference
|
||||
#and this is the difference, add 1 bit
|
||||
bits_per_sample = FlacReader.SAMPLE_SIZE[
|
||||
frame_header.bits_per_sample] + 1
|
||||
|
||||
else:
|
||||
#otherwise, use the number from the frame header
|
||||
bits_per_sample = FlacReader.SAMPLE_SIZE[
|
||||
frame_header.bits_per_sample]
|
||||
|
||||
|
||||
if (subframe_header.has_wasted_bits_per_sample):
|
||||
bits_per_sample -= subframe_header.wasted_bits_per_sample
|
||||
|
||||
if (subframe_header.subframe_type == 0):
|
||||
subframe = self.read_subframe_constant(block_size, bits_per_sample)
|
||||
|
||||
elif (subframe_header.subframe_type == 1):
|
||||
subframe = self.read_subframe_verbatim(block_size, bits_per_sample)
|
||||
|
||||
elif ((subframe_header.subframe_type & 0x38) == 0x08):
|
||||
subframe = self.read_subframe_fixed(
|
||||
subframe_header.subframe_type & 0x07,
|
||||
block_size,
|
||||
bits_per_sample)
|
||||
|
||||
elif ((subframe_header.subframe_type & 0x20) == 0x20):
|
||||
subframe = self.read_subframe_lpc(
|
||||
(subframe_header.subframe_type & 0x1F) + 1,
|
||||
block_size,
|
||||
bits_per_sample)
|
||||
|
||||
else:
|
||||
raise FlacStreamException('invalid subframe type')
|
||||
|
||||
if (subframe_header.has_wasted_bits_per_sample):
|
||||
return array.array(
|
||||
'i',
|
||||
[i << subframe_header.wasted_bits_per_sample
|
||||
for i in subframe])
|
||||
else:
|
||||
return subframe
|
||||
|
||||
def read_subframe_constant(self, block_size, bits_per_sample):
|
||||
sample = Con.Bits('b',bits_per_sample).parse_stream(
|
||||
self.bitstream)
|
||||
|
||||
subframe = array.array('i',[sample] * block_size)
|
||||
|
||||
return subframe
|
||||
|
||||
|
||||
def read_subframe_verbatim(self, block_size, bits_per_sample):
|
||||
return array.array('i',
|
||||
Con.StrictRepeater(
|
||||
block_size,
|
||||
Con.Bits("samples",
|
||||
bits_per_sample,
|
||||
signed=True)).parse_stream(self.bitstream))
|
||||
|
||||
|
||||
def read_subframe_fixed(self, order, block_size, bits_per_sample):
|
||||
samples = Con.StrictRepeater(
|
||||
order,
|
||||
Con.Bits("warm_up_samples",
|
||||
bits_per_sample,
|
||||
signed=True))
|
||||
|
||||
subframe = array.array('i',
|
||||
samples.parse_stream(self.bitstream))
|
||||
|
||||
residual = self.read_residual(block_size,order)
|
||||
|
||||
fixed_func = self.FIXED_FUNCTIONS[order]
|
||||
|
||||
for i in xrange(len(subframe),block_size):
|
||||
fixed_func(subframe,residual,i)
|
||||
|
||||
return subframe
|
||||
|
||||
|
||||
def read_subframe_lpc(self, order, block_size, bits_per_sample):
|
||||
samples = Con.StrictRepeater(
|
||||
order,
|
||||
Con.Bits("warm_up_samples",
|
||||
bits_per_sample,
|
||||
signed=True))
|
||||
|
||||
subframe = array.array('i',
|
||||
samples.parse_stream(self.bitstream))
|
||||
|
||||
lpc_precision = Con.Bits('lpc_precision',
|
||||
4).parse_stream(self.bitstream) + 1
|
||||
|
||||
lpc_shift = Con.Bits('lpc_shift',
|
||||
5).parse_stream(self.bitstream)
|
||||
|
||||
coefficients = array.array('i',
|
||||
Con.StrictRepeater(
|
||||
order,
|
||||
Con.Bits('coefficients',
|
||||
lpc_precision,
|
||||
signed=True)).parse_stream(self.bitstream))
|
||||
|
||||
residual = self.read_residual(block_size, order)
|
||||
|
||||
for i in xrange(len(subframe),block_size):
|
||||
subframe.insert(i,
|
||||
(sum(
|
||||
[coefficients[j] * subframe[i - j - 1] for j in
|
||||
xrange(0,len(coefficients))]) >> lpc_shift) + \
|
||||
residual[i])
|
||||
|
||||
return subframe
|
||||
|
||||
|
||||
def read_residual(self, block_size, predictor_order):
|
||||
rice = array.array('i')
|
||||
|
||||
#add some dummy rice so that the Rice index matches
|
||||
#that of the rest of the subframe
|
||||
for i in xrange(predictor_order):
|
||||
rice.append(0)
|
||||
|
||||
coding_method = self.bitstream.read(2)
|
||||
if (coding_method == '\x00\x00'):
|
||||
rice2 = False
|
||||
elif (coding_method == '\x00\x01'):
|
||||
rice2 = True
|
||||
else:
|
||||
raise FlacStreamException('invalid residual coding method')
|
||||
|
||||
partition_order = Con.Bits('partition_order',4).parse_stream(
|
||||
self.bitstream)
|
||||
|
||||
if (partition_order > 0):
|
||||
total_samples = ((block_size / 2 ** partition_order) -
|
||||
predictor_order)
|
||||
rice.extend(self.read_encoded_rice(total_samples,rice2))
|
||||
|
||||
for i in xrange(1,2 ** partition_order):
|
||||
total_samples = (block_size / 2 ** partition_order)
|
||||
|
||||
rice.extend(self.read_encoded_rice(total_samples,rice2))
|
||||
else:
|
||||
rice.extend(self.read_encoded_rice(block_size - predictor_order,
|
||||
rice2))
|
||||
|
||||
return rice
|
||||
|
||||
|
||||
    def read_encoded_rice(self, total_samples, rice2=False):
        bin_to_int = Con.lib.binary.bin_to_int

        samples = array.array('i')

        if (not rice2):
            rice_parameter = Con.Bits('rice_parameter', 4).parse_stream(
                self.bitstream)
        else:
            rice_parameter = Con.Bits('rice_parameter', 5).parse_stream(
                self.bitstream)

        if (rice_parameter != 0xF):
            #a Rice encoded residual
            for x in xrange(total_samples):

                #count the number of 0 bits before the next 1 bit
                #(unary encoding)
                #to find our most significant bits
                msb = 0
                s = self.bitstream.read(1)
                while (s != '\x01'):
                    msb += 1
                    s = self.bitstream.read(1)

                #grab the proper number of least significant bits
                lsb = bin_to_int(self.bitstream.read(rice_parameter))

                #combine msb and lsb to get the Rice-encoded value
                value = (msb << rice_parameter) | lsb
                if ((value & 0x1) == 0x1):  #negative
                    samples.append(-(value >> 1) - 1)
                else:  #positive
                    samples.append(value >> 1)
        else:
            #unencoded residual

            bits_per_sample = Con.Bits('escape_code', 5).parse_stream(
                self.bitstream)

            sample = Con.Bits("sample", bits_per_sample, signed=True)

            for x in xrange(total_samples):
                samples.append(sample.parse_stream(self.bitstream))

        return samples

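    #A worked example of the Rice decode above (illustrative only): with
    #rice_parameter = 2, a residual stored as the unary prefix "0001"
    #(msb = 3) followed by the two-bit lsb "01" yields
    #value = (3 << 2) | 1 = 13; 13 is odd, so the decoded sample is
    #-(13 >> 1) - 1 = -7.  An even value of 12 would decode to 12 >> 1 = 6.
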
###############################
#Checksum calculation functions
###############################

CRC8TABLE = (0x00, 0x07, 0x0E, 0x09, 0x1C, 0x1B, 0x12, 0x15,
|
||||
0x38, 0x3F, 0x36, 0x31, 0x24, 0x23, 0x2A, 0x2D,
|
||||
0x70, 0x77, 0x7E, 0x79, 0x6C, 0x6B, 0x62, 0x65,
|
||||
0x48, 0x4F, 0x46, 0x41, 0x54, 0x53, 0x5A, 0x5D,
|
||||
0xE0, 0xE7, 0xEE, 0xE9, 0xFC, 0xFB, 0xF2, 0xF5,
|
||||
0xD8, 0xDF, 0xD6, 0xD1, 0xC4, 0xC3, 0xCA, 0xCD,
|
||||
0x90, 0x97, 0x9E, 0x99, 0x8C, 0x8B, 0x82, 0x85,
|
||||
0xA8, 0xAF, 0xA6, 0xA1, 0xB4, 0xB3, 0xBA, 0xBD,
|
||||
0xC7, 0xC0, 0xC9, 0xCE, 0xDB, 0xDC, 0xD5, 0xD2,
|
||||
0xFF, 0xF8, 0xF1, 0xF6, 0xE3, 0xE4, 0xED, 0xEA,
|
||||
0xB7, 0xB0, 0xB9, 0xBE, 0xAB, 0xAC, 0xA5, 0xA2,
|
||||
0x8F, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9D, 0x9A,
|
||||
0x27, 0x20, 0x29, 0x2E, 0x3B, 0x3C, 0x35, 0x32,
|
||||
0x1F, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0D, 0x0A,
|
||||
0x57, 0x50, 0x59, 0x5E, 0x4B, 0x4C, 0x45, 0x42,
|
||||
0x6F, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7D, 0x7A,
|
||||
0x89, 0x8E, 0x87, 0x80, 0x95, 0x92, 0x9B, 0x9C,
|
||||
0xB1, 0xB6, 0xBF, 0xB8, 0xAD, 0xAA, 0xA3, 0xA4,
|
||||
0xF9, 0xFE, 0xF7, 0xF0, 0xE5, 0xE2, 0xEB, 0xEC,
|
||||
0xC1, 0xC6, 0xCF, 0xC8, 0xDD, 0xDA, 0xD3, 0xD4,
|
||||
0x69, 0x6E, 0x67, 0x60, 0x75, 0x72, 0x7B, 0x7C,
|
||||
0x51, 0x56, 0x5F, 0x58, 0x4D, 0x4A, 0x43, 0x44,
|
||||
0x19, 0x1E, 0x17, 0x10, 0x05, 0x02, 0x0B, 0x0C,
|
||||
0x21, 0x26, 0x2F, 0x28, 0x3D, 0x3A, 0x33, 0x34,
|
||||
0x4E, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5C, 0x5B,
|
||||
0x76, 0x71, 0x78, 0x7F, 0x6A, 0x6D, 0x64, 0x63,
|
||||
0x3E, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2C, 0x2B,
|
||||
0x06, 0x01, 0x08, 0x0F, 0x1A, 0x1D, 0x14, 0x13,
|
||||
0xAE, 0xA9, 0xA0, 0xA7, 0xB2, 0xB5, 0xBC, 0xBB,
|
||||
0x96, 0x91, 0x98, 0x9F, 0x8A, 0x8D, 0x84, 0x83,
|
||||
0xDE, 0xD9, 0xD0, 0xD7, 0xC2, 0xC5, 0xCC, 0xCB,
|
||||
0xE6, 0xE1, 0xE8, 0xEF, 0xFA, 0xFD, 0xF4, 0xF3)
|
||||
|
||||
def crc8(data, start=0):
    value = start

    for i in map(ord, data):
        value = CRC8TABLE[value ^ i]

    return value

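#CRC8TABLE above is the byte-at-a-time lookup table for the CRC-8 used by
#FLAC frame headers (polynomial 0x07, i.e. x^8 + x^2 + x + 1).  As an
#illustrative sketch (not part of this module), the table can be
#regenerated like so:
#
#    def make_crc8_table(poly=0x07):
#        table = []
#        for byte in range(256):
#            crc = byte
#            for _ in range(8):
#                crc = ((crc << 1) ^ poly if (crc & 0x80)
#                       else (crc << 1)) & 0xFF
#            table.append(crc)
#        return tuple(table)
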
CRC16TABLE = (0x0000,0x8005,0x800f,0x000a,0x801b,0x001e,0x0014,0x8011,
|
||||
0x8033,0x0036,0x003c,0x8039,0x0028,0x802d,0x8027,0x0022,
|
||||
0x8063,0x0066,0x006c,0x8069,0x0078,0x807d,0x8077,0x0072,
|
||||
0x0050,0x8055,0x805f,0x005a,0x804b,0x004e,0x0044,0x8041,
|
||||
0x80c3,0x00c6,0x00cc,0x80c9,0x00d8,0x80dd,0x80d7,0x00d2,
|
||||
0x00f0,0x80f5,0x80ff,0x00fa,0x80eb,0x00ee,0x00e4,0x80e1,
|
||||
0x00a0,0x80a5,0x80af,0x00aa,0x80bb,0x00be,0x00b4,0x80b1,
|
||||
0x8093,0x0096,0x009c,0x8099,0x0088,0x808d,0x8087,0x0082,
|
||||
0x8183,0x0186,0x018c,0x8189,0x0198,0x819d,0x8197,0x0192,
|
||||
0x01b0,0x81b5,0x81bf,0x01ba,0x81ab,0x01ae,0x01a4,0x81a1,
|
||||
0x01e0,0x81e5,0x81ef,0x01ea,0x81fb,0x01fe,0x01f4,0x81f1,
|
||||
0x81d3,0x01d6,0x01dc,0x81d9,0x01c8,0x81cd,0x81c7,0x01c2,
|
||||
0x0140,0x8145,0x814f,0x014a,0x815b,0x015e,0x0154,0x8151,
|
||||
0x8173,0x0176,0x017c,0x8179,0x0168,0x816d,0x8167,0x0162,
|
||||
0x8123,0x0126,0x012c,0x8129,0x0138,0x813d,0x8137,0x0132,
|
||||
0x0110,0x8115,0x811f,0x011a,0x810b,0x010e,0x0104,0x8101,
|
||||
0x8303,0x0306,0x030c,0x8309,0x0318,0x831d,0x8317,0x0312,
|
||||
0x0330,0x8335,0x833f,0x033a,0x832b,0x032e,0x0324,0x8321,
|
||||
0x0360,0x8365,0x836f,0x036a,0x837b,0x037e,0x0374,0x8371,
|
||||
0x8353,0x0356,0x035c,0x8359,0x0348,0x834d,0x8347,0x0342,
|
||||
0x03c0,0x83c5,0x83cf,0x03ca,0x83db,0x03de,0x03d4,0x83d1,
|
||||
0x83f3,0x03f6,0x03fc,0x83f9,0x03e8,0x83ed,0x83e7,0x03e2,
|
||||
0x83a3,0x03a6,0x03ac,0x83a9,0x03b8,0x83bd,0x83b7,0x03b2,
|
||||
0x0390,0x8395,0x839f,0x039a,0x838b,0x038e,0x0384,0x8381,
|
||||
0x0280,0x8285,0x828f,0x028a,0x829b,0x029e,0x0294,0x8291,
|
||||
0x82b3,0x02b6,0x02bc,0x82b9,0x02a8,0x82ad,0x82a7,0x02a2,
|
||||
0x82e3,0x02e6,0x02ec,0x82e9,0x02f8,0x82fd,0x82f7,0x02f2,
|
||||
0x02d0,0x82d5,0x82df,0x02da,0x82cb,0x02ce,0x02c4,0x82c1,
|
||||
0x8243,0x0246,0x024c,0x8249,0x0258,0x825d,0x8257,0x0252,
|
||||
0x0270,0x8275,0x827f,0x027a,0x826b,0x026e,0x0264,0x8261,
|
||||
0x0220,0x8225,0x822f,0x022a,0x823b,0x023e,0x0234,0x8231,
|
||||
0x8213,0x0216,0x021c,0x8219,0x0208,0x820d,0x8207,0x0202)
|
||||
|
||||
def crc16(data, start=0):
    value = start

    for i in map(ord, data):
        value = ((value << 8) ^ CRC16TABLE[(value >> 8) ^ i]) & 0xFFFF

    return value

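#CRC16TABLE is the corresponding table for the CRC-16 (polynomial 0x8005)
#that covers an entire FLAC frame.  A hedged usage sketch: after reading a
#whole frame's bytes into a string, crc16(frame_bytes[:-2]) should match
#the big-endian 16-bit checksum stored in the frame's final two bytes.
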
#BufferedStream stores the data that passes through read()
#so that checksums can be calculated from it.
#Be sure to reset the buffer as needed.
class BufferedStream:
    def __init__(self, stream):
        self.stream = stream
        self.buffer = cStringIO.StringIO()

    def read(self, count):
        s = self.stream.read(count)
        self.buffer.write(s)
        return s

    def seek(self, offset, whence=0):
        self.stream.seek(offset, whence)

    def tell(self):
        return self.stream.tell()

    def close(self):
        self.stream.close()

    def reset_buffer(self):
        self.buffer.close()
        self.buffer = cStringIO.StringIO()

    def getvalue(self):
        return self.buffer.getvalue()

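#A short usage sketch (assumed, not taken from this module): wrap the raw
#FLAC file so that every byte the parser consumes is also checksummed.
#
#    stream = BufferedStream(open("track.flac", "rb"))
#    stream.reset_buffer()
#    header = stream.read(4)          #these bytes also land in the buffer
#    print crc8(stream.getvalue())    #CRC-8 of everything read so far
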
class FlacPCMReader(audiotools.PCMReader):
    #flac_file should be a file-like stream of FLAC data
    def __init__(self, flac_file):
        self.flacreader = FlacReader(flac_file)
        self.sample_rate = self.flacreader.streaminfo.samplerate
        self.channels = self.flacreader.streaminfo.channels + 1
        self.bits_per_sample = self.flacreader.streaminfo.bits_per_sample + 1
        self.process = None

        self.buffer = []

    #this won't return even close to the expected number of bytes
    #(though that won't really break anything)
    def read(self, bytes):
        return self.flacreader.read_frame()

    def close(self):
        self.flacreader.close()
BIN    Melodia/resources/audiotools/pcm.so (Executable file; binary file not shown)

804    Melodia/resources/audiotools/player.py (Normal file)
@@ -0,0 +1,804 @@
#!/usr/bin/python
|
||||
|
||||
#Audio Tools, a module and set of tools for manipulating audio data
|
||||
#Copyright (C) 2007-2011 Brian Langenberger
|
||||
|
||||
#This program is free software; you can redistribute it and/or modify
|
||||
#it under the terms of the GNU General Public License as published by
|
||||
#the Free Software Foundation; either version 2 of the License, or
|
||||
#(at your option) any later version.
|
||||
|
||||
#This program is distributed in the hope that it will be useful,
|
||||
#but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
#GNU General Public License for more details.
|
||||
|
||||
#You should have received a copy of the GNU General Public License
|
||||
#along with this program; if not, write to the Free Software
|
||||
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
|
||||
|
||||
import os
|
||||
import sys
|
||||
import cPickle
|
||||
import select
|
||||
import audiotools
|
||||
import time
|
||||
import Queue
|
||||
import threading
|
||||
|
||||
|
||||
(RG_NO_REPLAYGAIN, RG_TRACK_GAIN, RG_ALBUM_GAIN) = range(3)
|
||||
|
||||
|
||||
class Player:
|
||||
"""A class for operating an audio player.
|
||||
|
||||
The player itself runs in a separate thread,
|
||||
which this sends commands to."""
|
||||
|
||||
def __init__(self, audio_output,
|
||||
replay_gain=RG_NO_REPLAYGAIN,
|
||||
next_track_callback=lambda: None):
|
||||
"""audio_output is an AudioOutput subclass.
|
||||
replay_gain is RG_NO_REPLAYGAIN, RG_TRACK_GAIN or RG_ALBUM_GAIN,
|
||||
indicating how the player should apply ReplayGain.
|
||||
next_track_callback is a function with no arguments
|
||||
which is called by the player when the current track is finished."""
|
||||
|
||||
self.command_queue = Queue.Queue()
|
||||
self.worker = PlayerThread(audio_output,
|
||||
self.command_queue,
|
||||
replay_gain)
|
||||
self.thread = threading.Thread(target=self.worker.run,
|
||||
args=(next_track_callback,))
|
||||
self.thread.daemon = True
|
||||
self.thread.start()
|
||||
|
||||
def open(self, track):
|
||||
"""opens the given AudioFile for playing
|
||||
|
||||
stops playing the current file, if any"""
|
||||
|
||||
self.track = track
|
||||
self.command_queue.put(("open", [track]))
|
||||
|
||||
def play(self):
|
||||
"""begins or resumes playing an opened AudioFile, if any"""
|
||||
|
||||
self.command_queue.put(("play", []))
|
||||
|
||||
def set_replay_gain(self, replay_gain):
|
||||
"""sets the given ReplayGain level to apply during playback
|
||||
|
||||
Choose from RG_NO_REPLAYGAIN, RG_TRACK_GAIN or RG_ALBUM_GAIN
|
||||
ReplayGain cannot be applied mid-playback.
|
||||
One must stop() and play() a file for it to take effect."""
|
||||
|
||||
self.command_queue.put(("set_replay_gain", [replay_gain]))
|
||||
|
||||
def pause(self):
|
||||
"""pauses playback of the current file
|
||||
|
||||
Playback may be resumed with play() or toggle_play_pause()"""
|
||||
|
||||
self.command_queue.put(("pause", []))
|
||||
|
||||
def toggle_play_pause(self):
|
||||
"""pauses the file if playing, play the file if paused"""
|
||||
|
||||
self.command_queue.put(("toggle_play_pause", []))
|
||||
|
||||
def stop(self):
|
||||
"""stops playback of the current file
|
||||
|
||||
If play() is called, playback will start from the beginning."""
|
||||
|
||||
self.command_queue.put(("stop", []))
|
||||
|
||||
def close(self):
|
||||
"""closes the player for playback
|
||||
|
||||
The player thread is halted and the AudioOutput is closed."""
|
||||
|
||||
self.command_queue.put(("exit", []))
|
||||
|
||||
def progress(self):
|
||||
"""returns a (pcm_frames_played, pcm_frames_total) tuple
|
||||
|
||||
This indicates the current playback status in PCM frames."""
|
||||
|
||||
return (self.worker.frames_played, self.worker.total_frames)
|
||||
|
||||
|
||||
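#A minimal usage sketch of the Player class (assumed, not part of the
#module itself; NULLAudioOutput is defined further down in this file and
#the track path is hypothetical):
#
#    player = Player(NULLAudioOutput())
#    player.open(audiotools.open("track.flac"))
#    player.play()
#    (frames_played, frames_total) = player.progress()
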
(PLAYER_STOPPED, PLAYER_PAUSED, PLAYER_PLAYING) = range(3)
|
||||
|
||||
|
||||
class PlayerThread:
|
||||
"""The Player class' subthread.
|
||||
|
||||
This should not be instantiated directly;
|
||||
Player will do so automatically."""
|
||||
|
||||
def __init__(self, audio_output, command_queue,
|
||||
replay_gain=RG_NO_REPLAYGAIN):
|
||||
self.audio_output = audio_output
|
||||
self.command_queue = command_queue
|
||||
self.replay_gain = replay_gain
|
||||
|
||||
self.track = None
|
||||
self.pcmconverter = None
|
||||
self.frames_played = 0
|
||||
self.total_frames = 0
|
||||
self.state = PLAYER_STOPPED
|
||||
|
||||
def open(self, track):
|
||||
self.stop()
|
||||
self.track = track
|
||||
self.frames_played = 0
|
||||
self.total_frames = track.total_frames()
|
||||
|
||||
def pause(self):
|
||||
if (self.state == PLAYER_PLAYING):
|
||||
self.state = PLAYER_PAUSED
|
||||
|
||||
def play(self):
|
||||
if (self.track is not None):
|
||||
if (self.state == PLAYER_STOPPED):
|
||||
if (self.replay_gain == RG_TRACK_GAIN):
|
||||
from audiotools.replaygain import ReplayGainReader
|
||||
replay_gain = self.track.replay_gain()
|
||||
|
||||
if (replay_gain is not None):
|
||||
pcmreader = ReplayGainReader(
|
||||
self.track.to_pcm(),
|
||||
replay_gain.track_gain,
|
||||
replay_gain.track_peak)
|
||||
else:
|
||||
pcmreader = self.track.to_pcm()
|
||||
elif (self.replay_gain == RG_ALBUM_GAIN):
|
||||
from audiotools.replaygain import ReplayGainReader
|
||||
replay_gain = self.track.replay_gain()
|
||||
|
||||
if (replay_gain is not None):
|
||||
pcmreader = ReplayGainReader(
|
||||
self.track.to_pcm(),
|
||||
replay_gain.album_gain,
|
||||
replay_gain.album_peak)
|
||||
else:
|
||||
pcmreader = self.track.to_pcm()
|
||||
else:
|
||||
pcmreader = self.track.to_pcm()
|
||||
|
||||
if (not self.audio_output.compatible(pcmreader)):
|
||||
self.audio_output.init(
|
||||
sample_rate=pcmreader.sample_rate,
|
||||
channels=pcmreader.channels,
|
||||
channel_mask=pcmreader.channel_mask,
|
||||
bits_per_sample=pcmreader.bits_per_sample)
|
||||
self.pcmconverter = ThreadedPCMConverter(
|
||||
pcmreader,
|
||||
self.audio_output.framelist_converter())
|
||||
self.frames_played = 0
|
||||
self.state = PLAYER_PLAYING
|
||||
elif (self.state == PLAYER_PAUSED):
|
||||
self.state = PLAYER_PLAYING
|
||||
elif (self.state == PLAYER_PLAYING):
|
||||
pass
|
||||
|
||||
def set_replay_gain(self, replay_gain):
|
||||
self.replay_gain = replay_gain
|
||||
|
||||
def toggle_play_pause(self):
|
||||
if (self.state == PLAYER_PLAYING):
|
||||
self.pause()
|
||||
elif ((self.state == PLAYER_PAUSED) or
|
||||
(self.state == PLAYER_STOPPED)):
|
||||
self.play()
|
||||
|
||||
def stop(self):
|
||||
if (self.pcmconverter is not None):
|
||||
self.pcmconverter.close()
|
||||
del(self.pcmconverter)
|
||||
self.pcmconverter = None
|
||||
self.frames_played = 0
|
||||
self.state = PLAYER_STOPPED
|
||||
|
||||
def run(self, next_track_callback=lambda: None):
|
||||
while (True):
|
||||
if ((self.state == PLAYER_STOPPED) or
|
||||
(self.state == PLAYER_PAUSED)):
|
||||
(command, args) = self.command_queue.get(True)
|
||||
if (command == "exit"):
|
||||
self.audio_output.close()
|
||||
return
|
||||
else:
|
||||
getattr(self, command)(*args)
|
||||
else:
|
||||
try:
|
||||
(command, args) = self.command_queue.get_nowait()
|
||||
if (command == "exit"):
|
||||
return
|
||||
else:
|
||||
getattr(self, command)(*args)
|
||||
except Queue.Empty:
|
||||
if (self.frames_played < self.total_frames):
|
||||
(data, frames) = self.pcmconverter.read()
|
||||
if (frames > 0):
|
||||
self.audio_output.play(data)
|
||||
self.frames_played += frames
|
||||
if (self.frames_played >= self.total_frames):
|
||||
next_track_callback()
|
||||
else:
|
||||
self.frames_played = self.total_frames
|
||||
next_track_callback()
|
||||
else:
|
||||
self.stop()
|
||||
|
||||
|
||||
class CDPlayer:
|
||||
"""A class for operating a CDDA player.
|
||||
|
||||
The player itself runs in a separate thread,
|
||||
which this sends commands to."""
|
||||
|
||||
def __init__(self, cdda, audio_output,
|
||||
next_track_callback=lambda: None):
|
||||
"""cdda is a audiotools.CDDA object.
|
||||
audio_output is an AudioOutput subclass.
|
||||
next_track_callback is a function with no arguments
|
||||
which is called by the player when the current track is finished."""
|
||||
|
||||
self.command_queue = Queue.Queue()
|
||||
self.worker = CDPlayerThread(cdda,
|
||||
audio_output,
|
||||
self.command_queue)
|
||||
self.thread = threading.Thread(target=self.worker.run,
|
||||
args=(next_track_callback,))
|
||||
self.thread.daemon = True
|
||||
self.thread.start()
|
||||
|
||||
def open(self, track_number):
|
||||
"""track_number indicates which track to open, starting from 1
|
||||
|
||||
stops playing the current track, if any"""
|
||||
|
||||
self.command_queue.put(("open", [track_number]))
|
||||
|
||||
def play(self):
|
||||
"""begins or resumes playing the currently open track, if any"""
|
||||
|
||||
self.command_queue.put(("play", []))
|
||||
|
||||
def pause(self):
|
||||
"""pauses playback of the current track
|
||||
|
||||
Playback may be resumed with play() or toggle_play_pause()"""
|
||||
|
||||
self.command_queue.put(("pause", []))
|
||||
|
||||
def toggle_play_pause(self):
|
||||
"""pauses the track if playing, play the track if paused"""
|
||||
|
||||
self.command_queue.put(("toggle_play_pause", []))
|
||||
|
||||
def stop(self):
|
||||
"""stops playback of the current track
|
||||
|
||||
If play() is called, playback will start from the beginning."""
|
||||
|
||||
self.command_queue.put(("stop", []))
|
||||
|
||||
def close(self):
|
||||
"""closes the player for playback
|
||||
|
||||
The player thread is halted and the AudioOutput is closed."""
|
||||
|
||||
self.command_queue.put(("exit", []))
|
||||
|
||||
def progress(self):
|
||||
"""returns a (pcm_frames_played, pcm_frames_total) tuple
|
||||
|
||||
This indicates the current playback status in PCM frames."""
|
||||
|
||||
return (self.worker.frames_played, self.worker.total_frames)
|
||||
|
||||
|
||||
class CDPlayerThread:
|
||||
"""The CDPlayer class' subthread.
|
||||
|
||||
This should not be instantiated directly;
|
||||
CDPlayer will do so automatically."""
|
||||
|
||||
def __init__(self, cdda, audio_output, command_queue):
|
||||
self.cdda = cdda
|
||||
self.audio_output = audio_output
|
||||
self.command_queue = command_queue
|
||||
|
||||
self.audio_output.init(
|
||||
sample_rate=44100,
|
||||
channels=2,
|
||||
channel_mask=3,
|
||||
bits_per_sample=16)
|
||||
self.framelist_converter = self.audio_output.framelist_converter()
|
||||
|
||||
self.track = None
|
||||
self.pcmconverter = None
|
||||
self.frames_played = 0
|
||||
self.total_frames = 0
|
||||
self.state = PLAYER_STOPPED
|
||||
|
||||
def open(self, track_number):
|
||||
self.stop()
|
||||
self.track = self.cdda[track_number]
|
||||
self.frames_played = 0
|
||||
self.total_frames = self.track.length() * 44100 / 75
|
||||
|
||||
def play(self):
|
||||
if (self.track is not None):
|
||||
if (self.state == PLAYER_STOPPED):
|
||||
self.pcmconverter = ThreadedPCMConverter(
|
||||
self.track,
|
||||
self.framelist_converter)
|
||||
self.frames_played = 0
|
||||
self.state = PLAYER_PLAYING
|
||||
elif (self.state == PLAYER_PAUSED):
|
||||
self.state = PLAYER_PLAYING
|
||||
elif (self.state == PLAYER_PLAYING):
|
||||
pass
|
||||
|
||||
def pause(self):
|
||||
if (self.state == PLAYER_PLAYING):
|
||||
self.state = PLAYER_PAUSED
|
||||
|
||||
def toggle_play_pause(self):
|
||||
if (self.state == PLAYER_PLAYING):
|
||||
self.pause()
|
||||
elif ((self.state == PLAYER_PAUSED) or
|
||||
(self.state == PLAYER_STOPPED)):
|
||||
self.play()
|
||||
|
||||
def stop(self):
|
||||
if (self.pcmconverter is not None):
|
||||
self.pcmconverter.close()
|
||||
del(self.pcmconverter)
|
||||
self.pcmconverter = None
|
||||
self.frames_played = 0
|
||||
self.state = PLAYER_STOPPED
|
||||
|
||||
def run(self, next_track_callback=lambda: None):
|
||||
while (True):
|
||||
if ((self.state == PLAYER_STOPPED) or
|
||||
(self.state == PLAYER_PAUSED)):
|
||||
(command, args) = self.command_queue.get(True)
|
||||
if (command == "exit"):
|
||||
self.audio_output.close()
|
||||
return
|
||||
else:
|
||||
getattr(self, command)(*args)
|
||||
else:
|
||||
try:
|
||||
(command, args) = self.command_queue.get_nowait()
|
||||
if (command == "exit"):
|
||||
return
|
||||
else:
|
||||
getattr(self, command)(*args)
|
||||
except Queue.Empty:
|
||||
if (self.frames_played < self.total_frames):
|
||||
(data, frames) = self.pcmconverter.read()
|
||||
if (frames > 0):
|
||||
self.audio_output.play(data)
|
||||
self.frames_played += frames
|
||||
if (self.frames_played >= self.total_frames):
|
||||
next_track_callback()
|
||||
else:
|
||||
self.frames_played = self.total_frames
|
||||
next_track_callback()
|
||||
else:
|
||||
self.stop()
|
||||
|
||||
|
||||
class ThreadedPCMConverter:
|
||||
"""A class for decoding a PCMReader in a seperate thread.
|
||||
|
||||
PCMReader's data is queued such that even if decoding and
|
||||
conversion are relatively time-consuming, read() will
|
||||
continue smoothly."""
|
||||
|
||||
def __init__(self, pcmreader, converter):
|
||||
"""pcmreader is a PCMReader object.
|
||||
|
||||
converter is a function which takes a FrameList
|
||||
and returns an object suitable for the current AudioOutput object.
|
||||
Upon conclusion, the PCMReader is automatically closed."""
|
||||
|
||||
self.decoded_data = Queue.Queue()
|
||||
self.stop_decoding = threading.Event()
|
||||
|
||||
def convert(pcmreader, buffer_size, converter, decoded_data,
|
||||
stop_decoding):
|
||||
try:
|
||||
frame = pcmreader.read(buffer_size)
|
||||
while ((not stop_decoding.is_set()) and (len(frame) > 0)):
|
||||
decoded_data.put((converter(frame), frame.frames))
|
||||
frame = pcmreader.read(buffer_size)
|
||||
else:
|
||||
decoded_data.put((None, 0))
|
||||
pcmreader.close()
|
||||
except (ValueError, IOError):
|
||||
decoded_data.put((None, 0))
|
||||
pcmreader.close()
|
||||
|
||||
buffer_size = (pcmreader.sample_rate *
|
||||
pcmreader.channels *
|
||||
(pcmreader.bits_per_sample / 8)) / 20
|
||||
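        #(illustrative note: this works out to roughly one-twentieth of a
        # second of PCM data per read; for example, 44100 Hz * 2 channels *
        # 2 bytes per sample / 20 comes to 8820 bytes)
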
|
||||
self.thread = threading.Thread(
|
||||
target=convert,
|
||||
args=(pcmreader,
|
||||
buffer_size,
|
||||
converter,
|
||||
self.decoded_data,
|
||||
self.stop_decoding))
|
||||
self.thread.daemon = True
|
||||
self.thread.start()
|
||||
|
||||
def read(self):
|
||||
"""returns a (converted_data, pcm_frame_count) tuple"""
|
||||
|
||||
return self.decoded_data.get(True)
|
||||
|
||||
def close(self):
|
||||
"""stops the decoding thread and closes the PCMReader"""
|
||||
|
||||
self.stop_decoding.set()
|
||||
self.thread.join()
|
||||
|
||||
|
||||
class AudioOutput:
|
||||
"""An abstract parent class for playing audio."""
|
||||
|
||||
def __init__(self):
|
||||
self.sample_rate = 0
|
||||
self.channels = 0
|
||||
self.channel_mask = 0
|
||||
self.bits_per_sample = 0
|
||||
self.initialized = False
|
||||
|
||||
def compatible(self, pcmreader):
|
||||
"""Returns True if the given pcmreader is compatible.
|
||||
|
||||
If False, one is expected to open a new output stream
|
||||
which is compatible."""
|
||||
|
||||
return ((self.sample_rate == pcmreader.sample_rate) and
|
||||
(self.channels == pcmreader.channels) and
|
||||
(self.channel_mask == pcmreader.channel_mask) and
|
||||
(self.bits_per_sample == pcmreader.bits_per_sample))
|
||||
|
||||
def framelist_converter(self):
|
||||
"""Returns a function which converts framelist objects
|
||||
|
||||
to objects acceptable by our play() method."""
|
||||
|
||||
raise NotImplementedError()
|
||||
|
||||
def init(self, sample_rate, channels, channel_mask, bits_per_sample):
|
||||
"""Initializes the output stream.
|
||||
|
||||
This *must* be called prior to play() and close().
|
||||
The general flow of audio playing is:
|
||||
|
||||
>>> pcm = audiofile.to_pcm()
|
||||
>>> player = AudioOutput()
|
||||
>>> player.init(pcm.sample_rate,
|
||||
... pcm.channels,
|
||||
... pcm.channel_mask,
|
||||
... pcm.bits_per_sample)
|
||||
>>> convert = player.framelist_converter()
|
||||
>>> frame = pcm.read(1024)
|
||||
>>> while (len(frame) > 0):
|
||||
... player.play(convert(frame))
|
||||
... frame = pcm.read(1024)
|
||||
>>> player.close()
|
||||
"""
|
||||
|
||||
raise NotImplementedError()
|
||||
|
||||
def play(self, data):
|
||||
"""plays a chunk of converted data"""
|
||||
|
||||
raise NotImplementedError()
|
||||
|
||||
def close(self):
|
||||
"""closes the output stream"""
|
||||
|
||||
raise NotImplementedError()
|
||||
|
||||
@classmethod
|
||||
def available(cls):
|
||||
"""returns True if the AudioOutput is available on the system"""
|
||||
|
||||
return False
|
||||
|
||||
|
||||
class NULLAudioOutput(AudioOutput):
|
||||
"""An AudioOutput subclass which does not actually play anything.
|
||||
|
||||
    Although this consumes audio data at the rate it would normally
    be played, it generates no output."""
|
||||
|
||||
NAME = "NULL"
|
||||
|
||||
def framelist_converter(self):
|
||||
"""Returns a function which converts framelist objects
|
||||
|
||||
to objects acceptable by our play() method."""
|
||||
|
||||
return lambda f: f.frames
|
||||
|
||||
def init(self, sample_rate, channels, channel_mask, bits_per_sample):
|
||||
"""Initializes the output stream.
|
||||
|
||||
This *must* be called prior to play() and close()."""
|
||||
|
||||
self.sample_rate = sample_rate
|
||||
self.channels = channels
|
||||
self.channel_mask = channel_mask
|
||||
self.bits_per_sample = bits_per_sample
|
||||
|
||||
def play(self, data):
|
||||
"""plays a chunk of converted data"""
|
||||
|
||||
time.sleep(float(data) / self.sample_rate)
|
||||
|
||||
def close(self):
|
||||
"""closes the output stream"""
|
||||
|
||||
pass
|
||||
|
||||
@classmethod
|
||||
def available(cls):
|
||||
"""returns True"""
|
||||
|
||||
return True
|
||||
|
||||
|
||||
class OSSAudioOutput(AudioOutput):
|
||||
"""An AudioOutput subclass for OSS output."""
|
||||
|
||||
NAME = "OSS"
|
||||
|
||||
def init(self, sample_rate, channels, channel_mask, bits_per_sample):
|
||||
"""Initializes the output stream.
|
||||
|
||||
This *must* be called prior to play() and close()."""
|
||||
|
||||
if (not self.initialized):
|
||||
import ossaudiodev
|
||||
|
||||
self.sample_rate = sample_rate
|
||||
self.channels = channels
|
||||
self.channel_mask = channel_mask
|
||||
self.bits_per_sample = bits_per_sample
|
||||
|
||||
self.ossaudio = ossaudiodev.open('w')
|
||||
if (self.bits_per_sample == 8):
|
||||
self.ossaudio.setfmt(ossaudiodev.AFMT_S8_LE)
|
||||
elif (self.bits_per_sample == 16):
|
||||
self.ossaudio.setfmt(ossaudiodev.AFMT_S16_LE)
|
||||
elif (self.bits_per_sample == 24):
|
||||
self.ossaudio.setfmt(ossaudiodev.AFMT_S16_LE)
|
||||
else:
|
||||
raise ValueError("Unsupported bits-per-sample")
|
||||
|
||||
self.ossaudio.channels(channels)
|
||||
self.ossaudio.speed(sample_rate)
|
||||
|
||||
self.initialized = True
|
||||
else:
|
||||
self.close()
|
||||
self.init(sample_rate=sample_rate,
|
||||
channels=channels,
|
||||
channel_mask=channel_mask,
|
||||
bits_per_sample=bits_per_sample)
|
||||
|
||||
def framelist_converter(self):
|
||||
"""Returns a function which converts framelist objects
|
||||
|
||||
to objects acceptable by our play() method."""
|
||||
|
||||
if (self.bits_per_sample == 8):
|
||||
return lambda f: f.to_bytes(False, True)
|
||||
elif (self.bits_per_sample == 16):
|
||||
return lambda f: f.to_bytes(False, True)
|
||||
elif (self.bits_per_sample == 24):
|
||||
import audiotools.pcm
|
||||
|
||||
return lambda f: audiotools.pcm.from_list(
|
||||
[i >> 8 for i in list(f)],
|
||||
self.channels, 16, True).to_bytes(False, True)
|
||||
else:
|
||||
raise ValueError("Unsupported bits-per-sample")
|
||||
|
||||
def play(self, data):
|
||||
"""plays a chunk of converted data"""
|
||||
|
||||
self.ossaudio.writeall(data)
|
||||
|
||||
def close(self):
|
||||
"""closes the output stream"""
|
||||
|
||||
if (self.initialized):
|
||||
self.initialized = False
|
||||
self.ossaudio.close()
|
||||
|
||||
@classmethod
|
||||
def available(cls):
|
||||
"""returns True if OSS output is available on the system"""
|
||||
|
||||
try:
|
||||
import ossaudiodev
|
||||
return True
|
||||
except ImportError:
|
||||
return False
|
||||
|
||||
|
||||
class PulseAudioOutput(AudioOutput):
|
||||
"""An AudioOutput subclass for PulseAudio output."""
|
||||
|
||||
NAME = "PulseAudio"
|
||||
|
||||
def init(self, sample_rate, channels, channel_mask, bits_per_sample):
|
||||
"""Initializes the output stream.
|
||||
|
||||
This *must* be called prior to play() and close()."""
|
||||
|
||||
if (not self.initialized):
|
||||
import subprocess
|
||||
|
||||
self.sample_rate = sample_rate
|
||||
self.channels = channels
|
||||
self.channel_mask = channel_mask
|
||||
self.bits_per_sample = bits_per_sample
|
||||
|
||||
if (bits_per_sample == 8):
|
||||
format = "u8"
|
||||
elif (bits_per_sample == 16):
|
||||
format = "s16le"
|
||||
elif (bits_per_sample == 24):
|
||||
format = "s24le"
|
||||
else:
|
||||
raise ValueError("Unsupported bits-per-sample")
|
||||
|
||||
self.pacat = subprocess.Popen(
|
||||
[audiotools.BIN["pacat"],
|
||||
"-n", "Python Audio Tools",
|
||||
"--rate", str(sample_rate),
|
||||
"--format", format,
|
||||
"--channels", str(channels),
|
||||
"--latency-msec", str(100)],
|
||||
stdin=subprocess.PIPE)
|
||||
|
||||
self.initialized = True
|
||||
else:
|
||||
self.close()
|
||||
self.init(sample_rate=sample_rate,
|
||||
channels=channels,
|
||||
channel_mask=channel_mask,
|
||||
bits_per_sample=bits_per_sample)
|
||||
|
||||
def framelist_converter(self):
|
||||
"""Returns a function which converts framelist objects
|
||||
|
||||
to objects acceptable by our play() method."""
|
||||
|
||||
if (self.bits_per_sample == 8):
|
||||
return lambda f: f.to_bytes(True, False)
|
||||
elif (self.bits_per_sample == 16):
|
||||
return lambda f: f.to_bytes(False, True)
|
||||
elif (self.bits_per_sample == 24):
|
||||
return lambda f: f.to_bytes(False, True)
|
||||
else:
|
||||
raise ValueError("Unsupported bits-per-sample")
|
||||
|
||||
def play(self, data):
|
||||
"""plays a chunk of converted data"""
|
||||
|
||||
self.pacat.stdin.write(data)
|
||||
self.pacat.stdin.flush()
|
||||
|
||||
def close(self):
|
||||
"""closes the output stream"""
|
||||
|
||||
if (self.initialized):
|
||||
self.initialized = False
|
||||
self.pacat.stdin.close()
|
||||
self.pacat.wait()
|
||||
|
||||
@classmethod
|
||||
def server_alive(cls):
|
||||
import subprocess
|
||||
|
||||
dev = subprocess.Popen([audiotools.BIN["pactl"], "stat"],
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE)
|
||||
dev.stdout.read()
|
||||
dev.stderr.read()
|
||||
return (dev.wait() == 0)
|
||||
|
||||
@classmethod
|
||||
def available(cls):
|
||||
"""returns True if PulseAudio is available and running on the system"""
|
||||
|
||||
return (audiotools.BIN.can_execute(audiotools.BIN["pacat"]) and
|
||||
audiotools.BIN.can_execute(audiotools.BIN["pactl"]) and
|
||||
cls.server_alive())
|
||||
|
||||
|
||||
class PortAudioOutput(AudioOutput):
|
||||
"""An AudioOutput subclass for PortAudio output."""
|
||||
|
||||
NAME = "PortAudio"
|
||||
|
||||
def init(self, sample_rate, channels, channel_mask, bits_per_sample):
|
||||
"""Initializes the output stream.
|
||||
|
||||
This *must* be called prior to play() and close()."""
|
||||
|
||||
if (not self.initialized):
|
||||
import pyaudio
|
||||
|
||||
self.sample_rate = sample_rate
|
||||
self.channels = channels
|
||||
self.channel_mask = channel_mask
|
||||
self.bits_per_sample = bits_per_sample
|
||||
|
||||
self.pyaudio = pyaudio.PyAudio()
|
||||
self.stream = self.pyaudio.open(
|
||||
format=self.pyaudio.get_format_from_width(
|
||||
self.bits_per_sample / 8, False),
|
||||
channels=self.channels,
|
||||
rate=self.sample_rate,
|
||||
output=True)
|
||||
|
||||
self.initialized = True
|
||||
else:
|
||||
self.close()
|
||||
self.init(sample_rate=sample_rate,
|
||||
channels=channels,
|
||||
channel_mask=channel_mask,
|
||||
bits_per_sample=bits_per_sample)
|
||||
|
||||
def framelist_converter(self):
|
||||
"""Returns a function which converts framelist objects
|
||||
|
||||
to objects acceptable by our play() method."""
|
||||
|
||||
return lambda f: f.to_bytes(False, True)
|
||||
|
||||
def play(self, data):
|
||||
"""plays a chunk of converted data"""
|
||||
|
||||
self.stream.write(data)
|
||||
|
||||
def close(self):
|
||||
"""closes the output stream"""
|
||||
|
||||
if (self.initialized):
|
||||
self.stream.close()
|
||||
self.pyaudio.terminate()
|
||||
self.initialized = False
|
||||
|
||||
@classmethod
|
||||
def available(cls):
|
||||
"""returns True if the AudioOutput is available on the system"""
|
||||
|
||||
try:
|
||||
import pyaudio
|
||||
return True
|
||||
except ImportError:
|
||||
return False
|
||||
|
||||
AUDIO_OUTPUT = (PulseAudioOutput, OSSAudioOutput,
                PortAudioOutput, NULLAudioOutput)
BIN    Melodia/resources/audiotools/prot.so (Executable file; binary file not shown)
BIN    Melodia/resources/audiotools/replaygain.so (Executable file; binary file not shown)

259    Melodia/resources/audiotools/replaygain_old.py (Normal file)
@@ -0,0 +1,259 @@
#!/usr/bin/python
|
||||
|
||||
#Audio Tools, a module and set of tools for manipulating audio data
|
||||
#Copyright (C) 2007-2011 Brian Langenberger
|
||||
|
||||
#This program is free software; you can redistribute it and/or modify
|
||||
#it under the terms of the GNU General Public License as published by
|
||||
#the Free Software Foundation; either version 2 of the License, or
|
||||
#(at your option) any later version.
|
||||
|
||||
#This program is distributed in the hope that it will be useful,
|
||||
#but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
#GNU General Public License for more details.
|
||||
|
||||
#You should have received a copy of the GNU General Public License
|
||||
#along with this program; if not, write to the Free Software
|
||||
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
|
||||
|
||||
#This is a module for ReplayGain calculation of a given PCM stream.
|
||||
#It is included as a reference implementation and not as a substitute
|
||||
#for external ReplayGain calculators.
|
||||
|
||||
#The first problem with it is that the results are not identical
|
||||
#to those of external calculators, by about a 100th of a dB or so.
|
||||
#This is probably because the C-based implementations use floats
|
||||
#while Python uses doubles. Thus the difference in rounding errors.
|
||||
|
||||
#The second problem with it is that it's very, very slow.
|
||||
#Python is ill-suited to these kinds of rolling loop calculations
|
||||
#involving thousands of samples per second, so the Python-based
|
||||
#approach is several times slower than real-time.
|
||||
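#A hedged usage sketch of this reference implementation (names as defined
#below; the track path is hypothetical):
#
#    pcm = audiotools.open("track.flac").to_pcm()
#    gain_db = calculate_replay_gain(pcm)  #requires 2 channels and a
#                                          #supported sample rate
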
|
||||
|
||||
import audiotools
|
||||
import audiotools.pcmstream
|
||||
from itertools import izip
|
||||
|
||||
AYule = ((1.0, -3.8466461711806699, 7.81501653005538, -11.341703551320419, 13.055042193275449, -12.28759895145294, 9.4829380631978992, -5.8725786177599897, 2.7546586187461299, -0.86984376593551005, 0.13919314567432001),
|
||||
(1.0, -3.4784594855007098, 6.3631777756614802, -8.5475152747187408, 9.4769360780128, -8.8149868137015499, 6.8540154093699801, -4.3947099607955904, 2.1961168489077401, -0.75104302451432003, 0.13149317958807999),
|
||||
(1.0, -2.3789883497308399, 2.84868151156327, -2.6457717022982501, 2.2369765745171302, -1.67148153367602, 1.0059595480854699, -0.45953458054982999, 0.16378164858596, -0.050320777171309998, 0.023478974070199998),
|
||||
(1.0, -1.6127316513724701, 1.0797749225997, -0.2565625775407, -0.1627671912044, -0.22638893773905999, 0.39120800788283999, -0.22138138954924999, 0.045002353873520001, 0.020058518065010002, 0.0030243909574099999),
|
||||
(1.0, -1.4985897936779899, 0.87350271418187997, 0.12205022308084, -0.80774944671437998, 0.47854794562325997, -0.12453458140019, -0.040675101970140001, 0.083337552841070001, -0.042373480257460003, 0.029772073199250002),
|
||||
(1.0, -0.62820619233671005, 0.29661783706366002, -0.37256372942400001, 0.0021376785712399998, -0.42029820170917997, 0.22199650564824, 0.0061342435068200002, 0.06747620744683, 0.057848203758010003, 0.032227540721730001),
|
||||
(1.0, -1.0480033512634901, 0.29156311971248999, -0.26806001042946997, 0.0081999964585799997, 0.45054734505007998, -0.33032403314005998, 0.067393683331100004, -0.047842542290329998, 0.016399078361890002, 0.018073643235729998),
|
||||
(1.0, -0.51035327095184002, -0.31863563325244998, -0.20256413484477001, 0.14728154134329999, 0.38952639978998999, -0.23313271880868, -0.052460190244630001, -0.025059617240530001, 0.02442357316099, 0.01818801111503),
|
||||
(1.0, -0.25049871956019998, -0.43193942311113998, -0.034246810176749999, -0.046783287842420002, 0.26408300200954998, 0.15113130533215999, -0.17556493366449, -0.18823009262115001, 0.054777204286740003, 0.047044096881200002)
|
||||
)
|
||||
|
||||
BYule = ((0.038575994352000001, -0.021603671841850001, -0.0012339531685100001, -9.2916779589999993e-05, -0.016552603416190002, 0.02161526843274, -0.02074045215285, 0.0059429806512499997, 0.0030642802319099998, 0.00012025322027, 0.0028846368391600001),
|
||||
(0.054186564064300002, -0.029110078089480001, -0.0084870937985100006, -0.0085116564546900003, -0.0083499090493599996, 0.022452932533390001, -0.025963385129149998, 0.016248649629749999, -0.0024087905158400001, 0.0067461368224699999, -0.00187763777362),
|
||||
(0.15457299681924, -0.093310490563149995, -0.062478801536530001, 0.021635418887979999, -0.05588393329856, 0.047814766749210001, 0.0022231259774300001, 0.031740925400489998, -0.013905894218979999, 0.00651420667831, -0.0088136273383899993),
|
||||
(0.30296907319326999, -0.22613988682123001, -0.085873237307719993, 0.032829301726640003, -0.0091570293343400007, -0.02364141202522, -0.0058445603991300003, 0.062761013217490003, -8.2808674800000004e-06, 0.0020586188556400002, -0.029501349832869998),
|
||||
(0.33642304856131999, -0.25572241425570003, -0.11828570177555001, 0.11921148675203, -0.078344896094790006, -0.0046997791438, -0.0058950022444000001, 0.057242281403510002, 0.0083204398077299999, -0.016353813845399998, -0.017601765681500001),
|
||||
(0.44915256608449999, -0.14351757464546999, -0.22784394429749, -0.01419140100551, 0.040782627971389998, -0.12398163381747999, 0.04097565135648, 0.10478503600251, -0.01863887810927, -0.031934284389149997, 0.0054190774870700002),
|
||||
(0.56619470757640999, -0.75464456939302005, 0.16242137742230001, 0.16744243493672001, -0.18901604199609001, 0.30931782841830002, -0.27562961986223999, 0.0064731067724599998, 0.086475037803509999, -0.037889845548399997, -0.0058821544342100001),
|
||||
(0.58100494960552995, -0.53174909058578002, -0.14289799034253001, 0.17520704835522, 0.02377945217615, 0.15558449135572999, -0.25344790059353001, 0.016284624063329999, 0.069204677639589998, -0.03721611395801, -0.0074961879717200001),
|
||||
(0.53648789255105001, -0.42163034350695999, -0.0027595361192900001, 0.042678422194150002, -0.10214864179676, 0.14590772289387999, -0.024598648593450002, -0.11202315195388, -0.04060034127, 0.047886655481800003, -0.02217936801134)
|
||||
)
|
||||
|
||||
AButter = ((1.0, -1.9722337291952701, 0.97261396931305999),
|
||||
(1.0, -1.96977855582618, 0.97022847566350001),
|
||||
(1.0, -1.9583538097539801, 0.95920349965458995),
|
||||
(1.0, -1.9500275914987799, 0.95124613669835001),
|
||||
(1.0, -1.94561023566527, 0.94705070426117999),
|
||||
(1.0, -1.9278328697703599, 0.93034775234267997),
|
||||
(1.0, -1.91858953033784, 0.92177618768380998),
|
||||
(1.0, -1.9154210807478, 0.91885558323625005),
|
||||
(1.0, -1.88903307939452, 0.89487434461663995))
|
||||
|
||||
BButter = ((0.98621192462707996, -1.9724238492541599, 0.98621192462707996),
|
||||
(0.98500175787241995, -1.9700035157448399, 0.98500175787241995),
|
||||
(0.97938932735214002, -1.95877865470428, 0.97938932735214002),
|
||||
(0.97531843204928004, -1.9506368640985701, 0.97531843204928004),
|
||||
(0.97316523498161001, -1.94633046996323, 0.97316523498161001),
|
||||
(0.96454515552826003, -1.9290903110565201, 0.96454515552826003),
|
||||
(0.96009142950541004, -1.9201828590108201, 0.96009142950541004),
|
||||
(0.95856916599601005, -1.9171383319920301, 0.95856916599601005),
|
||||
(0.94597685600279002, -1.89195371200558, 0.94597685600279002))
|
||||
|
||||
SAMPLE_RATE_MAP = {48000:0,44100:1,32000:2,24000:3,22050:4,
|
||||
16000:5,12000:6,11025:7,8000:8}
|
||||
|
||||
|
||||
PINK_REF = 64.82
|
||||
|
||||
class Filter:
|
||||
def __init__(self, input_kernel, output_kernel):
|
||||
self.input_kernel = input_kernel
|
||||
self.output_kernel = output_kernel
|
||||
|
||||
self.unfiltered_samples = [0.0] * len(self.input_kernel)
|
||||
self.filtered_samples = [0.0] * len(self.output_kernel)
|
||||
|
||||
#takes a list of floating point samples
|
||||
#returns a list of filtered floating point samples
|
||||
def filter(self, samples):
|
||||
toreturn = []
|
||||
|
||||
input_kernel = tuple(reversed(self.input_kernel))
|
||||
output_kernel = tuple(reversed(self.output_kernel[1:]))
|
||||
|
||||
for s in samples:
|
||||
self.unfiltered_samples.append(s)
|
||||
|
||||
filtered = sum([i * k for i,k in zip(
|
||||
self.unfiltered_samples[-len(input_kernel):],
|
||||
input_kernel)]) - \
|
||||
sum([i * k for i,k in zip(
|
||||
self.filtered_samples[-len(output_kernel):],
|
||||
output_kernel)])
|
||||
|
||||
self.filtered_samples.append(filtered)
|
||||
toreturn.append(filtered)
|
||||
|
||||
|
||||
#if we have more filtered and unfiltered samples than we'll need,
|
||||
#chop off the excess at the beginning
|
||||
if (len(self.unfiltered_samples) > (len(self.input_kernel))):
|
||||
self.unfiltered_samples = self.unfiltered_samples[-len(self.input_kernel):]
|
||||
|
||||
if (len(self.filtered_samples) > (len(self.output_kernel))):
|
||||
self.filtered_samples = self.filtered_samples[-len(self.output_kernel):]
|
||||
|
||||
return toreturn
|
||||
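#In other words, each call to filter() applies the direct-form IIR
#difference equation
#
#    y[n] = sum(b[k] * x[n - k] for k in range(len(b))) -
#           sum(a[k] * y[n - k] for k in range(1, len(a)))
#
#where b is input_kernel and a is output_kernel (a[0] is taken to be 1.0,
#as it is in the AYule and AButter tables above).
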
|
||||
|
||||
MAX_ORDER = 10
|
||||
|
||||
class EqualLoudnessFilter(audiotools.PCMReader):
|
||||
def __init__(self, pcmreader):
|
||||
if (pcmreader.channels != 2):
|
||||
raise ValueError("channels must equal 2")
|
||||
if (pcmreader.sample_rate not in SAMPLE_RATE_MAP.keys()):
|
||||
raise ValueError("unsupported sample rate")
|
||||
|
||||
self.stream = audiotools.pcmstream.PCMStreamReader(
|
||||
pcmreader,
|
||||
pcmreader.bits_per_sample / 8,
|
||||
False,True)
|
||||
|
||||
audiotools.PCMReader.__init__(
|
||||
self,
|
||||
self.stream,
|
||||
pcmreader.sample_rate,
|
||||
2,
|
||||
pcmreader.bits_per_sample)
|
||||
|
||||
self.leftover_samples = []
|
||||
|
||||
self.yule_filter_l = Filter(
|
||||
BYule[SAMPLE_RATE_MAP[self.sample_rate]],
|
||||
AYule[SAMPLE_RATE_MAP[self.sample_rate]])
|
||||
|
||||
self.yule_filter_r = Filter(
|
||||
BYule[SAMPLE_RATE_MAP[self.sample_rate]],
|
||||
AYule[SAMPLE_RATE_MAP[self.sample_rate]])
|
||||
|
||||
self.butter_filter_l = Filter(
|
||||
BButter[SAMPLE_RATE_MAP[self.sample_rate]],
|
||||
AButter[SAMPLE_RATE_MAP[self.sample_rate]])
|
||||
|
||||
self.butter_filter_r = Filter(
|
||||
BButter[SAMPLE_RATE_MAP[self.sample_rate]],
|
||||
AButter[SAMPLE_RATE_MAP[self.sample_rate]])
|
||||
|
||||
def read(self, bytes):
|
||||
#read in a bunch of floating point samples
|
||||
(frame_list,self.leftover_samples) = audiotools.FrameList.from_samples(
|
||||
self.leftover_samples + self.stream.read(bytes),
|
||||
self.channels)
|
||||
|
||||
#convert them to a pair of floating-point channel lists
|
||||
l_channel = frame_list.channel(0)
|
||||
r_channel = frame_list.channel(1)
|
||||
|
||||
#run our channel lists through the Yule and Butter filters
|
||||
l_channel = self.butter_filter_l.filter(
|
||||
self.yule_filter_l.filter(l_channel))
|
||||
|
||||
r_channel = self.butter_filter_r.filter(
|
||||
self.yule_filter_r.filter(r_channel))
|
||||
|
||||
#convert our channel lists back to integer samples
|
||||
multiplier = 1 << (self.bits_per_sample - 1)
|
||||
|
||||
return audiotools.pcmstream.pcm_to_string(
|
||||
audiotools.FrameList.from_channels(
|
||||
([int(round(s * multiplier)) for s in l_channel],
|
||||
[int(round(s * multiplier)) for s in r_channel])),
|
||||
self.bits_per_sample / 8,
|
||||
False)
|
||||
|
||||
|
||||
#this takes a PCMReader-compatible object
|
||||
#it yields FrameLists, each 50ms long (1/20th of a second)
|
||||
#how many PCM frames that is varies depending on the sample rate
|
||||
def replay_gain_blocks(pcmreader):
|
||||
unhandled_samples = [] #partial PCM frames
|
||||
frame_pool = audiotools.FrameList([],pcmreader.channels)
|
||||
|
||||
reader = audiotools.pcmstream.PCMStreamReader(pcmreader,
|
||||
pcmreader.bits_per_sample / 8,
|
||||
False,False)
|
||||
|
||||
(framelist,unhandled_samples) = audiotools.FrameList.from_samples(
|
||||
unhandled_samples + reader.read(audiotools.BUFFER_SIZE),
|
||||
pcmreader.channels)
|
||||
|
||||
while ((len(framelist) > 0) or (len(unhandled_samples) > 0)):
|
||||
frame_pool.extend(framelist)
|
||||
|
||||
while (frame_pool.total_frames() >= (pcmreader.sample_rate / 20)):
|
||||
yield audiotools.FrameList(
|
||||
frame_pool[0:
|
||||
((pcmreader.sample_rate / 20) * pcmreader.channels)],
|
||||
pcmreader.channels)
|
||||
frame_pool = audiotools.FrameList(
|
||||
frame_pool[((pcmreader.sample_rate / 20) * pcmreader.channels):],
|
||||
pcmreader.channels)
|
||||
|
||||
(framelist,unhandled_samples) = audiotools.FrameList.from_samples(
|
||||
unhandled_samples + reader.read(audiotools.BUFFER_SIZE),
|
||||
pcmreader.channels)
|
||||
|
||||
reader.close()
|
||||
#this drops the last block that's not 50ms long
|
||||
#that's probably the right thing to do
|
||||
|
||||
|
||||
#takes a PCMReader-compatible object with 2 channels and a
#supported sample rate
#returns the stream's ReplayGain value in dB
def calculate_replay_gain(pcmstream):
    import math

    def __mean__(l):
        return sum(l) / len(l)

    pcmstream = EqualLoudnessFilter(pcmstream)

    db_blocks = []

    for block in replay_gain_blocks(pcmstream):
        left = __mean__([s ** 2 for s in block.channel(0)])
        right = __mean__([s ** 2 for s in block.channel(1)])
        db_blocks.append((left + right) / 2)

    db_blocks = [10 * math.log10(b + 10 ** -10) for b in db_blocks]
    db_blocks.sort()
    replay_gain = db_blocks[int(round(len(db_blocks) * 0.95))]

    return PINK_REF - replay_gain


if (__name__ == '__main__'):
    pass
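#(Illustrative note, not part of the module: the sorted 50ms loudness
# blocks are sampled at the 95th percentile, e.g. with 1200 blocks the
# index is int(round(1200 * 0.95)) = 1140, and that level is subtracted
# from the 64.82 dB pink-noise reference PINK_REF to give the gain.)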
BIN    Melodia/resources/audiotools/resample.so (Executable file; binary file not shown)

246    Melodia/resources/audiotools/toc.py (Normal file)
@@ -0,0 +1,246 @@
#!/usr/bin/python
|
||||
|
||||
#Audio Tools, a module and set of tools for manipulating audio data
|
||||
#Copyright (C) 2008-2011 Brian Langenberger
|
||||
|
||||
#This program is free software; you can redistribute it and/or modify
|
||||
#it under the terms of the GNU General Public License as published by
|
||||
#the Free Software Foundation; either version 2 of the License, or
|
||||
#(at your option) any later version.
|
||||
|
||||
#This program is distributed in the hope that it will be useful,
|
||||
#but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
#GNU General Public License for more details.
|
||||
|
||||
#You should have received a copy of the GNU General Public License
|
||||
#along with this program; if not, write to the Free Software
|
||||
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
|
||||
"""The TOC file handling module."""
|
||||
|
||||
import re
|
||||
from audiotools import SheetException, parse_timestamp, build_timestamp
|
||||
import gettext
|
||||
|
||||
gettext.install("audiotools", unicode=True)
|
||||
|
||||
###################
|
||||
#TOC Parsing
|
||||
###################
|
||||
|
||||
|
||||
class TOCException(SheetException):
|
||||
"""Raised by TOC file parsing errors."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
def parse(lines):
|
||||
"""Returns a TOCFile object from an iterator of lines.
|
||||
|
||||
Raises TOCException if some problem occurs parsing the file."""
|
||||
|
||||
TRACKLINE = re.compile(r'TRACK AUDIO')
|
||||
|
||||
lines = list(lines)
|
||||
|
||||
if ('CD_DA' not in [line.strip() for line in lines]):
|
||||
raise TOCException(_(u"No CD_DA TOC header found"))
|
||||
|
||||
lines = iter(lines)
|
||||
|
||||
toc = TOCFile()
|
||||
track = None
|
||||
track_number = 0
|
||||
line_number = 0
|
||||
|
||||
try:
|
||||
while (True):
|
||||
line_number += 1
|
||||
line = lines.next().strip()
|
||||
|
||||
if (len(line) == 0):
|
||||
pass
|
||||
elif (TRACKLINE.match(line)):
|
||||
if (track is not None):
|
||||
toc.tracks[track.number] = track
|
||||
track_number += 1
|
||||
track = Track(track_number)
|
||||
else:
|
||||
if (track is not None):
|
||||
track.lines.append(line)
|
||||
if (line.startswith('FILE') or
|
||||
line.startswith('AUDIOFILE')):
|
||||
if ('"' in line):
|
||||
track.indexes = map(
|
||||
parse_timestamp,
|
||||
re.findall(r'\d+:\d+:\d+|\d+',
|
||||
line[line.rindex('"') + 1:]))
|
||||
else:
|
||||
track.indexes = map(
|
||||
parse_timestamp,
|
||||
re.findall(r'\d+:\d+:\d+|\d+',
|
||||
line))
|
||||
elif (line.startswith('START')):
|
||||
track.start = parse_timestamp(line[len('START '):])
|
||||
else:
|
||||
toc.lines.append(line)
|
||||
except StopIteration:
|
||||
if (track is not None):
|
||||
toc.tracks[track.number] = track
|
||||
return toc
|
||||
|
||||
|
||||
class TOCFile:
|
||||
"""An object representing a TOC file."""
|
||||
|
||||
def __init__(self):
|
||||
self.lines = []
|
||||
self.tracks = {}
|
||||
|
||||
def __repr__(self):
|
||||
return "TOCFile(lines=%s,tracks=%s)" % (repr(self.lines),
|
||||
repr(self.tracks))
|
||||
|
||||
def catalog(self):
|
||||
"""Returns the cuesheet's CATALOG number as a plain string, or None.
|
||||
|
||||
If present, this value is typically a CD's UPC code."""
|
||||
|
||||
for line in self.lines:
|
||||
if (line.startswith('CATALOG')):
|
||||
result = re.search(r'"(.+)"', line)
|
||||
if (result is not None):
|
||||
return result.group(1)
|
||||
else:
|
||||
continue
|
||||
else:
|
||||
return None
|
||||
|
||||
def indexes(self):
|
||||
"""Yields a set of index lists, one for each track in the file."""
|
||||
|
||||
for track in sorted(self.tracks.values()):
|
||||
if (track.start != 0):
|
||||
yield (track.indexes[0], track.indexes[0] + track.start)
|
||||
else:
|
||||
yield (track.indexes[0],)
|
||||
|
||||
    def pcm_lengths(self, total_length):
        """Yields a list of PCM lengths for all audio tracks within the file.

        total_length is the length of the entire file in PCM frames."""

        previous = None

        for current in self.indexes():
            if (previous is None):
                previous = current
            else:
                track_length = (max(current) - max(previous)) * (44100 / 75)
                total_length -= track_length
                yield track_length
                previous = current

        yield total_length

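    #(Illustrative arithmetic, not part of the module: CD audio advances
    # 75 CD frames per second of 44100 Hz samples, so each CD frame is
    # 44100 / 75 = 588 PCM frames; a 3-minute gap of 180 * 75 = 13500
    # CD frames therefore spans 13500 * 588 = 7938000 PCM frames.)
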
def ISRCs(self):
|
||||
"""Returns a track_number->ISRC dict of all non-empty tracks."""
|
||||
|
||||
return dict([(track.number, track.ISRC()) for track in
|
||||
self.tracks.values() if track.ISRC() is not None])
|
||||
|
||||
@classmethod
|
||||
def file(cls, sheet, filename):
|
||||
"""Constructs a new TOC file string from a compatible object.
|
||||
|
||||
sheet must have catalog(), indexes() and ISRCs() methods.
|
||||
filename is a string to the filename the TOC file is created for.
|
||||
Although we don't care whether the filename points to a real file,
|
||||
other tools sometimes do.
|
||||
"""
|
||||
|
||||
import cStringIO
|
||||
|
||||
catalog = sheet.catalog() # a catalog string, or None
|
||||
indexes = list(sheet.indexes()) # a list of index tuples
|
||||
ISRCs = sheet.ISRCs() # a track_number->ISRC dict
|
||||
|
||||
data = cStringIO.StringIO()
|
||||
data.write("CD_DA\n\n")
|
||||
|
||||
if ((catalog is not None) and (len(catalog) > 0)):
|
||||
data.write("CATALOG \"%s\"\n\n" % (catalog))
|
||||
|
||||
for (i, (current, next)) in enumerate(zip(indexes,
|
||||
indexes[1:] + [None])):
|
||||
tracknum = i + 1
|
||||
|
||||
data.write("TRACK AUDIO\n")
|
||||
|
||||
if (tracknum in ISRCs.keys()):
|
||||
data.write("ISRC \"%s\"\n" % (ISRCs[tracknum]))
|
||||
|
||||
if (next is not None):
|
||||
data.write("AUDIOFILE \"%s\" %s %s\n" % \
|
||||
(filename,
|
||||
build_timestamp(current[0]),
|
||||
build_timestamp(next[0] - current[0])))
|
||||
else:
|
||||
data.write("AUDIOFILE \"%s\" %s\n" % \
|
||||
(filename,
|
||||
build_timestamp(current[0])))
|
||||
if (len(current) > 1):
|
||||
data.write("START %s\n" % \
|
||||
(build_timestamp(current[-1] - current[0])))
|
||||
|
||||
if (next is not None):
|
||||
data.write("\n")
|
||||
|
||||
return data.getvalue()
|
||||
|
||||
|
||||
class Track:
|
||||
"""A track inside a TOCFile object."""
|
||||
|
||||
def __init__(self, number):
|
||||
self.number = number
|
||||
self.lines = []
|
||||
self.indexes = []
|
||||
self.start = 0
|
||||
|
||||
def __cmp__(self, t):
|
||||
return cmp(self.number, t.number)
|
||||
|
||||
def __repr__(self):
|
||||
return "Track(%s,lines=%s,indexes=%s,start=%s)" % \
|
||||
(repr(self.number), repr(self.lines),
|
||||
repr(self.indexes), repr(self.start))
|
||||
|
||||
def ISRC(self):
|
||||
"""Returns the track's ISRC value, or None."""
|
||||
|
||||
for line in self.lines:
|
||||
if (line.startswith('ISRC')):
|
||||
match = re.search(r'"(.+)"', line)
|
||||
if (match is not None):
|
||||
return match.group(1)
|
||||
else:
|
||||
return None
|
||||
|
||||
|
||||
def read_tocfile(filename):
|
||||
"""Returns a TOCFile from a TOC filename on disk.
|
||||
|
||||
Raises TOCException if some error occurs reading or parsing the file.
|
||||
"""
|
||||
|
||||
try:
|
||||
f = open(filename, 'r')
|
||||
except IOError, msg:
|
||||
raise TOCException(str(msg))
|
||||
try:
|
||||
return parse(iter(f.readlines()))
|
||||
finally:
|
||||
f.close()
|
BIN    Melodia/resources/audiotools/verify.so (Executable file; binary file not shown)