From 544fe9bf1782a027b3f31bf4c10a050d783e32ac Mon Sep 17 00:00:00 2001
From: jvoisin
Date: Wed, 1 Feb 2012 22:56:04 +0100
Subject: Rename mat-cli to mat-gui
---
lib/FORMATS | 94 ++
lib/__init__.py | 1 +
lib/archive.py | 291 +++++
lib/audio.py | 100 ++
lib/bencode/__init__.py | 1 +
lib/bencode/bencode.py | 152 +++
lib/exiftool.py | 95 ++
lib/hachoir_editor/__init__.py | 8 +
lib/hachoir_editor/field.py | 69 +
lib/hachoir_editor/fieldset.py | 352 +++++
lib/hachoir_editor/typed_field.py | 253 ++++
lib/images.py | 48 +
lib/mat.py | 150 +++
lib/misc.py | 63 +
lib/office.py | 305 +++++
lib/parser.py | 130 ++
lib/strippers.py | 48 +
lib/tarfile/__init__.py | 1 +
lib/tarfile/tarfile.py | 2593 +++++++++++++++++++++++++++++++++++++
mat-cli | 161 ---
mat-cli.1 | 78 --
mat-gui | 4 +-
mat-gui.1 | 2 +-
mat/__init__.py | 1 -
mat/archive.py | 291 -----
mat/audio.py | 100 --
mat/bencode/__init__.py | 1 -
mat/bencode/bencode.py | 152 ---
mat/exiftool.py | 95 --
mat/hachoir_editor/__init__.py | 8 -
mat/hachoir_editor/field.py | 69 -
mat/hachoir_editor/fieldset.py | 352 -----
mat/hachoir_editor/typed_field.py | 253 ----
mat/images.py | 48 -
mat/mat.py | 150 ---
mat/misc.py | 63 -
mat/office.py | 305 -----
mat/parser.py | 130 --
mat/strippers.py | 48 -
mat/tarfile/__init__.py | 1 -
mat/tarfile/tarfile.py | 2593 -------------------------------------
setup.py | 8 +-
test/clitest.py | 20 +-
test/libtest.py | 2 +-
44 files changed, 4772 insertions(+), 4917 deletions(-)
create mode 100644 lib/FORMATS
create mode 100644 lib/__init__.py
create mode 100644 lib/archive.py
create mode 100644 lib/audio.py
create mode 100644 lib/bencode/__init__.py
create mode 100644 lib/bencode/bencode.py
create mode 100644 lib/exiftool.py
create mode 100644 lib/hachoir_editor/__init__.py
create mode 100644 lib/hachoir_editor/field.py
create mode 100644 lib/hachoir_editor/fieldset.py
create mode 100644 lib/hachoir_editor/typed_field.py
create mode 100644 lib/images.py
create mode 100644 lib/mat.py
create mode 100644 lib/misc.py
create mode 100644 lib/office.py
create mode 100644 lib/parser.py
create mode 100644 lib/strippers.py
create mode 100644 lib/tarfile/__init__.py
create mode 100644 lib/tarfile/tarfile.py
delete mode 100755 mat-cli
delete mode 100644 mat-cli.1
delete mode 100644 mat/__init__.py
delete mode 100644 mat/archive.py
delete mode 100644 mat/audio.py
delete mode 100644 mat/bencode/__init__.py
delete mode 100644 mat/bencode/bencode.py
delete mode 100644 mat/exiftool.py
delete mode 100644 mat/hachoir_editor/__init__.py
delete mode 100644 mat/hachoir_editor/field.py
delete mode 100644 mat/hachoir_editor/fieldset.py
delete mode 100644 mat/hachoir_editor/typed_field.py
delete mode 100644 mat/images.py
delete mode 100644 mat/mat.py
delete mode 100644 mat/misc.py
delete mode 100644 mat/office.py
delete mode 100644 mat/parser.py
delete mode 100644 mat/strippers.py
delete mode 100644 mat/tarfile/__init__.py
delete mode 100644 mat/tarfile/tarfile.py
diff --git a/lib/FORMATS b/lib/FORMATS
new file mode 100644
index 0000000..c497524
--- /dev/null
+++ b/lib/FORMATS
@@ -0,0 +1,94 @@
+
+
+ Portable Network Graphics
+ .png
+ full
+ textual metadata + date
+ removal of harmful fields is done with hachoir
+
+
+
+ Jpeg
+ .jpeg, .jpg
+ full
+ comment + exif/photoshop/adobe
+ removal of harmful fields is done with hachoir
+
+
+
+ Open Document
+ .odt, .odx, .ods, ...
+ full
+ a meta.xml file
+ removal of the meta.xml file
+
+
+
+ Office Openxml
+ .docx, .pptx, .xlsx, ...
+ full
+		a docProps folder containing xml metadata files
+ removal of the docProps folder
+
+
+
+ Portable Document Fileformat
+ .pdf
+ full
+ a lot
+ rendering of the pdf file on a cairo surface with the help of
+ poppler in order to remove all the internal metadata,
+ then removal of the remaining metadata fields of the pdf itself with
+ pdfrw (the next version of python-cairo will support metadata,
+ so we should get rid of pdfrw)
+
+
+
+ Tape ARchive
+ .tar, .tar.bz2, .tar.gz
+ full
+ metadata from the file itself, metadata from the file contained
+		into the archive, and metadata added by tar to the file at the
+ creation of the archive
+		extraction of each file, treatment of the file, add treated file
+ to a new archive, right before the add, remove the metadata added by tar
+		itself. When the new archive is complete, remove all its metadata.
+
+
+
+ Zip
+ .zip
+ .partial
+ metadata from the file itself, metadata from the file contained
+ into the archive, and metadata added by zip to the file when added to
+ the archive.
+
+		extraction of each file, treatment of the file, add treated file
+		to a new archive. When the new archive is complete, remove all its metadata
+ metadata added by zip itself to internal files
+
+
+
+ MPEG Audio
+ .mp3, .mp2, .mp1
+ full
+ id3
+ removal of harmful fields is done with hachoir
+
+
+
+ Ogg Vorbis
+ .ogg
+ full
+ Vorbis
+ removal of harmful fields is done with mutagen
+
+
+
+ Free Lossless Audio Codec
+ .flac
+ full
+ Flac, Vorbis
+ removal of harmful fields is done with mutagen
+
+
diff --git a/lib/__init__.py b/lib/__init__.py
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/lib/__init__.py
@@ -0,0 +1 @@
+
diff --git a/lib/archive.py b/lib/archive.py
new file mode 100644
index 0000000..9993102
--- /dev/null
+++ b/lib/archive.py
@@ -0,0 +1,291 @@
+'''
+ Take care of archives formats
+'''
+
+import zipfile
+import shutil
+import os
+import logging
+import tempfile
+
+import parser
+import mat
+from tarfile import tarfile
+
+
+class GenericArchiveStripper(parser.GenericParser):
+ '''
+ Represent a generic archive
+ '''
+ def __init__(self, filename, parser, mime, backup, add2archive):
+ super(GenericArchiveStripper, self).__init__(filename, parser, mime,
+ backup, add2archive)
+ self.compression = ''
+ self.add2archive = add2archive
+ self.tempdir = tempfile.mkdtemp()
+
+ def __del__(self):
+ '''
+ Remove the files inside the temp dir,
+ then remove the temp dir
+ '''
+ for root, dirs, files in os.walk(self.tempdir):
+ for item in files:
+ path_file = os.path.join(root, item)
+ mat.secure_remove(path_file)
+ shutil.rmtree(self.tempdir)
+
+ def remove_all(self):
+ '''
+ Call _remove_all() with in argument : "normal"
+ '''
+ return self._remove_all('normal')
+
+ def remove_all_strict(self):
+ '''
+ call remove_all() with in argument : "strict"
+ '''
+ return self._remove_all('strict')
+
+ def _remove_all(self, method):
+ '''
+ Remove all meta, normal way if method is "normal",
+ else, use the strict way (with possible data loss)
+ '''
+ raise NotImplementedError
+
+
+class ZipStripper(GenericArchiveStripper):
+ '''
+ Represent a zip file
+ '''
+ def is_file_clean(self, fileinfo):
+ '''
+ Check if a ZipInfo object is clean of metadatas added
+ by zip itself, independently of the corresponding file metadatas
+ '''
+ if fileinfo.comment is not '':
+ return False
+ elif fileinfo.date_time is not 0:
+ return False
+ elif fileinfo.create_system is not 0:
+ return False
+ elif fileinfo.create_version is not 0:
+ return False
+ else:
+ return True
+
+ def is_clean(self):
+ '''
+ Check if the given file is clean from harmful metadata
+ '''
+ zipin = zipfile.ZipFile(self.filename, 'r')
+ if zipin.comment != '':
+ logging.debug('%s has a comment' % self.filename)
+ return False
+ for item in zipin.infolist():
+ #I have not found a way to remove the crap added by zipfile :/
+ #if not self.is_file_clean(item):
+ # logging.debug('%s from %s has compromizing zipinfo' %
+ # (item.filename, self.filename))
+ # return False
+ zipin.extract(item, self.tempdir)
+ name = os.path.join(self.tempdir, item.filename)
+ if os.path.isfile(name):
+ try:
+ cfile = mat.create_class_file(name, False,
+ self.add2archive)
+ if not cfile.is_clean():
+ return False
+ except:
+ #best solution I have found
+ logging.info('%s\'s fileformat is not supported, or is a \
+harmless format' % item.filename)
+ _, ext = os.path.splitext(name)
+ bname = os.path.basename(item.filename)
+ if ext not in parser.NOMETA:
+ if bname != 'mimetype' and bname != '.rels':
+ return False
+ zipin.close()
+ return True
+
+ def get_meta(self):
+ '''
+ Return all the metadata of a ZipFile (don't return metadatas
+ of contained files : should it ?)
+ '''
+ zipin = zipfile.ZipFile(self.filename, 'r')
+ metadata = {}
+ for field in zipin.infolist():
+ zipmeta = {}
+ zipmeta['comment'] = field.comment
+ zipmeta['modified'] = field.date_time
+ zipmeta['system'] = field.create_system
+ zipmeta['zip_version'] = field.create_version
+ metadata[field.filename] = zipmeta
+ metadata["%s comment" % self.filename] = zipin.comment
+ zipin.close()
+ return metadata
+
+ def _remove_all(self, method):
+ '''
+ So far, the zipfile module does not allow to write a ZipInfo
+ object into a zipfile (and it's a shame !) : so data added
+ by zipfile itself could not be removed. It's a big concern.
+ Is shiping a patched version of zipfile.py a good idea ?
+ '''
+ zipin = zipfile.ZipFile(self.filename, 'r')
+ zipout = zipfile.ZipFile(self.output, 'w', allowZip64=True)
+ for item in zipin.infolist():
+ zipin.extract(item, self.tempdir)
+ name = os.path.join(self.tempdir, item.filename)
+ if os.path.isfile(name):
+ try:
+ cfile = mat.create_class_file(name, False,
+ self.add2archive)
+ if method is 'normal':
+ cfile.remove_all()
+ else:
+ cfile.remove_all_strict()
+ logging.debug('Processing %s from %s' % (item.filename,
+ self.filename))
+ zipout.write(name, item.filename)
+ except:
+ logging.info('%s\'s format is not supported or harmless' %
+ item.filename)
+ _, ext = os.path.splitext(name)
+ if self.add2archive or ext in parser.NOMETA:
+ zipout.write(name, item.filename)
+ zipout.comment = ''
+ zipin.close()
+ zipout.close()
+ logging.info('%s treated' % self.filename)
+ self.do_backup()
+ return True
+
+
+class TarStripper(GenericArchiveStripper):
+ '''
+ Represent a tarfile archive
+ '''
+ def _remove(self, current_file):
+ '''
+ remove the meta added by tar itself to the file
+ '''
+ current_file.mtime = 0
+ current_file.uid = 0
+ current_file.gid = 0
+ current_file.uname = ''
+ current_file.gname = ''
+ return current_file
+
+ def _remove_all(self, method):
+ tarin = tarfile.open(self.filename, 'r' + self.compression)
+ tarout = tarfile.open(self.output, 'w' + self.compression)
+ for item in tarin.getmembers():
+ tarin.extract(item, self.tempdir)
+ name = os.path.join(self.tempdir, item.name)
+ if item.type is '0': # is item a regular file ?
+ #no backup file
+ try:
+ cfile = mat.create_class_file(name, False,
+ self.add2archive)
+ if method is 'normal':
+ cfile.remove_all()
+ else:
+ cfile.remove_all_strict()
+ tarout.add(name, item.name, filter=self._remove)
+ except:
+ logging.info('%s\' format is not supported or harmless' %
+ item.name)
+ _, ext = os.path.splitext(name)
+ if self.add2archive or ext in parser.NOMETA:
+ tarout.add(name, item.name, filter=self._remove)
+ tarin.close()
+ tarout.close()
+ self.do_backup()
+ return True
+
+ def is_file_clean(self, current_file):
+ '''
+ Check metadatas added by tar
+ '''
+ if current_file.mtime is not 0:
+ return False
+ elif current_file.uid is not 0:
+ return False
+ elif current_file.gid is not 0:
+ return False
+ elif current_file.uname is not '':
+ return False
+ elif current_file.gname is not '':
+ return False
+ else:
+ return True
+
+ def is_clean(self):
+ '''
+ Check if the file is clean from harmful metadatas
+ '''
+ tarin = tarfile.open(self.filename, 'r' + self.compression)
+ for item in tarin.getmembers():
+ if not self.is_file_clean(item):
+ tarin.close()
+ return False
+ tarin.extract(item, self.tempdir)
+ name = os.path.join(self.tempdir, item.name)
+ if item.type is '0': # is item a regular file ?
+ try:
+ class_file = mat.create_class_file(name,
+ False, self.add2archive) # no backup file
+ if not class_file.is_clean():
+ tarin.close()
+ return False
+ except:
+ logging.error('%s\'s foramt is not supported or harmless' %
+ item.filename)
+ _, ext = os.path.splitext(name)
+ if ext not in parser.NOMETA:
+ tarin.close()
+ return False
+ tarin.close()
+ return True
+
+ def get_meta(self):
+ '''
+ Return a dict with all the meta of the file
+ '''
+ tarin = tarfile.open(self.filename, 'r' + self.compression)
+ metadata = {}
+ for current_file in tarin.getmembers():
+ if current_file.type is '0':
+ if not self.is_file_clean(current_file): # if there is meta
+ current_meta = {}
+ current_meta['mtime'] = current_file.mtime
+ current_meta['uid'] = current_file.uid
+ current_meta['gid'] = current_file.gid
+ current_meta['uname'] = current_file.uname
+ current_meta['gname'] = current_file.gname
+ metadata[current_file.name] = current_meta
+ tarin.close()
+ return metadata
+
+
+class GzipStripper(TarStripper):
+ '''
+ Represent a tar.gz archive
+ '''
+ def __init__(self, filename, parser, mime, backup, add2archive):
+ super(GzipStripper, self).__init__(filename, parser, mime, backup,
+ add2archive)
+ self.compression = ':gz'
+
+
+class Bzip2Stripper(TarStripper):
+ '''
+ Represents a tar.bz2 archive
+ '''
+ def __init__(self, filename, parser, mime, backup, add2archive):
+ super(Bzip2Stripper, self).__init__(filename, parser, mime, backup,
+ add2archive)
+ self.compression = ':bz2'
diff --git a/lib/audio.py b/lib/audio.py
new file mode 100644
index 0000000..ed849ee
--- /dev/null
+++ b/lib/audio.py
@@ -0,0 +1,100 @@
+'''
+ Care about audio fileformat
+'''
+try:
+ from mutagen.flac import FLAC
+ from mutagen.oggvorbis import OggVorbis
+except ImportError:
+ pass
+
+
+import parser
+import shutil
+
+
+class MpegAudioStripper(parser.GenericParser):
+ '''
+ Represent mpeg audio file (mp3, ...)
+ '''
+ def _should_remove(self, field):
+ if field.name in ("id3v1", "id3v2"):
+ return True
+ else:
+ return False
+
+
+class OggStripper(parser.GenericParser):
+ '''
+ Represent an ogg vorbis file
+ '''
+ def remove_all(self):
+ if self.backup is True:
+ shutil.copy2(self.filename, self.output)
+ self.filename = self.output
+
+ mfile = OggVorbis(self.filename)
+ mfile.delete()
+ mfile.save()
+ return True
+
+ def is_clean(self):
+ '''
+ Check if the "metadata" block is present in the file
+ '''
+ mfile = OggVorbis(self.filename)
+ if mfile.tags == []:
+ return True
+ else:
+ return False
+
+ def get_meta(self):
+ '''
+ Return the content of the metadata block if present
+ '''
+ metadata = {}
+ mfile = OggVorbis(self.filename)
+ for key, value in mfile.tags:
+ metadata[key] = value
+ return metadata
+
+
+class FlacStripper(parser.GenericParser):
+ '''
+ Represent a Flac audio file
+ '''
+ def remove_all(self):
+ '''
+ Remove the "metadata" block from the file
+ '''
+ if self.backup is True:
+ shutil.copy2(self.filename, self.output)
+ self.filename = self.output
+
+ mfile = FLAC(self.filename)
+ mfile.delete()
+ mfile.clear_pictures()
+ mfile.save()
+ return True
+
+ def is_clean(self):
+ '''
+ Check if the "metadata" block is present in the file
+ '''
+ mfile = FLAC(self.filename)
+ if mfile.tags is None and mfile.pictures == []:
+ return True
+ else:
+ return False
+
+ def get_meta(self):
+ '''
+ Return the content of the metadata block if present
+ '''
+ metadata = {}
+ mfile = FLAC(self.filename)
+ if mfile.tags is not None:
+ if mfile.pictures != []:
+ metadata['picture :'] = 'yes'
+ for key, value in mfile.tags:
+ metadata[key] = value
+ return metadata
diff --git a/lib/bencode/__init__.py b/lib/bencode/__init__.py
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/lib/bencode/__init__.py
@@ -0,0 +1 @@
+
diff --git a/lib/bencode/bencode.py b/lib/bencode/bencode.py
new file mode 100644
index 0000000..739ffe5
--- /dev/null
+++ b/lib/bencode/bencode.py
@@ -0,0 +1,152 @@
+# Copyright 2007 by Petru Paler
+# Copyright 2011 by Julien (jvoisin) Voisin
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+#
+
+'''
+ A quick (and also nice) lib to bencode/bdecode torrent files
+'''
+
+
+import types
+
+
+class BTFailure(Exception):
+ '''Custom Exception'''
+ pass
+
+
+class Bencached(object):
+ '''Custom type : cached string'''
+ __slots__ = ['bencoded']
+
+ def __init__(self, string):
+ self.bencoded = string
+
+
+def decode_int(x, f):
+ '''decode an int'''
+ f += 1
+ newf = x.index('e', f)
+ n = int(x[f:newf])
+ if x[f] == '-':
+ if x[f + 1] == '0':
+ raise ValueError
+ elif x[f] == '0' and newf != f + 1:
+ raise ValueError
+ return (n, newf + 1)
+
+
+def decode_string(x, f):
+ '''decode a string'''
+ colon = x.index(':', f)
+ n = int(x[f:colon])
+ if x[f] == '0' and colon != f + 1:
+ raise ValueError
+ colon += 1
+ return (x[colon:colon + n], colon + n)
+
+
+def decode_list(x, f):
+ '''decode a list'''
+ result = []
+ f += 1
+ while x[f] != 'e':
+ v, f = DECODE_FUNC[x[f]](x, f)
+ result.append(v)
+ return (result, f + 1)
+
+
+def decode_dict(x, f):
+ '''decode a dict'''
+ result = {}
+ f += 1
+ while x[f] != 'e':
+ k, f = decode_string(x, f)
+ result[k], f = DECODE_FUNC[x[f]](x, f)
+ return (result, f + 1)
+
+
+def encode_bool(x, r):
+ '''bencode a boolean'''
+ if x:
+ encode_int(1, r)
+ else:
+ encode_int(0, r)
+
+
+def encode_int(x, r):
+ '''bencode an integer/float'''
+ r.extend(('i', str(x), 'e'))
+
+
+def encode_list(x, r):
+ '''bencode a list/tuple'''
+ r.append('l')
+ [ENCODE_FUNC[type(item)](item, r) for item in x]
+ r.append('e')
+
+
+def encode_dict(x, result):
+ '''bencode a dict'''
+ result.append('d')
+ ilist = x.items()
+ ilist.sort()
+ for k, v in ilist:
+ result.extend((str(len(k)), ':', k))
+ ENCODE_FUNC[type(v)](v, result)
+ result.append('e')
+
+
+DECODE_FUNC = {}
+DECODE_FUNC.update(dict([(str(x), decode_string) for x in xrange(9)]))
+DECODE_FUNC['l'] = decode_list
+DECODE_FUNC['d'] = decode_dict
+DECODE_FUNC['i'] = decode_int
+
+
+ENCODE_FUNC = {}
+ENCODE_FUNC[Bencached] = lambda x, r: r.append(x.bencoded)
+ENCODE_FUNC[types.IntType] = encode_int
+ENCODE_FUNC[types.LongType] = encode_int
+ENCODE_FUNC[types.StringType] = lambda x, r: r.extend((str(len(x)), ':', x))
+ENCODE_FUNC[types.ListType] = encode_list
+ENCODE_FUNC[types.TupleType] = encode_list
+ENCODE_FUNC[types.DictType] = encode_dict
+ENCODE_FUNC[types.BooleanType] = encode_bool
+
+
+def bencode(string):
+ '''bencode $string'''
+ table = []
+ ENCODE_FUNC[type(string)](string, table)
+ return ''.join(table)
+
+
+def bdecode(string):
+ '''decode $string'''
+ try:
+ result, lenght = DECODE_FUNC[string[0]](string, 0)
+ except (IndexError, KeyError, ValueError):
+ raise BTFailure('Not a valid bencoded string')
+ if lenght != len(string):
+ raise BTFailure('Invalid bencoded value (data after valid prefix)')
+ return result
diff --git a/lib/exiftool.py b/lib/exiftool.py
new file mode 100644
index 0000000..758a094
--- /dev/null
+++ b/lib/exiftool.py
@@ -0,0 +1,95 @@
+'''
+ Care about images with help of the amazing (perl) library Exiftool.
+'''
+
+import subprocess
+import parser
+
+
+class ExiftoolStripper(parser.GenericParser):
+ '''
+ A generic stripper class using exiftool as backend
+ '''
+
+ def __init__(self, filename, parser, mime, backup, add2archive):
+ super(ExiftoolStripper, self).__init__(filename, parser, mime,
+ backup, add2archive)
+ self.allowed = ['ExifTool Version Number', 'File Name', 'Directory',
+ 'File Size', 'File Modification Date/Time', 'File Permissions',
+ 'File Type', 'MIME Type', 'Image Width', 'Image Height',
+ 'Image Size']
+ self._set_allowed()
+
+ def _set_allowed(self):
+ '''
+ Set the allowed/harmless list of metadata
+ '''
+ raise NotImplementedError
+
+ def remove_all(self):
+ '''
+ Remove all metadata with help of exiftool
+ '''
+ try:
+ if self.backup:
+ # Note: '-All=' must be followed by a known exiftool option.
+ process = subprocess.Popen(['exiftool', '-m', '-All=',
+ '-out', self.output, self.filename],
+ stdout=open('/dev/null'))
+ process.wait()
+ else:
+ # Note: '-All=' must be followed by a known exiftool option.
+ process = subprocess.Popen(
+ [ 'exiftool', '-m', '-All=', '-overwrite_original', self.filename ],
+ stdout=open('/dev/null'))
+ process.wait()
+ return True
+ except:
+ return False
+
+ def is_clean(self):
+ '''
+ Check if the file is clean with help of exiftool
+ '''
+ out = subprocess.Popen(['exiftool', self.filename],
+ stdout=subprocess.PIPE).communicate()[0]
+ out = out.split('\n')
+ for i in out[:-1]:
+ if i.split(':')[0].strip() not in self.allowed:
+ return False
+ return True
+
+ def get_meta(self):
+ '''
+ Return every harmful meta with help of exiftool
+ '''
+ out = subprocess.Popen(['exiftool', self.filename],
+ stdout=subprocess.PIPE).communicate()[0]
+ out = out.split('\n')
+ meta = {}
+ for i in out[:-1]:
+ key = i.split(':')[0].strip()
+ if key not in self.allowed:
+ meta[key] = i.split(':')[1].strip()
+ return meta
+
+
+class JpegStripper(ExiftoolStripper):
+ '''
+ Care about jpeg files with help
+ of exiftool
+ '''
+ def _set_allowed(self):
+ self.allowed.extend(['JFIF Version', 'Resolution Unit',
+ 'X Resolution', 'Y Resolution', 'Encoding Process', 'Bits Per Sample',
+ 'Color Components', 'Y Cb Cr Sub Sampling'])
+
+class PngStripper(ExiftoolStripper):
+ '''
+ Care about png files with help
+ of exiftool
+ '''
+ def _set_allowed(self):
+ self.allowed.extend(['Bit Depth', 'Color Type', 'Compression',
+ 'Filter', 'Interlace', 'Pixels Per Unit X', 'Pixels Per Unit Y',
+ 'Pixel Units'])
diff --git a/lib/hachoir_editor/__init__.py b/lib/hachoir_editor/__init__.py
new file mode 100644
index 0000000..1835676
--- /dev/null
+++ b/lib/hachoir_editor/__init__.py
@@ -0,0 +1,8 @@
+from field import (
+ EditorError, FakeField)
+from typed_field import (
+ EditableField, EditableBits, EditableBytes,
+ EditableInteger, EditableString,
+ createEditableField)
+from fieldset import EditableFieldSet, NewFieldSet, createEditor
+
diff --git a/lib/hachoir_editor/field.py b/lib/hachoir_editor/field.py
new file mode 100644
index 0000000..6b1efe3
--- /dev/null
+++ b/lib/hachoir_editor/field.py
@@ -0,0 +1,69 @@
+from hachoir_core.error import HachoirError
+from hachoir_core.field import joinPath, MissingField
+
+class EditorError(HachoirError):
+ pass
+
+class FakeField(object):
+ """
+ This class have API looks similar to Field API, but objects don't contain
+ any value: all values are _computed_ by parent methods.
+
+ Example: FakeField(editor, "abc").size calls editor._getFieldSize("abc").
+ """
+ is_field_set = False
+
+ def __init__(self, parent, name):
+ self._parent = parent
+ self._name = name
+
+ def _getPath(self):
+ return joinPath(self._parent.path, self._name)
+ path = property(_getPath)
+
+ def _getName(self):
+ return self._name
+ name = property(_getName)
+
+ def _getAddress(self):
+ return self._parent._getFieldAddress(self._name)
+ address = property(_getAddress)
+
+ def _getSize(self):
+ return self._parent.input[self._name].size
+ size = property(_getSize)
+
+ def _getValue(self):
+ return self._parent.input[self._name].value
+ value = property(_getValue)
+
+ def createDisplay(self):
+ # TODO: Returns new value if field is altered
+ return self._parent.input[self._name].display
+ display = property(createDisplay)
+
+ def _getParent(self):
+ return self._parent
+ parent = property(_getParent)
+
+ def hasValue(self):
+ return self._parent.input[self._name].hasValue()
+
+ def __getitem__(self, key):
+ # TODO: Implement this function!
+ raise MissingField(self, key)
+
+ def _isAltered(self):
+ return False
+ is_altered = property(_isAltered)
+
+ def writeInto(self, output):
+ size = self.size
+ addr = self._parent._getFieldInputAddress(self._name)
+ input = self._parent.input
+ stream = input.stream
+ if size % 8:
+ output.copyBitsFrom(stream, addr, size, input.endian)
+ else:
+ output.copyBytesFrom(stream, addr, size//8)
+
diff --git a/lib/hachoir_editor/fieldset.py b/lib/hachoir_editor/fieldset.py
new file mode 100644
index 0000000..a74c8e2
--- /dev/null
+++ b/lib/hachoir_editor/fieldset.py
@@ -0,0 +1,352 @@
+from hachoir_core.dict import UniqKeyError
+from hachoir_core.field import MissingField, Float32, Float64, FakeArray
+from hachoir_core.compatibility import any
+from hachoir_core.i18n import _
+from typed_field import createEditableField
+from field import EditorError
+from collections import deque # Python 2.4
+import weakref # Python 2.1
+import struct
+
+class EditableFieldSet(object):
+ MAX_SIZE = (1 << 40) # Arbitrary limit to catch errors
+ is_field_set = True
+
+ def __init__(self, parent, fieldset):
+ self._parent = parent
+ self.input = fieldset # original FieldSet
+ self._fields = {} # cache of editable fields
+ self._deleted = set() # Names of deleted fields
+ self._inserted = {} # Inserted field (name => list of field,
+ # where name is the name after)
+
+ def array(self, key):
+ # FIXME: Use cache?
+ return FakeArray(self, key)
+
+ def _getParent(self):
+ return self._parent
+ parent = property(_getParent)
+
+ def _isAltered(self):
+ if self._inserted:
+ return True
+ if self._deleted:
+ return True
+ return any(field.is_altered for field in self._fields.itervalues())
+ is_altered = property(_isAltered)
+
+ def reset(self):
+ """
+ Reset the field set and the input field set.
+ """
+ for key, field in self._fields.iteritems():
+ if not field.is_altered:
+ del self._fields[key]
+ self.input.reset()
+
+ def __len__(self):
+ return len(self.input) \
+ - len(self._deleted) \
+ + sum( len(new) for new in self._inserted.itervalues() )
+
+ def __iter__(self):
+ for field in self.input:
+ name = field.name
+ if name in self._inserted:
+ for newfield in self._inserted[name]:
+ yield weakref.proxy(newfield)
+ if name not in self._deleted:
+ yield self[name]
+ if None in self._inserted:
+ for newfield in self._inserted[None]:
+ yield weakref.proxy(newfield)
+
+ def insertBefore(self, name, *new_fields):
+ self._insert(name, new_fields, False)
+
+ def insertAfter(self, name, *new_fields):
+ self._insert(name, new_fields, True)
+
+ def insert(self, *new_fields):
+ self._insert(None, new_fields, True)
+
+ def _insert(self, key, new_fields, next):
+ """
+ key is the name of the field before which new_fields
+ will be inserted. If next is True, the fields will be inserted
+ _after_ this field.
+ """
+ # Set unique field name
+ for field in new_fields:
+ if field._name.endswith("[]"):
+ self.input.setUniqueFieldName(field)
+
+ # Check that there is no duplicate in inserted fields
+ new_names = list(field.name for field in new_fields)
+ names_set = set(new_names)
+ if len(names_set) != len(new_fields):
+ duplicates = (name for name in names_set if 1 < new_names.count(name))
+ raise UniqKeyError(_("Duplicates in inserted fields: %s") % ", ".join(duplicates))
+
+ # Check that field names are not in input
+ if self.input: # Write special version for NewFieldSet?
+ for name in new_names:
+ if name in self.input and name not in self._deleted:
+ raise UniqKeyError(_("Field name '%s' already exists") % name)
+
+ # Check that field names are not in inserted fields
+ for fields in self._inserted.itervalues():
+ for field in fields:
+ if field.name in new_names:
+ raise UniqKeyError(_("Field name '%s' already exists") % field.name)
+
+ # Input have already inserted field?
+ if key in self._inserted:
+ if next:
+ self._inserted[key].extend( reversed(new_fields) )
+ else:
+ self._inserted[key].extendleft( reversed(new_fields) )
+ return
+
+ # Whould like to insert in inserted fields?
+ if key:
+ for fields in self._inserted.itervalues():
+ names = [item.name for item in fields]
+ try:
+ pos = names.index(key)
+ except ValueError:
+ continue
+ if 0 <= pos:
+ if next:
+ pos += 1
+ fields.rotate(-pos)
+ fields.extendleft( reversed(new_fields) )
+ fields.rotate(pos)
+ return
+
+ # Get next field. Use None if we are at the end.
+ if next:
+ index = self.input[key].index + 1
+ try:
+ key = self.input[index].name
+ except IndexError:
+ key = None
+
+ # Check that field names are not in input
+ if key not in self.input:
+ raise MissingField(self, key)
+
+ # Insert in original input
+ self._inserted[key]= deque(new_fields)
+
+ def _getDescription(self):
+ return self.input.description
+ description = property(_getDescription)
+
+ def _getStream(self):
+ # FIXME: This property is maybe a bad idea since address may be differents
+ return self.input.stream
+ stream = property(_getStream)
+
+ def _getName(self):
+ return self.input.name
+ name = property(_getName)
+
+ def _getEndian(self):
+ return self.input.endian
+ endian = property(_getEndian)
+
+ def _getAddress(self):
+ if self._parent:
+ return self._parent._getFieldAddress(self.name)
+ else:
+ return 0
+ address = property(_getAddress)
+
+ def _getAbsoluteAddress(self):
+ address = self.address
+ current = self._parent
+ while current:
+ address += current.address
+ current = current._parent
+ return address
+ absolute_address = property(_getAbsoluteAddress)
+
+ def hasValue(self):
+ return False
+# return self._parent.input[self.name].hasValue()
+
+ def _getSize(self):
+ if self.is_altered:
+ return sum(field.size for field in self)
+ else:
+ return self.input.size
+ size = property(_getSize)
+
+ def _getPath(self):
+ return self.input.path
+ path = property(_getPath)
+
+ def _getOriginalField(self, name):
+ assert name in self.input
+ return self.input[name]
+
+ def _getFieldInputAddress(self, name):
+ """
+ Absolute address of a field from the input field set.
+ """
+ assert name in self.input
+ return self.input[name].absolute_address
+
+ def _getFieldAddress(self, name):
+ """
+ Compute relative address of a field. The operation takes care of
+ deleted and resized fields.
+ """
+ #assert name not in self._deleted
+ addr = 0
+ for field in self:
+ if field.name == name:
+ return addr
+ addr += field.size
+ raise MissingField(self, name)
+
+ def _getItemByPath(self, path):
+ if not path[0]:
+ path = path[1:]
+ field = self
+ for name in path:
+ field = field[name]
+ return field
+
+ def __contains__(self, name):
+ try:
+ field = self[name]
+ return (field is not None)
+ except MissingField:
+ return False
+
+ def __getitem__(self, key):
+ """
+ Create a weak reference to an editable field (EditableField) for the
+ field with specified name. If the field is removed later, using the
+ editable field will raise a weakref.ReferenceError exception.
+
+ May raise a MissingField error if the field doesn't exist in original
+ field set or it has been deleted.
+ """
+ if "/" in key:
+ return self._getItemByPath(key.split("/"))
+ if isinstance(key, (int, long)):
+ raise EditorError("Integer index are not supported")
+
+ if (key in self._deleted) or (key not in self.input):
+ raise MissingField(self, key)
+ if key not in self._fields:
+ field = self.input[key]
+ if field.is_field_set:
+ self._fields[key] = createEditableFieldSet(self, field)
+ else:
+ self._fields[key] = createEditableField(self, field)
+ return weakref.proxy(self._fields[key])
+
+ def __delitem__(self, name):
+ """
+ Remove a field from the field set. May raise an MissingField exception
+ if the field has already been deleted.
+ """
+ parts = name.partition('/')
+ if parts[2]:
+ fieldset = self[parts[0]]
+ del fieldset[part[2]]
+ return
+ if name in self._deleted:
+ raise MissingField(self, name)
+ self._deleted.add(name)
+ if name in self._fields:
+ del self._fields[name]
+
+ def writeInto(self, output):
+ """
+ Write the content if this field set into the output stream
+ (OutputStream).
+ """
+ if not self.is_altered:
+ # Not altered: just copy bits/bytes
+ input = self.input
+ if input.size % 8:
+ output.copyBitsFrom(input.stream,
+ input.absolute_address, input.size, input.endian)
+ else:
+ output.copyBytesFrom(input.stream,
+ input.absolute_address, input.size//8)
+ else:
+ # Altered: call writeInto() method of each field
+ realaddr = 0
+ for field in self:
+ field.writeInto(output)
+ realaddr += field.size
+
+ def _getValue(self):
+ raise EditorError('Field set "%s" has no value' % self.path)
+ def _setValue(self, value):
+ raise EditorError('Field set "%s" value is read only' % self.path)
+ value = property(_getValue, _setValue, "Value of field")
+
+class EditableFloat(EditableFieldSet):
+ _value = None
+
+ def _isAltered(self):
+ return (self._value is not None)
+ is_altered = property(_isAltered)
+
+ def writeInto(self, output):
+ if self._value is not None:
+ self._write(output)
+ else:
+ EditableFieldSet.writeInto(self, output)
+
+ def _write(self, output):
+ format = self.input.struct_format
+ raw = struct.pack(format, self._value)
+ output.writeBytes(raw)
+
+ def _setValue(self, value):
+ self.parent._is_altered = True
+ self._value = value
+ value = property(EditableFieldSet._getValue, _setValue)
+
+def createEditableFieldSet(parent, field):
+ cls = field.__class__
+ # FIXME: Support Float80
+ if cls in (Float32, Float64):
+ return EditableFloat(parent, field)
+ else:
+ return EditableFieldSet(parent, field)
+
+class NewFieldSet(EditableFieldSet):
+ def __init__(self, parent, name):
+ EditableFieldSet.__init__(self, parent, None)
+ self._name = name
+ self._endian = parent.endian
+
+ def __iter__(self):
+ if None in self._inserted:
+ return iter(self._inserted[None])
+ else:
+ raise StopIteration()
+
+ def _getName(self):
+ return self._name
+ name = property(_getName)
+
+ def _getEndian(self):
+ return self._endian
+ endian = property(_getEndian)
+
+ is_altered = property(lambda self: True)
+
+def createEditor(fieldset):
+ return EditableFieldSet(None, fieldset)
+
diff --git a/lib/hachoir_editor/typed_field.py b/lib/hachoir_editor/typed_field.py
new file mode 100644
index 0000000..0f0427b
--- /dev/null
+++ b/lib/hachoir_editor/typed_field.py
@@ -0,0 +1,253 @@
+from hachoir_core.field import (
+ RawBits, Bit, Bits, PaddingBits,
+ RawBytes, Bytes, PaddingBytes,
+ GenericString, Character,
+ isInteger, isString)
+from field import FakeField
+
class EditableField(FakeField):
    """
    Pure virtual class used to write editable field classes.

    The field proxies the original (parsed) value until a replacement is
    assigned through the ``value`` property; from then on it is flagged
    as altered and serializes the replacement instead.
    """

    # flipped to True on the first assignment through the value property
    _is_altered = False
    def __init__(self, parent, name, value=None):
        FakeField.__init__(self, parent, name)
        self._value = value

    def _isAltered(self):
        return self._is_altered
    is_altered = property(_isAltered)

    def hasValue(self):
        return True

    def _computeSize(self):
        # subclasses with a variable size must implement this
        raise NotImplementedError()
    def _getValue(self):
        return self._value
    def _setValue(self, value):
        self._value = value

    def _propGetValue(self):
        # Prefer the replacement value; fall back to the original field.
        if self._value is not None:
            return self._getValue()
        else:
            return FakeField._getValue(self)
    def _propSetValue(self, value):
        # Assignment goes through the subclass hook, then marks the field.
        self._setValue(value)
        self._is_altered = True
    value = property(_propGetValue, _propSetValue)

    def _getSize(self):
        # Size follows the same override logic as the value.
        if self._value is not None:
            return self._computeSize()
        else:
            return FakeField._getSize(self)
    size = property(_getSize)

    def _write(self, output):
        # subclasses must implement the actual serialization
        raise NotImplementedError()

    def writeInto(self, output):
        # Altered fields serialize themselves; pristine ones copy through.
        if self._is_altered:
            self._write(output)
        else:
            return FakeField.writeInto(self, output)
+
class EditableFixedField(EditableField):
    """
    Editable field whose size (in bits) never changes.
    """

    def __init__(self, parent, name, value=None, size=None):
        EditableField.__init__(self, parent, name, value)
        if size is None:
            # Fall back to the size of the original parsed field.
            size = self._parent._getOriginalField(self._name).size
        self._size = size

    def _getSize(self):
        return self._size
    size = property(_getSize)
+
class EditableBits(EditableFixedField):
    """Editable bit field with range-checked unsigned integer values."""

    def __init__(self, parent, name, *args):
        size = value = None
        if args:
            if len(args) != 2:
                raise TypeError(
                    "Wrong argument count, EditableBits constructor prototype is: "
                    "(parent, name, [size, value])")
            size, value = args
            assert isinstance(value, (int, long))
        EditableFixedField.__init__(self, parent, name, value, size)
        if args:
            # Validate the initial value and mark the field as modified.
            self._setValue(args[1])
            self._is_altered = True

    def _setValue(self, value):
        # The value must fit in self._size bits.
        limit = 1 << self._size
        if not (0 <= value < limit):
            raise ValueError("Invalid value, must be in range %s..%s"
                % (0, limit - 1))
        self._value = value

    def _write(self, output):
        output.writeBits(self._size, self._value, self._parent.endian)
+
class EditableBytes(EditableField):
    """Editable raw byte string of variable length."""

    def _setValue(self, value):
        # An empty byte string cannot be represented.
        if not value:
            raise ValueError(
                "Unable to set empty string to a EditableBytes field")
        self._value = value

    def _computeSize(self):
        # size is expressed in bits
        return len(self._value) * 8

    def _write(self, output):
        output.writeBytes(self._value)
+
class EditableString(EditableField):
    """
    Editable character string.

    Handles the hachoir string layouts: suffix-terminated (e.g. C
    strings), fixed-size, and Pascal strings whose length prefix bounds
    the maximum content size (see MAX_SIZE).
    """

    # largest content length encodable per Pascal length-prefix format
    MAX_SIZE = {
        "Pascal8": (1 << 8)-1,
        "Pascal16": (1 << 16)-1,
        "Pascal32": (1 << 32)-1,
    }

    def __init__(self, parent, name, *args, **kw):
        # Two call shapes: (parent, name) edits an existing string field;
        # (parent, name, format, value[, charset=...]) creates a new one.
        if len(args) == 2:
            value = args[1]
            assert isinstance(value, str) # TODO: support Unicode
        elif not args:
            value = None
        else:
            raise TypeError(
                "Wrong argument count, EditableString constructor prototype is:"
                "(parent, name, [format, value])")
        EditableField.__init__(self, parent, name, value)
        if len(args) == 2:
            # new string: derive the prefix/suffix layout from the format
            self._charset = kw.get('charset', None)
            self._format = args[0]
            if self._format in GenericString.PASCAL_FORMATS:
                self._prefix_size = GenericString.PASCAL_FORMATS[self._format]
            else:
                self._prefix_size = 0
            self._suffix_str = GenericString.staticSuffixStr(
                self._format, self._charset, self._parent.endian)
            self._is_altered = True
        else:
            # editing: copy the layout of the original parsed field
            orig = self._parent._getOriginalField(name)
            self._charset = orig.charset
            self._format = orig.format
            self._prefix_size = orig.content_offset
            self._suffix_str = orig.suffix_str

    def _setValue(self, value):
        # Pascal strings cannot exceed what their length prefix encodes.
        size = len(value)
        if self._format in self.MAX_SIZE and self.MAX_SIZE[self._format] < size:
            raise ValueError("String is too big")
        self._value = value

    def _computeSize(self):
        # total size in bits: length prefix + content + terminator
        return (self._prefix_size + len(self._value) + len(self._suffix_str))*8

    def _write(self, output):
        if self._format in GenericString.SUFFIX_FORMAT:
            # content followed by its terminator (e.g. NUL)
            output.writeBytes(self._value)
            output.writeBytes(self._suffix_str)
        elif self._format == "fixed":
            output.writeBytes(self._value)
        else:
            # Pascal string: length prefix, then the content
            assert self._format in GenericString.PASCAL_FORMATS
            size = GenericString.PASCAL_FORMATS[self._format]
            output.writeInteger(len(self._value), False, size, self._parent.endian)
            output.writeBytes(self._value)
+
class EditableCharacter(EditableFixedField):
    """
    Editable single-character (8-bit) field.
    """
    def __init__(self, parent, name, *args):
        # Call shapes: (parent, name) edits an existing character field;
        # (parent, name, value) creates one with an initial value.
        if args:
            # BUG FIX: the original tested `len(args) != 3` (copy-pasted
            # from EditableInteger), so the documented one-argument form
            # always raised TypeError while bogus 3-argument calls passed.
            if len(args) != 1:
                raise TypeError(
                    "Wrong argument count, EditableCharacter "
                    "constructor prototype is: (parent, name, [value])")
            value = args[0]
            if not isinstance(value, str) or len(value) != 1:
                raise TypeError("EditableCharacter needs a character")
        else:
            value = None
        EditableFixedField.__init__(self, parent, name, value, 8)
        if args:
            self._is_altered = True

    def _setValue(self, value):
        # accept exactly one character
        if not isinstance(value, str) or len(value) != 1:
            raise TypeError("EditableCharacter needs a character")
        self._value = value

    def _write(self, output):
        output.writeBytes(self._value)
+
class EditableInteger(EditableFixedField):
    """
    Editable integer field with range validation.
    """

    # valid (min, max) value per size in bits
    # BUG FIX: the signed 8-bit entry was (-(1 << 8), (1 << 8)-1), i.e.
    # -256..255, which cannot be represented in a signed byte; it is now
    # -128..127, consistent with the 16- and 32-bit entries.
    VALID_VALUE_SIGNED = {
        8: (-(1 << 7), (1 << 7)-1),
        16: (-(1 << 15), (1 << 15)-1),
        32: (-(1 << 31), (1 << 31)-1),
    }
    VALID_VALUE_UNSIGNED = {
        8: (0, (1 << 8)-1),
        16: (0, (1 << 16)-1),
        32: (0, (1 << 32)-1)
    }

    def __init__(self, parent, name, *args):
        # Call shapes: (parent, name) edits an existing integer field;
        # (parent, name, signed, size, value) creates a new one.
        if args:
            if len(args) != 3:
                raise TypeError(
                    "Wrong argument count, EditableInteger constructor prototype is: "
                    "(parent, name, [signed, size, value])")
            size = args[1]
            value = args[2]
            assert isinstance(value, (int, long))
        else:
            size = None
            value = None
        EditableFixedField.__init__(self, parent, name, value, size)
        if args:
            self._signed = args[0]
            self._is_altered = True
        else:
            # signedness comes from the original parsed field
            self._signed = self._parent._getOriginalField(self._name).signed

    def _setValue(self, value):
        # Reject values the field's size/signedness cannot hold.
        if self._signed:
            valid = self.VALID_VALUE_SIGNED
        else:
            valid = self.VALID_VALUE_UNSIGNED
        minval, maxval = valid[self._size]
        if not(minval <= value <= maxval):
            raise ValueError("Invalid value, must be in range %s..%s"
                % (minval, maxval))
        self._value = value

    def _write(self, output):
        output.writeInteger(
            self.value, self._signed, self._size//8, self._parent.endian)
+
def createEditableField(fieldset, field):
    """Pick and instantiate the editable wrapper matching *field*'s type."""
    klass = field.__class__
    if isInteger(field):
        wrapper = EditableInteger
    elif isString(field):
        wrapper = EditableString
    elif klass in (RawBytes, Bytes, PaddingBytes):
        wrapper = EditableBytes
    elif klass in (RawBits, Bits, Bit, PaddingBits):
        wrapper = EditableBits
    elif klass is Character:
        wrapper = EditableCharacter
    else:
        # unknown type: wrap read-only
        wrapper = FakeField
    return wrapper(fieldset, field.name)
+
diff --git a/lib/images.py b/lib/images.py
new file mode 100644
index 0000000..3eb3544
--- /dev/null
+++ b/lib/images.py
@@ -0,0 +1,48 @@
+'''
+ Takes care about pictures formats
+'''
+
+import parser
+
+
class JpegStripper(parser.GenericParser):
    '''
    represents a jpeg file
    remaining :
    http://www.sno.phy.queensu.ca/~phil/exiftool/TagNames/CanonRaw.html
    '''
    def _should_remove(self, field):
        '''
        Return True if the given hachoir field carries metadata
        that must be stripped.
        '''
        name = field.name
        if name.startswith('comment'):
            return True
        elif name in ('photoshop', 'exif', 'adobe', 'app12'):
            return True
        elif name == 'icc': # should we remove the icc profile ?
            # BUG FIX: the original tested `name in ('icc')`; without a
            # comma the parentheses are not a tuple, so this was substring
            # membership in the string 'icc' and fields named 'i', 'c',
            # 'ic' or 'cc' matched as well.
            return True
        else:
            return False
+
+
class PngStripper(parser.GenericParser):
    '''
    represents a png file
    see : http://www.sno.phy.queensu.ca/~phil/exiftool/TagNames/PNG.html
    '''
    def _should_remove(self, field):
        '''
        return True if the field is compromizing
        '''
        name = field.name
        # textual meta, uncompressed adobe crap, compressed adobe crap
        harmful_prefixes = ('text[', 'utf8_text[', 'compt_text[')
        if name.startswith(harmful_prefixes):
            return True
        # the tIME chunk is a timestamp
        return name == "time"
diff --git a/lib/mat.py b/lib/mat.py
new file mode 100644
index 0000000..53d02d8
--- /dev/null
+++ b/lib/mat.py
@@ -0,0 +1,150 @@
+#!/usr/bin/env python
+
+'''
+ Metadata anonymisation toolkit library
+'''
+
+import os
+import subprocess
+import logging
+import mimetypes
+import xml.sax
+
+import hachoir_core.cmd_line
+import hachoir_parser
+
+import strippers
+
__version__ = '0.2.2'
__author__ = 'jvoisin'

# Silence: only log critical problems and keep hachoir quiet.
LOGGING_LEVEL = logging.CRITICAL
hachoir_core.config.quiet = True
# NOTE(review): an empty filename is falsy, so basicConfig falls back to
# logging on stderr -- presumably the intended "silent" behaviour; confirm.
fname = ''

# Verbose (debugging) alternative: uncomment these lines.
#LOGGING_LEVEL = logging.DEBUG
#hachoir_core.config.quiet = False
#logname = 'report.log'

logging.basicConfig(filename=fname, level=LOGGING_LEVEL)
+
+
def get_sharedir():
    '''
    An ugly hack to find where the "FORMATS" file lives.

    Returns '' when running from the source tree, a system share
    directory (always with a trailing slash, because callers build paths
    by plain string concatenation), or None when nothing is found.
    '''
    if os.path.isfile('FORMATS'):
        return ''
    elif os.path.exists('/usr/local/share/mat/'):
        return '/usr/local/share/mat/'
    elif os.path.exists('/usr/share/mat/'):
        # BUG FIX: this returned '/usr/share/mat' (no trailing slash),
        # inconsistent with the branch above; concatenating a filename
        # produced '/usr/share/matFORMATS'.
        return '/usr/share/mat/'
    return None
+
+
class XMLParser(xml.sax.handler.ContentHandler):
    '''
    SAX handler turning the supported-formats XML file into a list of
    dicts, one dict per <format> section.
    '''
    def __init__(self):
        self.dict = {}
        self.list = []
        self.content = ''
        self.key = ''
        self.between = False

    def startElement(self, name, attrs):
        '''
        Called on every opening tag: remember the current key and reset
        the accumulated text.
        '''
        self.between = True
        self.key = name
        self.content = ''

    def endElement(self, name):
        '''
        Called on every closing tag.
        '''
        if name == 'format':
            # end of a fileformat section: flush the accumulated dict
            self.list.append(self.dict.copy())
            self.dict.clear()
        else:
            self.dict[self.key] = self.content.replace('\s', ' ')
            self.between = False

    def characters(self, characters):
        '''
        Accumulate the text found between the current pair of tags.
        '''
        if self.between:
            self.content += characters
+
+
def secure_remove(filename):
    '''
    Securely remove *filename*: shell out to shred(1) first, and fall
    back to a plain os.remove() if shred is unavailable or fails.
    '''
    removed = False
    try:
        # BUG FIX: the return code of shred was ignored, so a failing
        # shred run was still treated as a successful secure removal.
        removed = subprocess.call(['shred', '--remove', filename]) == 0
    except OSError: # the shred binary is not available
        pass

    if removed is False:
        logging.error('Unable to securely remove %s' % filename)
        try:
            os.remove(filename)
        except OSError:
            logging.error('Unable to remove %s' % filename)
+
+
def create_class_file(name, backup, add2archive):
    '''
    Return a $FILETYPEStripper() instance corresponding to the filetype
    of the given file, or None when the file cannot be handled.
    '''
    if not os.path.isfile(name):
        # check if the file exists
        logging.error('%s is not a valid file' % name)
        return None

    if not os.access(name, os.R_OK):
        # check read permissions
        # FIX: message read "%s is is not readable"
        logging.error('%s is not readable' % name)
        return None

    if not os.access(name, os.W_OK):
        # check write permission
        # FIX: message read "%s is not writtable"
        logging.error('%s is not writable' % name)
        return None

    filename = ''
    try:
        filename = hachoir_core.cmd_line.unicodeFilename(name)
    except TypeError: # get rid of "decoding Unicode is not supported"
        filename = name

    parser = hachoir_parser.createParser(filename)
    if not parser:
        logging.info('Unable to parse %s' % filename)
        return None

    mime = parser.mime_type

    if mime == 'application/zip': # some formats are zipped stuff
        mime = mimetypes.guess_type(name)[0]
        if mime is None:
            # ROBUSTNESS FIX: guess_type() may fail, and the startswith()
            # calls below would crash on None
            logging.info('Unable to guess the mime type of %s' % name)
            return None

    if mime.startswith('application/vnd.oasis.opendocument'):
        mime = 'application/opendocument' # opendocument fileformat
    elif mime.startswith('application/vnd.openxmlformats-officedocument'):
        mime = 'application/officeopenxml' # office openxml

    try:
        stripper_class = strippers.STRIPPERS[mime]
    except KeyError:
        logging.info('Don\'t have stripper for %s format' % mime)
        return None

    return stripper_class(filename, parser, mime, backup, add2archive)
diff --git a/lib/misc.py b/lib/misc.py
new file mode 100644
index 0000000..d084861
--- /dev/null
+++ b/lib/misc.py
@@ -0,0 +1,63 @@
+'''
+ Care about misc formats
+'''
+
+import parser
+
+from bencode import bencode
+
+
class TorrentStripper(parser.GenericParser):
    '''
    Represent a torrent file with the help
    of the bencode lib from Petru Paler
    '''
    def __init__(self, filename, parser, mime, backup, add2archive):
        super(TorrentStripper, self).__init__(filename, parser, mime,
            backup, add2archive)
        # bencode dictionary keys that carry personal metadata
        self.fields = ['comment', 'creation date', 'created by']

    def is_clean(self):
        '''
        Check if the file is clean from harmful metadatas
        '''
        with open(self.filename, 'r') as f:
            decoded = bencode.bdecode(f.read())
        for key in self.fields:
            try:
                if decoded[key] != '':
                    return False
            except: # the key is absent: nothing to leak for this field
                pass
        return True

    def get_meta(self):
        '''
        Return a dict with all the meta of the file
        '''
        metadata = {}
        with open(self.filename, 'r') as f:
            decoded = bencode.bdecode(f.read())
        for key in self.fields:
            try:
                if decoded[key] != '':
                    metadata[key] = decoded[key]
            except: # the key is absent
                pass
        return metadata

    def remove_all(self):
        '''
        Remove all the fields that are compromizing: each harmful key is
        blanked (not deleted), then the torrent is re-encoded.
        '''
        with open(self.filename, 'r') as f:
            decoded = bencode.bdecode(f.read())
        for key in self.fields:
            try:
                decoded[key] = ''
            except:
                pass
        with open(self.output, 'w') as f: # encode the decoded torrent
            f.write(bencode.bencode(decoded)) # and write it in self.output
        self.do_backup()
        return True
diff --git a/lib/office.py b/lib/office.py
new file mode 100644
index 0000000..e1d738e
--- /dev/null
+++ b/lib/office.py
@@ -0,0 +1,305 @@
+'''
+ Care about office's formats
+'''
+
+import os
+import logging
+import zipfile
+import fileinput
+import subprocess
+import xml.dom.minidom as minidom
+
+try:
+ import cairo
+ import poppler
+except ImportError:
+ pass
+
+import mat
+import parser
+import archive
+
class OpenDocumentStripper(archive.GenericArchiveStripper):
    '''
    An open document file is a zip, with xml file into.
    The one that interest us is meta.xml
    '''

    def get_meta(self):
        '''
        Return a dict with all the meta of the file by
        trying to read the meta.xml file.
        '''
        zipin = zipfile.ZipFile(self.filename, 'r')
        metadata = {}
        try:
            content = zipin.read('meta.xml')
            dom1 = minidom.parseString(content)
            elements = dom1.getElementsByTagName('office:meta')
            for i in elements[0].childNodes:
                if i.tagName != 'meta:document-statistic':
                    # strip the 'meta:' namespace prefix from the node name
                    nodename = ''.join([k for k in i.nodeName.split(':')[1:]])
                    metadata[nodename] = ''.join([j.data for j in i.childNodes])
                else:
                    # thank you w3c for not providing a nice
                    # method to get all attributes from a node
                    pass
            zipin.close()
        except KeyError: # no meta.xml file found
            # NOTE(review): on this path zipin is never closed -- confirm
            # whether the leak is acceptable before changing behaviour.
            logging.debug('%s has no opendocument metadata' % self.filename)
        return metadata

    def _remove_all(self, method):
        '''
        Rebuild the archive into self.output without meta.xml and with
        every embedded file cleaned ('normal' or strict *method*).

        FIXME ?
        There is a patch implementing the Zipfile.remove()
        method here : http://bugs.python.org/issue6818
        '''
        zipin = zipfile.ZipFile(self.filename, 'r')
        zipout = zipfile.ZipFile(self.output, 'w', allowZip64=True)

        for item in zipin.namelist():
            name = os.path.join(self.tempdir, item)
            _, ext = os.path.splitext(name)

            if item.endswith('manifest.xml'):
                # contain the list of all files present in the archive
                zipin.extract(item, self.tempdir)
                for line in fileinput.input(name, inplace=1):
                    # remove the line which contains "meta.xml"
                    line = line.strip()
                    if not 'meta.xml' in line:
                        print line
                zipout.write(name, item)

            elif ext in parser.NOMETA or item == 'mimetype':
                # keep NOMETA files, and the "manifest" file
                if item != 'meta.xml': # contains the metadata
                    zipin.extract(item, self.tempdir)
                    zipout.write(name, item)

            else:
                zipin.extract(item, self.tempdir)
                if os.path.isfile(name):
                    try:
                        cfile = mat.create_class_file(name, False,
                            self.add2archive)
                        if method == 'normal':
                            cfile.remove_all()
                        else:
                            cfile.remove_all_strict()
                        logging.debug('Processing %s from %s' % (item,
                            self.filename))
                        zipout.write(name, item)
                    except:
                        # unsupported embedded format: keep it only if
                        # add2archive was requested
                        logging.info('%s\' fileformat is not supported' % item)
                        if self.add2archive:
                            zipout.write(name, item)
        # the archive comment can leak metadata too
        zipout.comment = ''
        logging.info('%s treated' % self.filename)
        zipin.close()
        zipout.close()
        self.do_backup()
        return True

    def is_clean(self):
        '''
        Check if the file is clean from harmful metadatas
        '''
        zipin = zipfile.ZipFile(self.filename, 'r')
        try:
            zipin.getinfo('meta.xml')
        except KeyError: # no meta.xml in the file
            # no opendocument metadata: defer to the generic zip check
            czf = archive.ZipStripper(self.filename, self.parser,
                'application/zip', self.backup, self.add2archive)
            if czf.is_clean():
                zipin.close()
                return True
        zipin.close()
        return False
+
+
class PdfStripper(parser.GenericParser):
    '''
    Represent a PDF file
    '''
    def __init__(self, filename, parser, mime, backup, add2archive):
        super(PdfStripper, self).__init__(filename, parser, mime, backup,
            add2archive)
        uri = 'file://' + os.path.abspath(self.filename)
        self.password = None
        self.document = poppler.document_new_from_file(uri, self.password)
        # poppler document properties considered harmful metadata
        self.meta_list = ('title', 'author', 'subject', 'keywords', 'creator',
            'producer', 'metadata')

    def is_clean(self):
        '''
        Check if the file is clean from harmful metadatas
        '''
        for key in self.meta_list:
            if self.document.get_property(key) is not None and \
                    self.document.get_property(key) != '':
                return False
        return True

    def remove_all(self):
        '''
        Remove superficial metadata only.
        '''
        return self._remove_meta()

    def remove_all_strict(self):
        '''
        Opening the PDF with poppler, then doing a render
        on a cairo pdfsurface for each page.
        Thanks to Lunar for the idea.
        http://cairographics.org/documentation/pycairo/2/
        python-poppler is not documented at all : have fun ;)
        '''
        page = self.document.get_page(0)
        page_width, page_height = page.get_size()
        surface = cairo.PDFSurface(self.output, page_width, page_height)
        context = cairo.Context(surface) # context draws on the surface
        logging.debug('PDF rendering of %s' % self.filename)
        for pagenum in xrange(self.document.get_n_pages()):
            page = self.document.get_page(pagenum)
            context.translate(0, 0)
            page.render(context) # render the page on context
            context.show_page() # draw context on surface
        surface.finish()
        return self._remove_meta()

    def _remove_meta(self):
        '''
        Remove superficial/external metadata
        from a PDF file, using pdfrw,
        or exiftool as a second attempt.
        '''
        processed = False
        try: # try with pdfrw
            import pdfrw
            # For now, poppler cannot write meta, so we must use pdfrw
            logging.debug('Removing %s\'s superficial metadata' % self.filename)
            trailer = pdfrw.PdfReader(self.output)
            # BUG FIX: the middle assignment was `trailer.Author`, which
            # set an unused attribute on the trailer object itself and
            # left the Author entry of the Info dictionary untouched.
            trailer.Info.Producer = trailer.Info.Author = trailer.Info.Creator = None
            writer = pdfrw.PdfWriter()
            writer.trailer = trailer
            writer.write(self.output)
            self.do_backup()
            processed = True
        except Exception: # pdfrw missing or unable to process the file
            pass

        try: # try with exiftool
            # NOTE(review): this probe leaves a Popen object and an open
            # /dev/null fd behind; kept as-is to preserve behaviour.
            subprocess.Popen('exiftool', stdout=open('/dev/null'))
            import exiftool
            if self.backup:
                process = subprocess.Popen(['exiftool', '-m', '-All=',
                    '-out', self.output, self.filename], stdout=open('/dev/null'))
                process.wait()
            else:
                # Note: '-All=' must be followed by a known exiftool option.
                process = subprocess.Popen(
                    ['exiftool', '-All=', '-overwrite_original', self.filename],
                    stdout=open('/dev/null'))
                process.wait()
            processed = True
        except Exception: # exiftool missing or failing
            pass

        if processed is False:
            logging.error('Please install either pdfrw, or exiftool to '
                'fully handle PDF files')
        return processed

    def get_meta(self):
        '''
        Return a dict with all the meta of the file
        '''
        metadata = {}
        for key in self.meta_list:
            if self.document.get_property(key) is not None and \
                    self.document.get_property(key) != '':
                metadata[key] = self.document.get_property(key)
        return metadata
+
+
class OpenXmlStripper(archive.GenericArchiveStripper):
    '''
    Represent an office openxml document, which is like
    an opendocument format, with some tricky stuff added.
    It contains mostly xml, but can have media blobs, crap, ...
    (I don't like this format.)
    '''
    def _remove_all(self, method):
        '''
        Rebuild the archive into self.output, dropping the 'docProps/'
        entries (the metadata) and cleaning every embedded file.

        FIXME ?
        There is a patch implementing the Zipfile.remove()
        method here : http://bugs.python.org/issue6818
        '''
        zipin = zipfile.ZipFile(self.filename, 'r')
        zipout = zipfile.ZipFile(self.output, 'w',
            allowZip64=True)
        for item in zipin.namelist():
            name = os.path.join(self.tempdir, item)
            _, ext = os.path.splitext(name)
            if item.startswith('docProps/'): # metadatas
                pass
            elif ext in parser.NOMETA or item == '.rels':
                # keep parser.NOMETA files, and the file named ".rels"
                zipin.extract(item, self.tempdir)
                zipout.write(name, item)
            else:
                zipin.extract(item, self.tempdir)
                if os.path.isfile(name): # don't care about folders
                    try:
                        cfile = mat.create_class_file(name, False,
                            self.add2archive)
                        if method == 'normal':
                            cfile.remove_all()
                        else:
                            cfile.remove_all_strict()
                        logging.debug('Processing %s from %s' % (item,
                            self.filename))
                        zipout.write(name, item)
                    except Exception:
                        # unsupported embedded format: keep it only if
                        # add2archive was requested
                        logging.info('%s\' fileformat is not supported' % item)
                        if self.add2archive:
                            zipout.write(name, item)
        # the archive comment can leak metadata too
        zipout.comment = ''
        logging.info('%s treated' % self.filename)
        zipin.close()
        zipout.close()
        self.do_backup()
        return True

    def is_clean(self):
        '''
        Check if the file is clean from harmful metadatas
        '''
        zipin = zipfile.ZipFile(self.filename, 'r')
        try:
            for item in zipin.namelist():
                if item.startswith('docProps/'):
                    return False
        finally:
            # BUG FIX: the original returned from inside the loop without
            # closing zipin, leaking the file handle.
            zipin.close()
        czf = archive.ZipStripper(self.filename, self.parser,
            'application/zip', self.backup, self.add2archive)
        return czf.is_clean()

    def get_meta(self):
        '''
        Return a dict with all the meta of the file
        '''
        zipin = zipfile.ZipFile(self.filename, 'r')
        metadata = {}
        for item in zipin.namelist():
            if item.startswith('docProps/'):
                metadata[item] = 'harmful content'
        zipin.close()
        return metadata
diff --git a/lib/parser.py b/lib/parser.py
new file mode 100644
index 0000000..6dc5d0b
--- /dev/null
+++ b/lib/parser.py
@@ -0,0 +1,130 @@
+'''
+ Parent class of all parser
+'''
+
+import hachoir_core
+import hachoir_editor
+
+import os
+
+import mat
+
# File extensions that cannot carry metadata and are kept as-is:
NOMETA = ('.bmp', '.rdf', '.txt', '.xml', '.rels')
#bmp : image
#rdf : text
#txt : plain text
#xml : formatted text
#rels : openxml formatted text


# Sentinel returned by _should_remove() to mean "recurse into this
# nested field set" instead of a plain True/False answer.
FIELD = object()
+
class GenericParser(object):
    '''
    Parent class of all parsers.

    Subclasses implement _should_remove(field), returning True for a
    harmful field, False for a harmless one, or the FIELD sentinel to
    recurse into a nested field set.
    '''
    def __init__(self, filename, parser, mime, backup, add2archive):
        self.filename = ''
        self.parser = parser
        self.mime = mime
        self.backup = backup
        self.editor = hachoir_editor.createEditor(parser)
        self.realname = filename
        try:
            self.filename = hachoir_core.cmd_line.unicodeFilename(filename)
        except TypeError: # get rid of "decoding Unicode is not supported"
            self.filename = filename
        basename, ext = os.path.splitext(filename)
        # the cleaned copy is written next to the original file
        self.output = basename + '.cleaned' + ext
        self.basename = os.path.basename(filename) # only filename

    def is_clean(self):
        '''
        Check if the file is clean from harmful metadatas
        '''
        for field in self.editor:
            # any truthy answer (True or FIELD) warrants a full recursive check
            if self._should_remove(field):
                return self._is_clean(self.editor)
        return True

    def _is_clean(self, fieldset):
        # Recursive helper: False as soon as one removable field is found.
        for field in fieldset:
            remove = self._should_remove(field)
            if remove is True:
                return False
            if remove is FIELD:
                if not self._is_clean(field):
                    return False
        return True

    def remove_all(self):
        '''
        Remove all the fields that are compromizing, then write the
        cleaned file to self.output.
        '''
        state = self._remove_all(self.editor)
        hachoir_core.field.writeIntoFile(self.editor, self.output)
        self.do_backup()
        return state

    def _remove_all(self, fieldset):
        # Returns True on success, False if anything went wrong.
        try:
            for field in fieldset:
                remove = self._should_remove(field)
                if remove is True:
                    self._remove(fieldset, field.name)
                if remove is FIELD:
                    self._remove_all(field) # recurse into nested sets
            return True
        except Exception:
            return False

    def remove_all_strict(self):
        '''
        If the remove_all() is not efficient enough,
        this method is implemented :
        It is efficient, but destructive.
        In a perfect world, with nice fileformat,
        this method would not exist.
        '''
        self.remove_all()

    def _remove(self, fieldset, field):
        '''
        Delete the given field
        '''
        del fieldset[field]

    def get_meta(self):
        '''
        Return a dict with all the meta of the file
        '''
        metadata = {}
        self._get_meta(self.editor, metadata)
        return metadata

    def _get_meta(self, fieldset, metadata):
        # Fill *metadata* with the name/value of every harmful field.
        for field in fieldset:
            remove = self._should_remove(field)
            if remove is True:
                try:
                    metadata[field.name] = field.value
                except Exception:
                    metadata[field.name] = 'harmful content'
            if remove is FIELD:
                # BUG FIX: the recursive call was `self._get_meta(field)`,
                # missing the `metadata` argument, which raised TypeError
                # whenever a nested field set had to be inspected.
                self._get_meta(field, metadata)

    def _should_remove(self, key):
        '''
        return True if the field is compromizing
        abstract method
        '''
        raise NotImplementedError

    def do_backup(self):
        '''
        If no backup was requested, securely wipe the original file and
        move the cleaned output into its place.
        '''
        if self.backup is False:
            mat.secure_remove(self.filename)
            os.rename(self.output, self.filename)
diff --git a/lib/strippers.py b/lib/strippers.py
new file mode 100644
index 0000000..7d27874
--- /dev/null
+++ b/lib/strippers.py
@@ -0,0 +1,48 @@
+'''
+ Manage which fileformat can be processed
+'''
+
+import images
+import audio
+import office
+import archive
+import misc
+import subprocess
+
# mime type -> stripper class for the formats supported out of the box
STRIPPERS = {
    'application/x-tar': archive.TarStripper,
    'application/x-gzip': archive.GzipStripper,
    'application/x-bzip2': archive.Bzip2Stripper,
    'application/zip': archive.ZipStripper,
    'audio/mpeg': audio.MpegAudioStripper,
    'application/x-bittorrent': misc.TorrentStripper,
    'application/opendocument': office.OpenDocumentStripper,
    'application/officeopenxml': office.OpenXmlStripper,
}

try: # PDF support
    import poppler
    import cairo
    STRIPPERS['application/x-pdf'] = office.PdfStripper
    STRIPPERS['application/pdf'] = office.PdfStripper
except ImportError:
    print('Unable to import python-poppler and/or python-cairo: no PDF \
support')

try: # mutagen-python : audio format support
    import mutagen
    STRIPPERS['audio/x-flac'] = audio.FlacStripper
    STRIPPERS['audio/vorbis'] = audio.OggStripper
except ImportError:
    print('Unable to import python-mutagen: limited audio format support')

try: # check if exiftool is installed on the system
    # NOTE(review): this probe leaves a Popen object and an open /dev/null
    # fd behind, and the bare except below also hides errors raised by
    # `import exiftool` itself -- confirm before tightening it.
    subprocess.Popen('exiftool', stdout=open('/dev/null'))
    import exiftool
    STRIPPERS['image/jpeg'] = exiftool.JpegStripper
    STRIPPERS['image/png'] = exiftool.PngStripper
except: # if exiftool is not installed, use hachoir
    print('Unable to find exiftool: limited images support')
    STRIPPERS['image/jpeg'] = images.JpegStripper
    STRIPPERS['image/png'] = images.PngStripper

diff --git a/lib/tarfile/__init__.py b/lib/tarfile/__init__.py
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/lib/tarfile/__init__.py
@@ -0,0 +1 @@
+
diff --git a/lib/tarfile/tarfile.py b/lib/tarfile/tarfile.py
new file mode 100644
index 0000000..a40f9fc
--- /dev/null
+++ b/lib/tarfile/tarfile.py
@@ -0,0 +1,2593 @@
+# -*- coding: iso-8859-1 -*-
+#-------------------------------------------------------------------
+# tarfile.py
+#-------------------------------------------------------------------
+# Copyright (C) 2002 Lars Gustäbel
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person
+# obtaining a copy of this software and associated documentation
+# files (the "Software"), to deal in the Software without
+# restriction, including without limitation the rights to use,
+# copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following
+# conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+"""Read from and write to tar format archives.
+"""
+
+__version__ = "$Revision$"
+# $Source$
+
+version = "0.9.0"
+__author__ = "Lars Gustäbel (lars@gustaebel.de)"
+__date__ = "$Date$"
+__cvsid__ = "$Id$"
+__credits__ = "Gustavo Niemeyer, Niels Gustäbel, Richard Townsend."
+
+#---------
+# Imports
+#---------
+import sys
+import os
+import shutil
+import stat
+import errno
+import time
+import struct
+import copy
+import re
+import operator
+
+try:
+ import grp, pwd
+except ImportError:
+ grp = pwd = None
+
+# from tarfile import *
+__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError"]
+
+#---------------------------------------------------------
+# tar constants
+#---------------------------------------------------------
+NUL = "\0" # the null character
+BLOCKSIZE = 512 # length of processing blocks
+RECORDSIZE = BLOCKSIZE * 20 # length of records
+GNU_MAGIC = "ustar \0" # magic gnu tar string
+POSIX_MAGIC = "ustar\x0000" # magic posix tar string
+
+LENGTH_NAME = 100 # maximum length of a filename
+LENGTH_LINK = 100 # maximum length of a linkname
+LENGTH_PREFIX = 155 # maximum length of the prefix field
+
+REGTYPE = "0" # regular file
+AREGTYPE = "\0" # regular file
+LNKTYPE = "1" # link (inside tarfile)
+SYMTYPE = "2" # symbolic link
+CHRTYPE = "3" # character special device
+BLKTYPE = "4" # block special device
+DIRTYPE = "5" # directory
+FIFOTYPE = "6" # fifo special device
+CONTTYPE = "7" # contiguous file
+
+GNUTYPE_LONGNAME = "L" # GNU tar longname
+GNUTYPE_LONGLINK = "K" # GNU tar longlink
+GNUTYPE_SPARSE = "S" # GNU tar sparse file
+
+XHDTYPE = "x" # POSIX.1-2001 extended header
+XGLTYPE = "g" # POSIX.1-2001 global header
+SOLARIS_XHDTYPE = "X" # Solaris extended header
+
+USTAR_FORMAT = 0 # POSIX.1-1988 (ustar) format
+GNU_FORMAT = 1 # GNU tar format
+PAX_FORMAT = 2 # POSIX.1-2001 (pax) format
+DEFAULT_FORMAT = GNU_FORMAT
+
+#---------------------------------------------------------
+# tarfile constants
+#---------------------------------------------------------
+# File types that tarfile supports:
+SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE,
+ SYMTYPE, DIRTYPE, FIFOTYPE,
+ CONTTYPE, CHRTYPE, BLKTYPE,
+ GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
+ GNUTYPE_SPARSE)
+
+# File types that will be treated as a regular file.
+REGULAR_TYPES = (REGTYPE, AREGTYPE,
+ CONTTYPE, GNUTYPE_SPARSE)
+
+# File types that are part of the GNU tar format.
+GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
+ GNUTYPE_SPARSE)
+
+# Fields from a pax header that override a TarInfo attribute.
+PAX_FIELDS = ("path", "linkpath", "size", "mtime",
+ "uid", "gid", "uname", "gname")
+
+# Fields in a pax header that are numbers, all other fields
+# are treated as strings.
+PAX_NUMBER_FIELDS = {
+ "atime": float,
+ "ctime": float,
+ "mtime": float,
+ "uid": int,
+ "gid": int,
+ "size": int
+}
+
+#---------------------------------------------------------
+# Bits used in the mode field, values in octal.
+#---------------------------------------------------------
+S_IFLNK = 0120000 # symbolic link
+S_IFREG = 0100000 # regular file
+S_IFBLK = 0060000 # block device
+S_IFDIR = 0040000 # directory
+S_IFCHR = 0020000 # character device
+S_IFIFO = 0010000 # fifo
+
+TSUID = 04000 # set UID on execution
+TSGID = 02000 # set GID on execution
+TSVTX = 01000 # reserved
+
+TUREAD = 0400 # read by owner
+TUWRITE = 0200 # write by owner
+TUEXEC = 0100 # execute/search by owner
+TGREAD = 0040 # read by group
+TGWRITE = 0020 # write by group
+TGEXEC = 0010 # execute/search by group
+TOREAD = 0004 # read by other
+TOWRITE = 0002 # write by other
+TOEXEC = 0001 # execute/search by other
+
+#---------------------------------------------------------
+# initialization
+#---------------------------------------------------------
+ENCODING = sys.getfilesystemencoding()
+if ENCODING is None:
+ ENCODING = sys.getdefaultencoding()
+
+#---------------------------------------------------------
+# Some useful functions
+#---------------------------------------------------------
+
+def stn(s, length):
+ """Convert a python string to a null-terminated string buffer.
+ """
+ return s[:length] + (length - len(s)) * NUL
+
+def nts(s):
+ """Convert a null-terminated string field to a python string.
+ """
+ # Use the string up to the first null char.
+ p = s.find("\0")
+ if p == -1:
+ return s
+ return s[:p]
+
+def nti(s):
+ """Convert a number field to a python number.
+ """
+ # There are two possible encodings for a number field, see
+ # itn() below.
+ if s[0] != chr(0200):
+ try:
+ n = int(nts(s) or "0", 8)
+ except ValueError:
+ raise InvalidHeaderError("invalid header")
+ else:
+ n = 0L
+ for i in xrange(len(s) - 1):
+ n <<= 8
+ n += ord(s[i + 1])
+ return n
+
+def itn(n, digits=8, format=DEFAULT_FORMAT):
+ """Convert a python number to a number field.
+ """
+ # POSIX 1003.1-1988 requires numbers to be encoded as a string of
+ # octal digits followed by a null-byte, this allows values up to
+ # (8**(digits-1))-1. GNU tar allows storing numbers greater than
+ # that if necessary. A leading 0200 byte indicates this particular
+ # encoding, the following digits-1 bytes are a big-endian
+ # representation. This allows values up to (256**(digits-1))-1.
+ if 0 <= n < 8 ** (digits - 1):
+ s = "%0*o" % (digits - 1, n) + NUL
+ else:
+ if format != GNU_FORMAT or n >= 256 ** (digits - 1):
+ raise ValueError("overflow in number field")
+
+ if n < 0:
+ # XXX We mimic GNU tar's behaviour with negative numbers,
+ # this could raise OverflowError.
+ n = struct.unpack("L", struct.pack("l", n))[0]
+
+ s = ""
+ for i in xrange(digits - 1):
+ s = chr(n & 0377) + s
+ n >>= 8
+ s = chr(0200) + s
+ return s
+
+def uts(s, encoding, errors):
+ """Convert a unicode object to a string.
+ """
+ if errors == "utf-8":
+ # An extra error handler similar to the -o invalid=UTF-8 option
+ # in POSIX.1-2001. Replace untranslatable characters with their
+ # UTF-8 representation.
+ try:
+ return s.encode(encoding, "strict")
+ except UnicodeEncodeError:
+ x = []
+ for c in s:
+ try:
+ x.append(c.encode(encoding, "strict"))
+ except UnicodeEncodeError:
+ x.append(c.encode("utf8"))
+ return "".join(x)
+ else:
+ return s.encode(encoding, errors)
+
+def calc_chksums(buf):
+ """Calculate the checksum for a member's header by summing up all
+ characters except for the chksum field which is treated as if
+ it was filled with spaces. According to the GNU tar sources,
+ some tars (Sun and NeXT) calculate chksum with signed char,
+ which will be different if there are chars in the buffer with
+ the high bit set. So we calculate two checksums, unsigned and
+ signed.
+ """
+ unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512]))
+ signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) + struct.unpack("356b", buf[156:512]))
+ return unsigned_chksum, signed_chksum
+
+def copyfileobj(src, dst, length=None):
+ """Copy length bytes from fileobj src to fileobj dst.
+ If length is None, copy the entire content.
+ """
+ if length == 0:
+ return
+ if length is None:
+ shutil.copyfileobj(src, dst)
+ return
+
+ BUFSIZE = 16 * 1024
+ blocks, remainder = divmod(length, BUFSIZE)
+ for b in xrange(blocks):
+ buf = src.read(BUFSIZE)
+ if len(buf) < BUFSIZE:
+ raise IOError("end of file reached")
+ dst.write(buf)
+
+ if remainder != 0:
+ buf = src.read(remainder)
+ if len(buf) < remainder:
+ raise IOError("end of file reached")
+ dst.write(buf)
+ return
+
+filemode_table = (
+ ((S_IFLNK, "l"),
+ (S_IFREG, "-"),
+ (S_IFBLK, "b"),
+ (S_IFDIR, "d"),
+ (S_IFCHR, "c"),
+ (S_IFIFO, "p")),
+
+ ((TUREAD, "r"),),
+ ((TUWRITE, "w"),),
+ ((TUEXEC|TSUID, "s"),
+ (TSUID, "S"),
+ (TUEXEC, "x")),
+
+ ((TGREAD, "r"),),
+ ((TGWRITE, "w"),),
+ ((TGEXEC|TSGID, "s"),
+ (TSGID, "S"),
+ (TGEXEC, "x")),
+
+ ((TOREAD, "r"),),
+ ((TOWRITE, "w"),),
+ ((TOEXEC|TSVTX, "t"),
+ (TSVTX, "T"),
+ (TOEXEC, "x"))
+)
+
+def filemode(mode):
+ """Convert a file's mode to a string of the form
+ -rwxrwxrwx.
+ Used by TarFile.list()
+ """
+ perm = []
+ for table in filemode_table:
+ for bit, char in table:
+ if mode & bit == bit:
+ perm.append(char)
+ break
+ else:
+ perm.append("-")
+ return "".join(perm)
+
+class TarError(Exception):
+ """Base exception."""
+ pass
+class ExtractError(TarError):
+ """General exception for extract errors."""
+ pass
+class ReadError(TarError):
+ """Exception for unreadble tar archives."""
+ pass
+class CompressionError(TarError):
+ """Exception for unavailable compression methods."""
+ pass
+class StreamError(TarError):
+ """Exception for unsupported operations on stream-like TarFiles."""
+ pass
+class HeaderError(TarError):
+ """Base exception for header errors."""
+ pass
+class EmptyHeaderError(HeaderError):
+ """Exception for empty headers."""
+ pass
+class TruncatedHeaderError(HeaderError):
+ """Exception for truncated headers."""
+ pass
+class EOFHeaderError(HeaderError):
+ """Exception for end of file headers."""
+ pass
+class InvalidHeaderError(HeaderError):
+ """Exception for invalid headers."""
+ pass
+class SubsequentHeaderError(HeaderError):
+ """Exception for missing and invalid extended headers."""
+ pass
+
+#---------------------------
+# internal stream interface
+#---------------------------
+class _LowLevelFile:
+ """Low-level file object. Supports reading and writing.
+ It is used instead of a regular file object for streaming
+ access.
+ """
+
+ def __init__(self, name, mode):
+ mode = {
+ "r": os.O_RDONLY,
+ "w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
+ }[mode]
+ if hasattr(os, "O_BINARY"):
+ mode |= os.O_BINARY
+ self.fd = os.open(name, mode, 0666)
+
+ def close(self):
+ os.close(self.fd)
+
+ def read(self, size):
+ return os.read(self.fd, size)
+
+ def write(self, s):
+ os.write(self.fd, s)
+
+class _Stream:
+ """Class that serves as an adapter between TarFile and
+ a stream-like object. The stream-like object only
+ needs to have a read() or write() method and is accessed
+ blockwise. Use of gzip or bzip2 compression is possible.
+ A stream-like object could be for example: sys.stdin,
+ sys.stdout, a socket, a tape device etc.
+
+ _Stream is intended to be used only internally.
+ """
+
+ def __init__(self, name, mode, comptype, fileobj, bufsize):
+ """Construct a _Stream object.
+ """
+ self._extfileobj = True
+ if fileobj is None:
+ fileobj = _LowLevelFile(name, mode)
+ self._extfileobj = False
+
+ if comptype == '*':
+ # Enable transparent compression detection for the
+ # stream interface
+ fileobj = _StreamProxy(fileobj)
+ comptype = fileobj.getcomptype()
+
+ self.name = name or ""
+ self.mode = mode
+ self.comptype = comptype
+ self.fileobj = fileobj
+ self.bufsize = bufsize
+ self.buf = ""
+ self.pos = 0L
+ self.closed = False
+
+ if comptype == "gz":
+ try:
+ import zlib
+ except ImportError:
+ raise CompressionError("zlib module is not available")
+ self.zlib = zlib
+ self.crc = zlib.crc32("") & 0xffffffffL
+ if mode == "r":
+ self._init_read_gz()
+ else:
+ self._init_write_gz()
+
+ if comptype == "bz2":
+ try:
+ import bz2
+ except ImportError:
+ raise CompressionError("bz2 module is not available")
+ if mode == "r":
+ self.dbuf = ""
+ self.cmp = bz2.BZ2Decompressor()
+ else:
+ self.cmp = bz2.BZ2Compressor()
+
+ def __del__(self):
+ if hasattr(self, "closed") and not self.closed:
+ self.close()
+
+ def _init_write_gz(self):
+ """Initialize for writing with gzip compression.
+ """
+ self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED,
+ -self.zlib.MAX_WBITS,
+ self.zlib.DEF_MEM_LEVEL,
+ 0)
+ timestamp = struct.pack(" self.bufsize:
+ self.fileobj.write(self.buf[:self.bufsize])
+ self.buf = self.buf[self.bufsize:]
+
+ def close(self):
+ """Close the _Stream object. No operation should be
+ done on it afterwards.
+ """
+ if self.closed:
+ return
+
+ if self.mode == "w" and self.comptype != "tar":
+ self.buf += self.cmp.flush()
+
+ if self.mode == "w" and self.buf:
+ self.fileobj.write(self.buf)
+ self.buf = ""
+ if self.comptype == "gz":
+ # The native zlib crc is an unsigned 32-bit integer, but
+ # the Python wrapper implicitly casts that to a signed C
+ # long. So, on a 32-bit box self.crc may "look negative",
+ # while the same crc on a 64-bit box may "look positive".
+ # To avoid irksome warnings from the `struct` module, force
+ # it to look positive on all boxes.
+            self.fileobj.write(struct.pack("<L", self.crc))
+            self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFFL))
+
+        if not self._extfileobj:
+            self.fileobj.close()
+
+        self.closed = True
+
+    def _init_read_gz(self):
+        """Initialize for reading a gzip compressed fileobj.
+        """
+        self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS)
+        self.dbuf = ""
+
+        # taken from gzip.GzipFile with some alterations
+        if self.__read(2) != "\037\213":
+            raise ReadError("not a gzip file")
+        if self.__read(1) != "\010":
+            raise CompressionError("unsupported compression method")
+
+        flag = ord(self.__read(1))
+        self.__read(6)
+
+        if flag & 4:
+            xlen = ord(self.__read(1)) + 256 * ord(self.__read(1))
+            self.read(xlen)
+        if flag & 8:
+            while True:
+                s = self.__read(1)
+                if not s or s == NUL:
+                    break
+        if flag & 16:
+            while True:
+                s = self.__read(1)
+                if not s or s == NUL:
+                    break
+        if flag & 2:
+            self.__read(2)
+
+    def tell(self):
+        """Return the stream's file pointer position.
+        """
+        return self.pos
+
+    def seek(self, pos=0):
+        """Set the stream's file pointer to pos. Negative seeking
+           is forbidden.
+        """
+        if pos - self.pos >= 0:
+ blocks, remainder = divmod(pos - self.pos, self.bufsize)
+ for i in xrange(blocks):
+ self.read(self.bufsize)
+ self.read(remainder)
+ else:
+ raise StreamError("seeking backwards is not allowed")
+ return self.pos
+
+ def read(self, size=None):
+ """Return the next size number of bytes from the stream.
+ If size is not defined, return all bytes of the stream
+ up to EOF.
+ """
+ if size is None:
+ t = []
+ while True:
+ buf = self._read(self.bufsize)
+ if not buf:
+ break
+ t.append(buf)
+ buf = "".join(t)
+ else:
+ buf = self._read(size)
+ self.pos += len(buf)
+ return buf
+
+ def _read(self, size):
+ """Return size bytes from the stream.
+ """
+ if self.comptype == "tar":
+ return self.__read(size)
+
+ c = len(self.dbuf)
+ t = [self.dbuf]
+ while c < size:
+ buf = self.__read(self.bufsize)
+ if not buf:
+ break
+ try:
+ buf = self.cmp.decompress(buf)
+ except IOError:
+ raise ReadError("invalid compressed data")
+ t.append(buf)
+ c += len(buf)
+ t = "".join(t)
+ self.dbuf = t[size:]
+ return t[:size]
+
+ def __read(self, size):
+ """Return size bytes from stream. If internal buffer is empty,
+ read another block from the stream.
+ """
+ c = len(self.buf)
+ t = [self.buf]
+ while c < size:
+ buf = self.fileobj.read(self.bufsize)
+ if not buf:
+ break
+ t.append(buf)
+ c += len(buf)
+ t = "".join(t)
+ self.buf = t[size:]
+ return t[:size]
+# class _Stream
+
+class _StreamProxy(object):
+ """Small proxy class that enables transparent compression
+ detection for the Stream interface (mode 'r|*').
+ """
+
+ def __init__(self, fileobj):
+ self.fileobj = fileobj
+ self.buf = self.fileobj.read(BLOCKSIZE)
+
+ def read(self, size):
+ self.read = self.fileobj.read
+ return self.buf
+
+ def getcomptype(self):
+ if self.buf.startswith("\037\213\010"):
+ return "gz"
+ if self.buf.startswith("BZh91"):
+ return "bz2"
+ return "tar"
+
+ def close(self):
+ self.fileobj.close()
+# class StreamProxy
+
+class _BZ2Proxy(object):
+ """Small proxy class that enables external file object
+ support for "r:bz2" and "w:bz2" modes. This is actually
+ a workaround for a limitation in bz2 module's BZ2File
+ class which (unlike gzip.GzipFile) has no support for
+ a file object argument.
+ """
+
+ blocksize = 16 * 1024
+
+ def __init__(self, fileobj, mode):
+ self.fileobj = fileobj
+ self.mode = mode
+ self.name = getattr(self.fileobj, "name", None)
+ self.init()
+
+ def init(self):
+ import bz2
+ self.pos = 0
+ if self.mode == "r":
+ self.bz2obj = bz2.BZ2Decompressor()
+ self.fileobj.seek(0)
+ self.buf = ""
+ else:
+ self.bz2obj = bz2.BZ2Compressor()
+
+ def read(self, size):
+ b = [self.buf]
+ x = len(self.buf)
+ while x < size:
+ raw = self.fileobj.read(self.blocksize)
+ if not raw:
+ break
+ data = self.bz2obj.decompress(raw)
+ b.append(data)
+ x += len(data)
+ self.buf = "".join(b)
+
+ buf = self.buf[:size]
+ self.buf = self.buf[size:]
+ self.pos += len(buf)
+ return buf
+
+ def seek(self, pos):
+ if pos < self.pos:
+ self.init()
+ self.read(pos - self.pos)
+
+ def tell(self):
+ return self.pos
+
+ def write(self, data):
+ self.pos += len(data)
+ raw = self.bz2obj.compress(data)
+ self.fileobj.write(raw)
+
+ def close(self):
+ if self.mode == "w":
+ raw = self.bz2obj.flush()
+ self.fileobj.write(raw)
+# class _BZ2Proxy
+
+#------------------------
+# Extraction file object
+#------------------------
+class _FileInFile(object):
+ """A thin wrapper around an existing file object that
+ provides a part of its data as an individual file
+ object.
+ """
+
+ def __init__(self, fileobj, offset, size, sparse=None):
+ self.fileobj = fileobj
+ self.offset = offset
+ self.size = size
+ self.sparse = sparse
+ self.position = 0
+
+ def tell(self):
+ """Return the current file position.
+ """
+ return self.position
+
+ def seek(self, position):
+ """Seek to a position in the file.
+ """
+ self.position = position
+
+ def read(self, size=None):
+ """Read data from the file.
+ """
+ if size is None:
+ size = self.size - self.position
+ else:
+ size = min(size, self.size - self.position)
+
+ if self.sparse is None:
+ return self.readnormal(size)
+ else:
+ return self.readsparse(size)
+
+ def readnormal(self, size):
+ """Read operation for regular files.
+ """
+ self.fileobj.seek(self.offset + self.position)
+ self.position += size
+ return self.fileobj.read(size)
+
+ def readsparse(self, size):
+ """Read operation for sparse files.
+ """
+ data = []
+ while size > 0:
+ buf = self.readsparsesection(size)
+ if not buf:
+ break
+ size -= len(buf)
+ data.append(buf)
+ return "".join(data)
+
+ def readsparsesection(self, size):
+ """Read a single section of a sparse file.
+ """
+ section = self.sparse.find(self.position)
+
+ if section is None:
+ return ""
+
+ size = min(size, section.offset + section.size - self.position)
+
+ if isinstance(section, _data):
+ realpos = section.realpos + self.position - section.offset
+ self.fileobj.seek(self.offset + realpos)
+ self.position += size
+ return self.fileobj.read(size)
+ else:
+ self.position += size
+ return NUL * size
+#class _FileInFile
+
+
+class ExFileObject(object):
+ """File-like object for reading an archive member.
+ Is returned by TarFile.extractfile().
+ """
+ blocksize = 1024
+
+ def __init__(self, tarfile, tarinfo):
+ self.fileobj = _FileInFile(tarfile.fileobj,
+ tarinfo.offset_data,
+ tarinfo.size,
+ getattr(tarinfo, "sparse", None))
+ self.name = tarinfo.name
+ self.mode = "r"
+ self.closed = False
+ self.size = tarinfo.size
+
+ self.position = 0
+ self.buffer = ""
+
+ def read(self, size=None):
+ """Read at most size bytes from the file. If size is not
+ present or None, read all data until EOF is reached.
+ """
+ if self.closed:
+ raise ValueError("I/O operation on closed file")
+
+ buf = ""
+ if self.buffer:
+ if size is None:
+ buf = self.buffer
+ self.buffer = ""
+ else:
+ buf = self.buffer[:size]
+ self.buffer = self.buffer[size:]
+
+ if size is None:
+ buf += self.fileobj.read()
+ else:
+ buf += self.fileobj.read(size - len(buf))
+
+ self.position += len(buf)
+ return buf
+
+ def readline(self, size=-1):
+ """Read one entire line from the file. If size is present
+ and non-negative, return a string with at most that
+ size, which may be an incomplete line.
+ """
+ if self.closed:
+ raise ValueError("I/O operation on closed file")
+
+ if "\n" in self.buffer:
+ pos = self.buffer.find("\n") + 1
+ else:
+ buffers = [self.buffer]
+ while True:
+ buf = self.fileobj.read(self.blocksize)
+ buffers.append(buf)
+ if not buf or "\n" in buf:
+ self.buffer = "".join(buffers)
+ pos = self.buffer.find("\n") + 1
+ if pos == 0:
+ # no newline found.
+ pos = len(self.buffer)
+ break
+
+ if size != -1:
+ pos = min(size, pos)
+
+ buf = self.buffer[:pos]
+ self.buffer = self.buffer[pos:]
+ self.position += len(buf)
+ return buf
+
+ def readlines(self):
+ """Return a list with all remaining lines.
+ """
+ result = []
+ while True:
+ line = self.readline()
+ if not line: break
+ result.append(line)
+ return result
+
+ def tell(self):
+ """Return the current file position.
+ """
+ if self.closed:
+ raise ValueError("I/O operation on closed file")
+
+ return self.position
+
+ def seek(self, pos, whence=os.SEEK_SET):
+ """Seek to a position in the file.
+ """
+ if self.closed:
+ raise ValueError("I/O operation on closed file")
+
+ if whence == os.SEEK_SET:
+ self.position = min(max(pos, 0), self.size)
+ elif whence == os.SEEK_CUR:
+ if pos < 0:
+ self.position = max(self.position + pos, 0)
+ else:
+ self.position = min(self.position + pos, self.size)
+ elif whence == os.SEEK_END:
+ self.position = max(min(self.size + pos, self.size), 0)
+ else:
+ raise ValueError("Invalid argument")
+
+ self.buffer = ""
+ self.fileobj.seek(self.position)
+
+ def close(self):
+ """Close the file object.
+ """
+ self.closed = True
+
+ def __iter__(self):
+ """Get an iterator over the file's lines.
+ """
+ while True:
+ line = self.readline()
+ if not line:
+ break
+ yield line
+#class ExFileObject
+
+#------------------
+# Exported Classes
+#------------------
+class TarInfo(object):
+ """Informational class which holds the details about an
+ archive member given by a tar header block.
+ TarInfo objects are returned by TarFile.getmember(),
+ TarFile.getmembers() and TarFile.gettarinfo() and are
+ usually created internally.
+ """
+
+ def __init__(self, name=""):
+ """Construct a TarInfo object. name is the optional name
+ of the member.
+ """
+ self.name = name # member name
+ self.mode = 0644 # file permissions
+ self.uid = 0 # user id
+ self.gid = 0 # group id
+ self.size = 0 # file size
+ self.mtime = 0 # modification time
+ self.chksum = 0 # header checksum
+ self.type = REGTYPE # member type
+ self.linkname = "" # link name
+ self.uname = "" # user name
+ self.gname = "" # group name
+ self.devmajor = 0 # device major number
+ self.devminor = 0 # device minor number
+
+ self.offset = 0 # the tar header starts here
+ self.offset_data = 0 # the file's data starts here
+
+ self.pax_headers = {} # pax header information
+
+ # In pax headers the "name" and "linkname" field are called
+ # "path" and "linkpath".
+ def _getpath(self):
+ return self.name
+ def _setpath(self, name):
+ self.name = name
+ path = property(_getpath, _setpath)
+
+ def _getlinkpath(self):
+ return self.linkname
+ def _setlinkpath(self, linkname):
+ self.linkname = linkname
+ linkpath = property(_getlinkpath, _setlinkpath)
+
+ def __repr__(self):
+ return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self))
+
+ def get_info(self, encoding, errors):
+ """Return the TarInfo's attributes as a dictionary.
+ """
+ info = {
+ "name": self.name,
+ "mode": self.mode & 07777,
+ "uid": self.uid,
+ "gid": self.gid,
+ "size": self.size,
+ "mtime": self.mtime,
+ "chksum": self.chksum,
+ "type": self.type,
+ "linkname": self.linkname,
+ "uname": self.uname,
+ "gname": self.gname,
+ "devmajor": self.devmajor,
+ "devminor": self.devminor
+ }
+
+ if info["type"] == DIRTYPE and not info["name"].endswith("/"):
+ info["name"] += "/"
+
+ for key in ("name", "linkname", "uname", "gname"):
+ if type(info[key]) is unicode:
+ info[key] = info[key].encode(encoding, errors)
+
+ return info
+
+ def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="strict"):
+ """Return a tar header as a string of 512 byte blocks.
+ """
+ info = self.get_info(encoding, errors)
+
+ if format == USTAR_FORMAT:
+ return self.create_ustar_header(info)
+ elif format == GNU_FORMAT:
+ return self.create_gnu_header(info)
+ elif format == PAX_FORMAT:
+ return self.create_pax_header(info, encoding, errors)
+ else:
+ raise ValueError("invalid format")
+
+ def create_ustar_header(self, info):
+ """Return the object as a ustar header block.
+ """
+ info["magic"] = POSIX_MAGIC
+
+ if len(info["linkname"]) > LENGTH_LINK:
+ raise ValueError("linkname is too long")
+
+ if len(info["name"]) > LENGTH_NAME:
+ info["prefix"], info["name"] = self._posix_split_name(info["name"])
+
+ return self._create_header(info, USTAR_FORMAT)
+
+ def create_gnu_header(self, info):
+ """Return the object as a GNU header block sequence.
+ """
+ info["magic"] = GNU_MAGIC
+
+ buf = ""
+ if len(info["linkname"]) > LENGTH_LINK:
+ buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK)
+
+ if len(info["name"]) > LENGTH_NAME:
+ buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME)
+
+ return buf + self._create_header(info, GNU_FORMAT)
+
+ def create_pax_header(self, info, encoding, errors):
+ """Return the object as a ustar header block. If it cannot be
+ represented this way, prepend a pax extended header sequence
+ with supplement information.
+ """
+ info["magic"] = POSIX_MAGIC
+ pax_headers = self.pax_headers.copy()
+
+ # Test string fields for values that exceed the field length or cannot
+ # be represented in ASCII encoding.
+ for name, hname, length in (
+ ("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK),
+ ("uname", "uname", 32), ("gname", "gname", 32)):
+
+ if hname in pax_headers:
+ # The pax header has priority.
+ continue
+
+ val = info[name].decode(encoding, errors)
+
+ # Try to encode the string as ASCII.
+ try:
+ val.encode("ascii")
+ except UnicodeEncodeError:
+ pax_headers[hname] = val
+ continue
+
+ if len(info[name]) > length:
+ pax_headers[hname] = val
+
+ # Test number fields for values that exceed the field limit or values
+ # that like to be stored as float.
+ for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)):
+ if name in pax_headers:
+ # The pax header has priority. Avoid overflow.
+ info[name] = 0
+ continue
+
+ val = info[name]
+ if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float):
+ pax_headers[name] = unicode(val)
+ info[name] = 0
+
+ # Create a pax extended header if necessary.
+ if pax_headers:
+ buf = self._create_pax_generic_header(pax_headers)
+ else:
+ buf = ""
+
+ return buf + self._create_header(info, USTAR_FORMAT)
+
+ @classmethod
+ def create_pax_global_header(cls, pax_headers):
+ """Return the object as a pax global header block sequence.
+ """
+ return cls._create_pax_generic_header(pax_headers, type=XGLTYPE)
+
+ def _posix_split_name(self, name):
+ """Split a name longer than 100 chars into a prefix
+ and a name part.
+ """
+ prefix = name[:LENGTH_PREFIX + 1]
+ while prefix and prefix[-1] != "/":
+ prefix = prefix[:-1]
+
+ name = name[len(prefix):]
+ prefix = prefix[:-1]
+
+ if not prefix or len(name) > LENGTH_NAME:
+ raise ValueError("name is too long")
+ return prefix, name
+
+ @staticmethod
+ def _create_header(info, format):
+ """Return a header block. info is a dictionary with file
+ information, format must be one of the *_FORMAT constants.
+ """
+ parts = [
+ stn(info.get("name", ""), 100),
+ itn(info.get("mode", 0) & 07777, 8, format),
+ itn(info.get("uid", 0), 8, format),
+ itn(info.get("gid", 0), 8, format),
+ itn(info.get("size", 0), 12, format),
+ itn(info.get("mtime", 0), 12, format),
+ " ", # checksum field
+ info.get("type", REGTYPE),
+ stn(info.get("linkname", ""), 100),
+ stn(info.get("magic", POSIX_MAGIC), 8),
+ stn(info.get("uname", ""), 32),
+ stn(info.get("gname", ""), 32),
+ itn(info.get("devmajor", 0), 8, format),
+ itn(info.get("devminor", 0), 8, format),
+ stn(info.get("prefix", ""), 155)
+ ]
+
+ buf = struct.pack("%ds" % BLOCKSIZE, "".join(parts))
+ chksum = calc_chksums(buf[-BLOCKSIZE:])[0]
+ buf = buf[:-364] + "%06o\0" % chksum + buf[-357:]
+ return buf
+
+ @staticmethod
+ def _create_payload(payload):
+ """Return the string payload filled with zero bytes
+ up to the next 512 byte border.
+ """
+ blocks, remainder = divmod(len(payload), BLOCKSIZE)
+ if remainder > 0:
+ payload += (BLOCKSIZE - remainder) * NUL
+ return payload
+
+ @classmethod
+ def _create_gnu_long_header(cls, name, type):
+ """Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence
+ for name.
+ """
+ name += NUL
+
+ info = {}
+ info["name"] = "././@LongLink"
+ info["type"] = type
+ info["size"] = len(name)
+ info["magic"] = GNU_MAGIC
+
+ # create extended header + name blocks.
+ return cls._create_header(info, USTAR_FORMAT) + \
+ cls._create_payload(name)
+
+ @classmethod
+ def _create_pax_generic_header(cls, pax_headers, type=XHDTYPE):
+ """Return a POSIX.1-2001 extended or global header sequence
+ that contains a list of keyword, value pairs. The values
+ must be unicode objects.
+ """
+ records = []
+ for keyword, value in pax_headers.iteritems():
+ keyword = keyword.encode("utf8")
+ value = value.encode("utf8")
+ l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n'
+ n = p = 0
+ while True:
+ n = l + len(str(p))
+ if n == p:
+ break
+ p = n
+ records.append("%d %s=%s\n" % (p, keyword, value))
+ records = "".join(records)
+
+ # We use a hardcoded "././@PaxHeader" name like star does
+ # instead of the one that POSIX recommends.
+ info = {}
+ info["name"] = "././@PaxHeader"
+ info["type"] = type
+ info["size"] = len(records)
+ info["magic"] = POSIX_MAGIC
+
+ # Create pax header + record blocks.
+ return cls._create_header(info, USTAR_FORMAT) + \
+ cls._create_payload(records)
+
+ @classmethod
+ def frombuf(cls, buf):
+ """Construct a TarInfo object from a 512 byte string buffer.
+ """
+ if len(buf) == 0:
+ raise EmptyHeaderError("empty header")
+ if len(buf) != BLOCKSIZE:
+ raise TruncatedHeaderError("truncated header")
+ if buf.count(NUL) == BLOCKSIZE:
+ raise EOFHeaderError("end of file header")
+
+ chksum = nti(buf[148:156])
+ if chksum not in calc_chksums(buf):
+ raise InvalidHeaderError("bad checksum")
+
+ obj = cls()
+ obj.buf = buf
+ obj.name = nts(buf[0:100])
+ obj.mode = nti(buf[100:108])
+ obj.uid = nti(buf[108:116])
+ obj.gid = nti(buf[116:124])
+ obj.size = nti(buf[124:136])
+ obj.mtime = nti(buf[136:148])
+ obj.chksum = chksum
+ obj.type = buf[156:157]
+ obj.linkname = nts(buf[157:257])
+ obj.uname = nts(buf[265:297])
+ obj.gname = nts(buf[297:329])
+ obj.devmajor = nti(buf[329:337])
+ obj.devminor = nti(buf[337:345])
+ prefix = nts(buf[345:500])
+
+ # Old V7 tar format represents a directory as a regular
+ # file with a trailing slash.
+ if obj.type == AREGTYPE and obj.name.endswith("/"):
+ obj.type = DIRTYPE
+
+ # Remove redundant slashes from directories.
+ if obj.isdir():
+ obj.name = obj.name.rstrip("/")
+
+ # Reconstruct a ustar longname.
+ if prefix and obj.type not in GNU_TYPES:
+ obj.name = prefix + "/" + obj.name
+ return obj
+
+ @classmethod
+ def fromtarfile(cls, tarfile):
+ """Return the next TarInfo object from TarFile object
+ tarfile.
+ """
+ buf = tarfile.fileobj.read(BLOCKSIZE)
+ obj = cls.frombuf(buf)
+ obj.offset = tarfile.fileobj.tell() - BLOCKSIZE
+ return obj._proc_member(tarfile)
+
+ #--------------------------------------------------------------------------
+ # The following are methods that are called depending on the type of a
+ # member. The entry point is _proc_member() which can be overridden in a
+ # subclass to add custom _proc_*() methods. A _proc_*() method MUST
+ # implement the following
+ # operations:
+ # 1. Set self.offset_data to the position where the data blocks begin,
+ # if there is data that follows.
+ # 2. Set tarfile.offset to the position where the next member's header will
+ # begin.
+ # 3. Return self or another valid TarInfo object.
+    def _proc_member(self, tarfile):
+        """Choose the right processing method depending on
+           the type and call it.
+        """
+        # GNU long name/link data is carried in the payload of a pseudo-member
+        # that precedes the real one.
+        if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK):
+            return self._proc_gnulong(tarfile)
+        elif self.type == GNUTYPE_SPARSE:
+            return self._proc_sparse(tarfile)
+        # POSIX.1-2001 pax extended (per-file) and global headers.
+        elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE):
+            return self._proc_pax(tarfile)
+        else:
+            return self._proc_builtin(tarfile)
+
+    def _proc_builtin(self, tarfile):
+        """Process a builtin type or an unknown type which
+           will be treated as a regular file.
+        """
+        # Data (if any) starts right after the header block.
+        self.offset_data = tarfile.fileobj.tell()
+        offset = self.offset_data
+        if self.isreg() or self.type not in SUPPORTED_TYPES:
+            # Skip the following data blocks.
+            offset += self._block(self.size)
+        tarfile.offset = offset
+
+        # Patch the TarInfo object with saved global
+        # header information.
+        self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors)
+
+        return self
+
+    def _proc_gnulong(self, tarfile):
+        """Process the blocks that hold a GNU longname
+           or longlink member.
+        """
+        # The payload of this pseudo-member is the NUL-terminated long
+        # name/linkname, padded to a multiple of BLOCKSIZE.
+        buf = tarfile.fileobj.read(self._block(self.size))
+
+        # Fetch the next header and process it.
+        try:
+            next = self.fromtarfile(tarfile)
+        except HeaderError:
+            raise SubsequentHeaderError("missing or bad subsequent header")
+
+        # Patch the TarInfo object from the next header with
+        # the longname information.
+        next.offset = self.offset
+        if self.type == GNUTYPE_LONGNAME:
+            next.name = nts(buf)
+        elif self.type == GNUTYPE_LONGLINK:
+            next.linkname = nts(buf)
+
+        return next
+
+    def _proc_sparse(self, tarfile):
+        """Process a GNU sparse header plus extra headers.
+        """
+        buf = self.buf
+        sp = _ringbuffer()
+        # Fixed field offsets within the GNU sparse header block:
+        # the sparse struct array starts at byte 386, each entry is
+        # 12 bytes offset + 12 bytes size.
+        pos = 386
+        lastpos = 0L
+        realpos = 0L
+        # There are 4 possible sparse structs in the
+        # first header.
+        for i in xrange(4):
+            try:
+                offset = nti(buf[pos:pos + 12])
+                numbytes = nti(buf[pos + 12:pos + 24])
+            except ValueError:
+                break
+            # Any gap between the previous data region and this one is a hole.
+            if offset > lastpos:
+                sp.append(_hole(lastpos, offset - lastpos))
+            sp.append(_data(offset, numbytes, realpos))
+            realpos += numbytes
+            lastpos = offset + numbytes
+            pos += 24
+
+        # Byte 482 is the isextended flag, bytes 483-495 the real
+        # (unsparsified) file size.
+        isextended = ord(buf[482])
+        origsize = nti(buf[483:495])
+
+        # If the isextended flag is given,
+        # there are extra headers to process.
+        while isextended == 1:
+            buf = tarfile.fileobj.read(BLOCKSIZE)
+            pos = 0
+            # An extension block holds up to 21 further sparse structs.
+            for i in xrange(21):
+                try:
+                    offset = nti(buf[pos:pos + 12])
+                    numbytes = nti(buf[pos + 12:pos + 24])
+                except ValueError:
+                    break
+                if offset > lastpos:
+                    sp.append(_hole(lastpos, offset - lastpos))
+                sp.append(_data(offset, numbytes, realpos))
+                realpos += numbytes
+                lastpos = offset + numbytes
+                pos += 24
+            isextended = ord(buf[504])
+
+        # A trailing hole pads the file out to its original size.
+        if lastpos < origsize:
+            sp.append(_hole(lastpos, origsize - lastpos))
+
+        self.sparse = sp
+
+        self.offset_data = tarfile.fileobj.tell()
+        tarfile.offset = self.offset_data + self._block(self.size)
+        # Report the logical (expanded) size, not the on-disk size.
+        self.size = origsize
+
+        return self
+
+    def _proc_pax(self, tarfile):
+        """Process an extended or global header as described in
+           POSIX.1-2001.
+        """
+        # Read the header information.
+        buf = tarfile.fileobj.read(self._block(self.size))
+
+        # A pax header stores supplemental information for either
+        # the following file (extended) or all following files
+        # (global).
+        if self.type == XGLTYPE:
+            # Global: mutate the archive-wide dict in place.
+            pax_headers = tarfile.pax_headers
+        else:
+            # Extended: work on a copy so it only affects the next member.
+            pax_headers = tarfile.pax_headers.copy()
+
+        # Parse pax header information. A record looks like that:
+        # "%d %s=%s\n" % (length, keyword, value). length is the size
+        # of the complete record including the length field itself and
+        # the newline. keyword and value are both UTF-8 encoded strings.
+        regex = re.compile(r"(\d+) ([^=]+)=", re.U)
+        pos = 0
+        while True:
+            match = regex.match(buf, pos)
+            if not match:
+                break
+
+            length, keyword = match.groups()
+            length = int(length)
+            # Value runs from just past '=' to just before the record's
+            # trailing newline (record length counts the length field itself).
+            value = buf[match.end(2) + 1:match.start(1) + length - 1]
+
+            keyword = keyword.decode("utf8")
+            value = value.decode("utf8")
+
+            pax_headers[keyword] = value
+            pos += length
+
+        # Fetch the next header.
+        try:
+            next = self.fromtarfile(tarfile)
+        except HeaderError:
+            raise SubsequentHeaderError("missing or bad subsequent header")
+
+        if self.type in (XHDTYPE, SOLARIS_XHDTYPE):
+            # Patch the TarInfo object with the extended header info.
+            next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors)
+            next.offset = self.offset
+
+            if "size" in pax_headers:
+                # If the extended header replaces the size field,
+                # we need to recalculate the offset where the next
+                # header starts.
+                offset = next.offset_data
+                if next.isreg() or next.type not in SUPPORTED_TYPES:
+                    offset += next._block(next.size)
+                tarfile.offset = offset
+
+        return next
+
+    def _apply_pax_info(self, pax_headers, encoding, errors):
+        """Replace fields with supplemental information from a previous
+           pax extended or global header.
+        """
+        for keyword, value in pax_headers.iteritems():
+            # Only keywords that map onto TarInfo attributes are applied.
+            if keyword not in PAX_FIELDS:
+                continue
+
+            if keyword == "path":
+                value = value.rstrip("/")
+
+            if keyword in PAX_NUMBER_FIELDS:
+                try:
+                    value = PAX_NUMBER_FIELDS[keyword](value)
+                except ValueError:
+                    # Unparsable numeric field: fall back to 0 rather
+                    # than aborting the whole member.
+                    value = 0
+            else:
+                # String fields are re-encoded to the archive's encoding.
+                value = uts(value, encoding, errors)
+
+            setattr(self, keyword, value)
+
+        # Keep a copy so the applied headers can be inspected later.
+        self.pax_headers = pax_headers.copy()
+
+    def _block(self, count):
+        """Round up a byte count by BLOCKSIZE and return it,
+           e.g. _block(834) => 1024.
+        """
+        blocks, remainder = divmod(count, BLOCKSIZE)
+        if remainder:
+            blocks += 1
+        return blocks * BLOCKSIZE
+
+    # Convenience predicates over self.type; one per tar member kind.
+    def isreg(self):
+        return self.type in REGULAR_TYPES
+    def isfile(self):
+        # Alias of isreg() kept for API compatibility.
+        return self.isreg()
+    def isdir(self):
+        return self.type == DIRTYPE
+    def issym(self):
+        return self.type == SYMTYPE
+    def islnk(self):
+        return self.type == LNKTYPE
+    def ischr(self):
+        return self.type == CHRTYPE
+    def isblk(self):
+        return self.type == BLKTYPE
+    def isfifo(self):
+        return self.type == FIFOTYPE
+    def issparse(self):
+        return self.type == GNUTYPE_SPARSE
+    def isdev(self):
+        # Any device-like member: character/block device or fifo.
+        return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE)
+# class TarInfo
+
+class TarFile(object):
+    """The TarFile Class provides an interface to tar archives.
+    """
+
+    # Class-level defaults; each may be overridden per-instance via the
+    # corresponding __init__ keyword argument.
+    debug = 0                   # May be set from 0 (no msgs) to 3 (all msgs)
+
+    dereference = False         # If true, add content of linked file to the
+                                # tar file, else the link.
+
+    ignore_zeros = False        # If true, skips empty or invalid blocks and
+                                # continues processing.
+
+    errorlevel = 1              # If 0, fatal errors only appear in debug
+                                # messages (if debug >= 0). If > 0, errors
+                                # are passed to the caller as exceptions.
+
+    format = DEFAULT_FORMAT     # The format to use when creating an archive.
+
+    encoding = ENCODING         # Encoding for 8-bit character strings.
+
+    errors = None               # Error handler for unicode conversion.
+
+    tarinfo = TarInfo           # The default TarInfo class to use.
+
+    fileobject = ExFileObject   # The default ExFileObject class to use.
+
+    def __init__(self, name=None, mode="r", fileobj=None, format=None,
+            tarinfo=None, dereference=None, ignore_zeros=None, encoding=None,
+            errors=None, pax_headers=None, debug=None, errorlevel=None):
+        """Open an (uncompressed) tar archive `name'. `mode' is either 'r' to
+           read from an existing archive, 'a' to append data to an existing
+           file or 'w' to create a new file overwriting an existing one. `mode'
+           defaults to 'r'.
+           If `fileobj' is given, it is used for reading or writing data. If it
+           can be determined, `mode' is overridden by `fileobj's mode.
+           `fileobj' is not closed, when TarFile is closed.
+        """
+        # Only single-character modes are valid here; compressed/stream
+        # variants are handled by open().
+        if len(mode) > 1 or mode not in "raw":
+            raise ValueError("mode must be 'r', 'a' or 'w'")
+        self.mode = mode
+        self._mode = {"r": "rb", "a": "r+b", "w": "wb"}[mode]
+
+        if not fileobj:
+            if self.mode == "a" and not os.path.exists(name):
+                # Create nonexistent files in append mode.
+                self.mode = "w"
+                self._mode = "wb"
+            fileobj = bltn_open(name, self._mode)
+            # We opened the file, so close() must close it.
+            self._extfileobj = False
+        else:
+            if name is None and hasattr(fileobj, "name"):
+                name = fileobj.name
+            if hasattr(fileobj, "mode"):
+                self._mode = fileobj.mode
+            # Caller owns the file object; close() leaves it open.
+            self._extfileobj = True
+        self.name = os.path.abspath(name) if name else None
+        self.fileobj = fileobj
+
+        # Init attributes.
+        if format is not None:
+            self.format = format
+        if tarinfo is not None:
+            self.tarinfo = tarinfo
+        if dereference is not None:
+            self.dereference = dereference
+        if ignore_zeros is not None:
+            self.ignore_zeros = ignore_zeros
+        if encoding is not None:
+            self.encoding = encoding
+
+        if errors is not None:
+            self.errors = errors
+        elif mode == "r":
+            self.errors = "utf-8"
+        else:
+            self.errors = "strict"
+
+        # pax_headers only make sense with PAX_FORMAT archives.
+        if pax_headers is not None and self.format == PAX_FORMAT:
+            self.pax_headers = pax_headers
+        else:
+            self.pax_headers = {}
+
+        if debug is not None:
+            self.debug = debug
+        if errorlevel is not None:
+            self.errorlevel = errorlevel
+
+        # Init datastructures.
+        self.closed = False
+        self.members = []       # list of members as TarInfo objects
+        self._loaded = False    # flag if all members have been read
+        self.offset = self.fileobj.tell()
+                                # current position in the archive file
+        self.inodes = {}        # dictionary caching the inodes of
+                                # archive members already added
+
+        try:
+            if self.mode == "r":
+                # Pre-read the first member so header errors surface here.
+                self.firstmember = None
+                self.firstmember = self.next()
+
+            if self.mode == "a":
+                # Move to the end of the archive,
+                # before the first empty block.
+                while True:
+                    self.fileobj.seek(self.offset)
+                    try:
+                        tarinfo = self.tarinfo.fromtarfile(self)
+                        self.members.append(tarinfo)
+                    except EOFHeaderError:
+                        self.fileobj.seek(self.offset)
+                        break
+                    except HeaderError, e:
+                        raise ReadError(str(e))
+
+            if self.mode in "aw":
+                self._loaded = True
+
+                if self.pax_headers:
+                    buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy())
+                    self.fileobj.write(buf)
+                    self.offset += len(buf)
+        except:
+            # Bare except is deliberate: undo our own open before
+            # re-raising whatever went wrong during setup.
+            if not self._extfileobj:
+                self.fileobj.close()
+            self.closed = True
+            raise
+
+    # Deprecated boolean alias for `format': True <=> USTAR_FORMAT.
+    def _getposix(self):
+        return self.format == USTAR_FORMAT
+    def _setposix(self, value):
+        import warnings
+        warnings.warn("use the format attribute instead", DeprecationWarning,
+                      2)
+        if value:
+            self.format = USTAR_FORMAT
+        else:
+            self.format = GNU_FORMAT
+    posix = property(_getposix, _setposix)
+
+ #--------------------------------------------------------------------------
+ # Below are the classmethods which act as alternate constructors to the
+ # TarFile class. The open() method is the only one that is needed for
+ # public use; it is the "super"-constructor and is able to select an
+ # adequate "sub"-constructor for a particular compression using the mapping
+ # from OPEN_METH.
+ #
+ # This concept allows one to subclass TarFile without losing the comfort of
+ # the super-constructor. A sub-constructor is registered and made available
+ # by adding it to the mapping in OPEN_METH.
+
+    @classmethod
+    def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs):
+        """Open a tar archive for reading, writing or appending. Return
+           an appropriate TarFile class.
+
+           mode:
+           'r' or 'r:*' open for reading with transparent compression
+           'r:'         open for reading exclusively uncompressed
+           'r:gz'       open for reading with gzip compression
+           'r:bz2'      open for reading with bzip2 compression
+           'a' or 'a:'  open for appending, creating the file if necessary
+           'w' or 'w:'  open for writing without compression
+           'w:gz'       open for writing with gzip compression
+           'w:bz2'      open for writing with bzip2 compression
+
+           'r|*'        open a stream of tar blocks with transparent compression
+           'r|'         open an uncompressed stream of tar blocks for reading
+           'r|gz'       open a gzip compressed stream of tar blocks
+           'r|bz2'      open a bzip2 compressed stream of tar blocks
+           'w|'         open an uncompressed stream for writing
+           'w|gz'       open a gzip compressed stream for writing
+           'w|bz2'      open a bzip2 compressed stream for writing
+        """
+
+        if not name and not fileobj:
+            raise ValueError("nothing to open")
+
+        if mode in ("r", "r:*"):
+            # Find out which *open() is appropriate for opening the file.
+            for comptype in cls.OPEN_METH:
+                func = getattr(cls, cls.OPEN_METH[comptype])
+                if fileobj is not None:
+                    saved_pos = fileobj.tell()
+                try:
+                    return func(name, "r", fileobj, **kwargs)
+                except (ReadError, CompressionError), e:
+                    # Wrong compression type: rewind and try the next one.
+                    if fileobj is not None:
+                        fileobj.seek(saved_pos)
+                    continue
+            raise ReadError("file could not be opened successfully")
+
+        elif ":" in mode:
+            filemode, comptype = mode.split(":", 1)
+            filemode = filemode or "r"
+            comptype = comptype or "tar"
+
+            # Select the *open() function according to
+            # given compression.
+            if comptype in cls.OPEN_METH:
+                func = getattr(cls, cls.OPEN_METH[comptype])
+            else:
+                raise CompressionError("unknown compression type %r" % comptype)
+            return func(name, filemode, fileobj, **kwargs)
+
+        elif "|" in mode:
+            # Non-seekable stream variant: wrap in a _Stream object.
+            filemode, comptype = mode.split("|", 1)
+            filemode = filemode or "r"
+            comptype = comptype or "tar"
+
+            if filemode not in "rw":
+                raise ValueError("mode must be 'r' or 'w'")
+
+            t = cls(name, filemode,
+                    _Stream(name, filemode, comptype, fileobj, bufsize),
+                    **kwargs)
+            # The _Stream is ours, so close() must close it.
+            t._extfileobj = False
+            return t
+
+        elif mode in "aw":
+            return cls.taropen(name, mode, fileobj, **kwargs)
+
+        raise ValueError("undiscernible mode")
+
+    @classmethod
+    def taropen(cls, name, mode="r", fileobj=None, **kwargs):
+        """Open uncompressed tar archive name for reading or writing.
+        """
+        if len(mode) > 1 or mode not in "raw":
+            raise ValueError("mode must be 'r', 'a' or 'w'")
+        return cls(name, mode, fileobj, **kwargs)
+
+    @classmethod
+    def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
+        """Open gzip compressed tar archive name for reading or writing.
+           Appending is not allowed.
+        """
+        if len(mode) > 1 or mode not in "rw":
+            raise ValueError("mode must be 'r' or 'w'")
+
+        try:
+            import gzip
+            gzip.GzipFile
+        except (ImportError, AttributeError):
+            raise CompressionError("gzip module is not available")
+
+        if fileobj is None:
+            fileobj = bltn_open(name, mode + "b")
+
+        # NOTE(review): if GzipFile() or taropen() raises, the fileobj opened
+        # above is never closed (known upstream quirk of this code vintage).
+        try:
+            t = cls.taropen(name, mode,
+                    gzip.GzipFile(name, mode, compresslevel, fileobj),
+                    **kwargs)
+        except IOError:
+            raise ReadError("not a gzip file")
+        t._extfileobj = False
+        return t
+
+    @classmethod
+    def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
+        """Open bzip2 compressed tar archive name for reading or writing.
+           Appending is not allowed.
+        """
+        if len(mode) > 1 or mode not in "rw":
+            raise ValueError("mode must be 'r' or 'w'.")
+
+        try:
+            import bz2
+        except ImportError:
+            raise CompressionError("bz2 module is not available")
+
+        if fileobj is not None:
+            # bz2.BZ2File cannot wrap an arbitrary file object directly
+            # (Python 2), so use the proxy.
+            fileobj = _BZ2Proxy(fileobj, mode)
+        else:
+            fileobj = bz2.BZ2File(name, mode, compresslevel=compresslevel)
+
+        try:
+            t = cls.taropen(name, mode, fileobj, **kwargs)
+        except (IOError, EOFError):
+            raise ReadError("not a bzip2 file")
+        t._extfileobj = False
+        return t
+
+    # All *open() methods are registered here.
+    # Maps the compression suffix used in mode strings (e.g. "r:gz")
+    # to the classmethod name that handles it; open() dispatches on this.
+    OPEN_METH = {
+        "tar": "taropen",   # uncompressed tar
+        "gz":  "gzopen",    # gzip compressed tar
+        "bz2": "bz2open"    # bzip2 compressed tar
+    }
+
+ #--------------------------------------------------------------------------
+ # The public methods which TarFile provides:
+
+    def close(self):
+        """Close the TarFile. In write-mode, two finishing zero blocks are
+           appended to the archive.
+        """
+        # Idempotent: repeated close() is a no-op.
+        if self.closed:
+            return
+
+        if self.mode in "aw":
+            self.fileobj.write(NUL * (BLOCKSIZE * 2))
+            self.offset += (BLOCKSIZE * 2)
+            # fill up the end with zero-blocks
+            # (like option -b20 for tar does)
+            blocks, remainder = divmod(self.offset, RECORDSIZE)
+            if remainder > 0:
+                self.fileobj.write(NUL * (RECORDSIZE - remainder))
+
+        # Only close file objects we opened ourselves.
+        if not self._extfileobj:
+            self.fileobj.close()
+        self.closed = True
+
+    def getmember(self, name):
+        """Return a TarInfo object for member `name'. If `name' can not be
+           found in the archive, KeyError is raised. If a member occurs more
+           than once in the archive, its last occurrence is assumed to be the
+           most up-to-date version.
+        """
+        tarinfo = self._getmember(name)
+        if tarinfo is None:
+            raise KeyError("filename %r not found" % name)
+        return tarinfo
+
+    def getmembers(self):
+        """Return the members of the archive as a list of TarInfo objects. The
+           list has the same order as the members in the archive.
+        """
+        self._check()
+        if not self._loaded:    # if we want to obtain a list of
+            self._load()        # all members, we first have to
+                                # scan the whole archive.
+        return self.members
+
+    def getnames(self):
+        """Return the members of the archive as a list of their names. It has
+           the same order as the list returned by getmembers().
+        """
+        return [tarinfo.name for tarinfo in self.getmembers()]
+
+    def gettarinfo(self, name=None, arcname=None, fileobj=None):
+        """Create a TarInfo object for either the file `name' or the file
+           object `fileobj' (using os.fstat on its file descriptor). You can
+           modify some of the TarInfo's attributes before you add it using
+           addfile(). If given, `arcname' specifies an alternative name for the
+           file in the archive.
+        """
+        self._check("aw")
+
+        # When fileobj is given, replace name by
+        # fileobj's real name.
+        if fileobj is not None:
+            name = fileobj.name
+
+        # Building the name of the member in the archive.
+        # Backward slashes are converted to forward slashes,
+        # Absolute paths are turned to relative paths.
+        if arcname is None:
+            arcname = name
+        drv, arcname = os.path.splitdrive(arcname)
+        arcname = arcname.replace(os.sep, "/")
+        arcname = arcname.lstrip("/")
+
+        # Now, fill the TarInfo object with
+        # information specific for the file.
+        tarinfo = self.tarinfo()
+        tarinfo.tarfile = self
+
+        # Use os.stat or os.lstat, depending on platform
+        # and if symlinks shall be resolved.
+        if fileobj is None:
+            if hasattr(os, "lstat") and not self.dereference:
+                statres = os.lstat(name)
+            else:
+                statres = os.stat(name)
+        else:
+            statres = os.fstat(fileobj.fileno())
+        linkname = ""
+
+        stmd = statres.st_mode
+        if stat.S_ISREG(stmd):
+            inode = (statres.st_ino, statres.st_dev)
+            if not self.dereference and statres.st_nlink > 1 and \
+                    inode in self.inodes and arcname != self.inodes[inode]:
+                # Is it a hardlink to an already
+                # archived file?
+                type = LNKTYPE
+                linkname = self.inodes[inode]
+            else:
+                # The inode is added only if its valid.
+                # For win32 it is always 0.
+                type = REGTYPE
+                if inode[0]:
+                    self.inodes[inode] = arcname
+        elif stat.S_ISDIR(stmd):
+            type = DIRTYPE
+        elif stat.S_ISFIFO(stmd):
+            type = FIFOTYPE
+        elif stat.S_ISLNK(stmd):
+            type = SYMTYPE
+            linkname = os.readlink(name)
+        elif stat.S_ISCHR(stmd):
+            type = CHRTYPE
+        elif stat.S_ISBLK(stmd):
+            type = BLKTYPE
+        else:
+            # Sockets and other unsupported file kinds: signal the caller
+            # with None instead of raising.
+            return None
+
+        # Fill the TarInfo object with all
+        # information we can get.
+        tarinfo.name = arcname
+        tarinfo.mode = stmd
+        tarinfo.uid = statres.st_uid
+        tarinfo.gid = statres.st_gid
+        if type == REGTYPE:
+            tarinfo.size = statres.st_size
+        else:
+            # Only regular files carry payload data.
+            tarinfo.size = 0L
+        tarinfo.mtime = statres.st_mtime
+        tarinfo.type = type
+        tarinfo.linkname = linkname
+        # pwd/grp are None on platforms without them (e.g. Windows).
+        if pwd:
+            try:
+                tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
+            except KeyError:
+                pass
+        if grp:
+            try:
+                tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
+            except KeyError:
+                pass
+
+        if type in (CHRTYPE, BLKTYPE):
+            if hasattr(os, "major") and hasattr(os, "minor"):
+                tarinfo.devmajor = os.major(statres.st_rdev)
+                tarinfo.devminor = os.minor(statres.st_rdev)
+        return tarinfo
+
+    def list(self, verbose=True):
+        """Print a table of contents to sys.stdout. If `verbose' is False, only
+           the names of the members are printed. If it is True, an `ls -l'-like
+           output is produced.
+        """
+        self._check()
+
+        # Python 2 print statements; trailing commas suppress the newline
+        # so each member is rendered on a single line.
+        for tarinfo in self:
+            if verbose:
+                print filemode(tarinfo.mode),
+                print "%s/%s" % (tarinfo.uname or tarinfo.uid,
+                                 tarinfo.gname or tarinfo.gid),
+                if tarinfo.ischr() or tarinfo.isblk():
+                    # Devices show "major,minor" instead of a size.
+                    print "%10s" % ("%d,%d" \
+                                    % (tarinfo.devmajor, tarinfo.devminor)),
+                else:
+                    print "%10d" % tarinfo.size,
+                print "%d-%02d-%02d %02d:%02d:%02d" \
+                      % time.localtime(tarinfo.mtime)[:6],
+
+            print tarinfo.name + ("/" if tarinfo.isdir() else ""),
+
+            if verbose:
+                if tarinfo.issym():
+                    print "->", tarinfo.linkname,
+                if tarinfo.islnk():
+                    print "link to", tarinfo.linkname,
+            print
+
+    def add(self, name, arcname=None, recursive=True, exclude=None, filter=None):
+        """Add the file `name' to the archive. `name' may be any type of file
+           (directory, fifo, symbolic link, etc.). If given, `arcname'
+           specifies an alternative name for the file in the archive.
+           Directories are added recursively by default. This can be avoided by
+           setting `recursive' to False. `exclude' is a function that should
+           return True for each filename to be excluded. `filter' is a function
+           that expects a TarInfo object argument and returns the changed
+           TarInfo object, if it returns None the TarInfo object will be
+           excluded from the archive.
+        """
+        self._check("aw")
+
+        if arcname is None:
+            arcname = name
+
+        # Exclude pathnames.
+        if exclude is not None:
+            # `exclude' is deprecated in favour of `filter'.
+            import warnings
+            warnings.warn("use the filter argument instead",
+                    DeprecationWarning, 2)
+            if exclude(name):
+                self._dbg(2, "tarfile: Excluded %r" % name)
+                return
+
+        # Skip if somebody tries to archive the archive...
+        if self.name is not None and os.path.abspath(name) == self.name:
+            self._dbg(2, "tarfile: Skipped %r" % name)
+            return
+
+        self._dbg(1, name)
+
+        # Create a TarInfo object from the file.
+        tarinfo = self.gettarinfo(name, arcname)
+
+        if tarinfo is None:
+            self._dbg(1, "tarfile: Unsupported type %r" % name)
+            return
+
+        # Change or exclude the TarInfo object.
+        if filter is not None:
+            tarinfo = filter(tarinfo)
+            if tarinfo is None:
+                self._dbg(2, "tarfile: Excluded %r" % name)
+                return
+
+        # Append the tar header and data to the archive.
+        if tarinfo.isreg():
+            f = bltn_open(name, "rb")
+            self.addfile(tarinfo, f)
+            f.close()
+
+        elif tarinfo.isdir():
+            self.addfile(tarinfo)
+            if recursive:
+                # Recurse into the directory, propagating all options.
+                for f in os.listdir(name):
+                    self.add(os.path.join(name, f), os.path.join(arcname, f),
+                            recursive, exclude, filter)
+
+        else:
+            # Symlinks, fifos, devices: header only, no payload.
+            self.addfile(tarinfo)
+
+    def addfile(self, tarinfo, fileobj=None):
+        """Add the TarInfo object `tarinfo' to the archive. If `fileobj' is
+           given, tarinfo.size bytes are read from it and added to the archive.
+           You can create TarInfo objects using gettarinfo().
+           On Windows platforms, `fileobj' should always be opened with mode
+           'rb' to avoid irritation about the file size.
+        """
+        self._check("aw")
+
+        # Work on a copy so the caller's TarInfo is never mutated.
+        tarinfo = copy.copy(tarinfo)
+
+        buf = tarinfo.tobuf(self.format, self.encoding, self.errors)
+        self.fileobj.write(buf)
+        self.offset += len(buf)
+
+        # If there's data to follow, append it.
+        if fileobj is not None:
+            copyfileobj(fileobj, self.fileobj, tarinfo.size)
+            blocks, remainder = divmod(tarinfo.size, BLOCKSIZE)
+            if remainder > 0:
+                # Pad the last partial block with NULs.
+                self.fileobj.write(NUL * (BLOCKSIZE - remainder))
+                blocks += 1
+            self.offset += blocks * BLOCKSIZE
+
+        self.members.append(tarinfo)
+
+    def extractall(self, path=".", members=None):
+        """Extract all members from the archive to the current working
+           directory and set owner, modification time and permissions on
+           directories afterwards. `path' specifies a different directory
+           to extract to. `members' is optional and must be a subset of the
+           list returned by getmembers().
+        """
+        directories = []
+
+        if members is None:
+            members = self
+
+        for tarinfo in members:
+            if tarinfo.isdir():
+                # Extract directories with a safe mode.
+                # Their real metadata is applied later, once the contents
+                # are in place (a read-only dir would block extraction).
+                directories.append(tarinfo)
+                tarinfo = copy.copy(tarinfo)
+                tarinfo.mode = 0700
+            self.extract(tarinfo, path)
+
+        # Reverse sort directories.
+        # Deepest-first so children are finalized before their parents.
+        directories.sort(key=operator.attrgetter('name'))
+        directories.reverse()
+
+        # Set correct owner, mtime and filemode on directories.
+        for tarinfo in directories:
+            dirpath = os.path.join(path, tarinfo.name)
+            try:
+                self.chown(tarinfo, dirpath)
+                self.utime(tarinfo, dirpath)
+                self.chmod(tarinfo, dirpath)
+            except ExtractError, e:
+                if self.errorlevel > 1:
+                    raise
+                else:
+                    self._dbg(1, "tarfile: %s" % e)
+
+    def extract(self, member, path=""):
+        """Extract a member from the archive to the current working directory,
+           using its full name. Its file information is extracted as accurately
+           as possible. `member' may be a filename or a TarInfo object. You can
+           specify a different directory using `path'.
+        """
+        self._check("r")
+
+        if isinstance(member, basestring):
+            tarinfo = self.getmember(member)
+        else:
+            tarinfo = member
+
+        # Prepare the link target for makelink().
+        if tarinfo.islnk():
+            tarinfo._link_target = os.path.join(path, tarinfo.linkname)
+
+        try:
+            self._extract_member(tarinfo, os.path.join(path, tarinfo.name))
+        except EnvironmentError, e:
+            # OS-level failures obey errorlevel: > 0 re-raises,
+            # otherwise they are only logged.
+            if self.errorlevel > 0:
+                raise
+            else:
+                if e.filename is None:
+                    self._dbg(1, "tarfile: %s" % e.strerror)
+                else:
+                    self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename))
+        except ExtractError, e:
+            # Tar-level failures are only fatal at errorlevel > 1.
+            if self.errorlevel > 1:
+                raise
+            else:
+                self._dbg(1, "tarfile: %s" % e)
+
+    def extractfile(self, member):
+        """Extract a member from the archive as a file object. `member' may be
+           a filename or a TarInfo object. If `member' is a regular file, a
+           file-like object is returned. If `member' is a link, a file-like
+           object is constructed from the link's target. If `member' is none of
+           the above, None is returned.
+           The file-like object is read-only and provides the following
+           methods: read(), readline(), readlines(), seek() and tell()
+        """
+        self._check("r")
+
+        if isinstance(member, basestring):
+            tarinfo = self.getmember(member)
+        else:
+            tarinfo = member
+
+        if tarinfo.isreg():
+            return self.fileobject(self, tarinfo)
+
+        elif tarinfo.type not in SUPPORTED_TYPES:
+            # If a member's type is unknown, it is treated as a
+            # regular file.
+            return self.fileobject(self, tarinfo)
+
+        elif tarinfo.islnk() or tarinfo.issym():
+            if isinstance(self.fileobj, _Stream):
+                # A small but ugly workaround for the case that someone tries
+                # to extract a (sym)link as a file-object from a non-seekable
+                # stream of tar blocks.
+                raise StreamError("cannot extract (sym)link as file object")
+            else:
+                # A (sym)link's file object is its target's file object.
+                return self.extractfile(self._find_link_target(tarinfo))
+        else:
+            # If there's no data associated with the member (directory, chrdev,
+            # blkdev, etc.), return None instead of a file object.
+            return None
+
+    def _extract_member(self, tarinfo, targetpath):
+        """Extract the TarInfo object tarinfo to a physical
+           file called targetpath.
+        """
+        # NOTE(review): targetpath is used as-is; member names containing
+        # ".." components or absolute paths are not rejected here, so a
+        # crafted archive can write outside the destination directory
+        # (the classic tarfile path-traversal issue). Callers must
+        # sanitize member names before extracting untrusted archives.
+
+        # Fetch the TarInfo object for the given name
+        # and build the destination pathname, replacing
+        # forward slashes to platform specific separators.
+        targetpath = targetpath.rstrip("/")
+        targetpath = targetpath.replace("/", os.sep)
+
+        # Create all upper directories.
+        upperdirs = os.path.dirname(targetpath)
+        if upperdirs and not os.path.exists(upperdirs):
+            # Create directories that are not part of the archive with
+            # default permissions.
+            os.makedirs(upperdirs)
+
+        if tarinfo.islnk() or tarinfo.issym():
+            self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname))
+        else:
+            self._dbg(1, tarinfo.name)
+
+        # Dispatch to the type-specific make*() method; each can be
+        # overridden in a subclass.
+        if tarinfo.isreg():
+            self.makefile(tarinfo, targetpath)
+        elif tarinfo.isdir():
+            self.makedir(tarinfo, targetpath)
+        elif tarinfo.isfifo():
+            self.makefifo(tarinfo, targetpath)
+        elif tarinfo.ischr() or tarinfo.isblk():
+            self.makedev(tarinfo, targetpath)
+        elif tarinfo.islnk() or tarinfo.issym():
+            self.makelink(tarinfo, targetpath)
+        elif tarinfo.type not in SUPPORTED_TYPES:
+            self.makeunknown(tarinfo, targetpath)
+        else:
+            self.makefile(tarinfo, targetpath)
+
+        self.chown(tarinfo, targetpath)
+        if not tarinfo.issym():
+            # chmod/utime on a symlink would affect its target.
+            self.chmod(tarinfo, targetpath)
+            self.utime(tarinfo, targetpath)
+
+ #--------------------------------------------------------------------------
+ # Below are the different file methods. They are called via
+ # _extract_member() when extract() is called. They can be replaced in a
+ # subclass to implement other functionality.
+
+    def makedir(self, tarinfo, targetpath):
+        """Make a directory called targetpath.
+        """
+        try:
+            # Use a safe mode for the directory, the real mode is set
+            # later in _extract_member().
+            os.mkdir(targetpath, 0700)
+        except EnvironmentError, e:
+            # An already-existing directory is fine; anything else is not.
+            if e.errno != errno.EEXIST:
+                raise
+
+    def makefile(self, tarinfo, targetpath):
+        """Make a file called targetpath.
+        """
+        # NOTE(review): no try/finally here — if copyfileobj() raises,
+        # both handles leak until garbage collection.
+        source = self.extractfile(tarinfo)
+        target = bltn_open(targetpath, "wb")
+        copyfileobj(source, target)
+        source.close()
+        target.close()
+
+    def makeunknown(self, tarinfo, targetpath):
+        """Make a file from a TarInfo object with an unknown type
+           at targetpath.
+        """
+        # Unknown member types degrade to regular files, with a debug notice.
+        self.makefile(tarinfo, targetpath)
+        self._dbg(1, "tarfile: Unknown file type %r, " \
+                     "extracted as regular file." % tarinfo.type)
+
+    def makefifo(self, tarinfo, targetpath):
+        """Make a fifo called targetpath.
+        """
+        if hasattr(os, "mkfifo"):
+            os.mkfifo(targetpath)
+        else:
+            raise ExtractError("fifo not supported by system")
+
+    def makedev(self, tarinfo, targetpath):
+        """Make a character or block device called targetpath.
+        """
+        if not hasattr(os, "mknod") or not hasattr(os, "makedev"):
+            raise ExtractError("special devices not supported by system")
+
+        # Combine the member's permission bits with the device-type flag.
+        mode = tarinfo.mode
+        if tarinfo.isblk():
+            mode |= stat.S_IFBLK
+        else:
+            mode |= stat.S_IFCHR
+
+        os.mknod(targetpath, mode,
+                 os.makedev(tarinfo.devmajor, tarinfo.devminor))
+
+    def makelink(self, tarinfo, targetpath):
+        """Make a (symbolic) link called targetpath. If it cannot be created
+          (platform limitation), we try to make a copy of the referenced file
+          instead of a link.
+        """
+        if hasattr(os, "symlink") and hasattr(os, "link"):
+            # For systems that support symbolic and hard links.
+            if tarinfo.issym():
+                # Replace any pre-existing entry at targetpath.
+                if os.path.lexists(targetpath):
+                    os.unlink(targetpath)
+                os.symlink(tarinfo.linkname, targetpath)
+            else:
+                # See extract().
+                if os.path.exists(tarinfo._link_target):
+                    if os.path.lexists(targetpath):
+                        os.unlink(targetpath)
+                    os.link(tarinfo._link_target, targetpath)
+                else:
+                    # Hardlink target missing on disk: extract the archived
+                    # target's content in its place.
+                    self._extract_member(self._find_link_target(tarinfo), targetpath)
+        else:
+            try:
+                self._extract_member(self._find_link_target(tarinfo), targetpath)
+            except KeyError:
+                raise ExtractError("unable to resolve link inside archive")
+
+    def chown(self, tarinfo, targetpath):
+        """Set owner of targetpath according to tarinfo.
+        """
+        if pwd and hasattr(os, "geteuid") and os.geteuid() == 0:
+            # We have to be root to do so.
+            # Resolution order: symbolic name first, then numeric id,
+            # finally the current process's gid/uid as a last resort.
+            try:
+                g = grp.getgrnam(tarinfo.gname)[2]
+            except KeyError:
+                try:
+                    g = grp.getgrgid(tarinfo.gid)[2]
+                except KeyError:
+                    g = os.getgid()
+            try:
+                u = pwd.getpwnam(tarinfo.uname)[2]
+            except KeyError:
+                try:
+                    u = pwd.getpwuid(tarinfo.uid)[2]
+                except KeyError:
+                    u = os.getuid()
+            try:
+                if tarinfo.issym() and hasattr(os, "lchown"):
+                    # Change the link itself, not its target.
+                    os.lchown(targetpath, u, g)
+                else:
+                    if sys.platform != "os2emx":
+                        os.chown(targetpath, u, g)
+            except EnvironmentError, e:
+                raise ExtractError("could not change owner")
+
+    def chmod(self, tarinfo, targetpath):
+        """Set file permissions of targetpath according to tarinfo.
+        """
+        if hasattr(os, 'chmod'):
+            try:
+                os.chmod(targetpath, tarinfo.mode)
+            except EnvironmentError, e:
+                raise ExtractError("could not change mode")
+
+    def utime(self, tarinfo, targetpath):
+        """Set modification time of targetpath according to tarinfo.
+        """
+        if not hasattr(os, 'utime'):
+            return
+        try:
+            # atime and mtime are both set to the archived mtime.
+            os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime))
+        except EnvironmentError, e:
+            raise ExtractError("could not change modification time")
+
+ #--------------------------------------------------------------------------
+    def next(self):
+        """Return the next member of the archive as a TarInfo object, when
+           TarFile is opened for reading. Return None if there is no more
+           available.
+        """
+        self._check("ra")
+        # __init__ pre-reads the first member; hand it out once.
+        if self.firstmember is not None:
+            m = self.firstmember
+            self.firstmember = None
+            return m
+
+        # Read the next block.
+        self.fileobj.seek(self.offset)
+        tarinfo = None
+        while True:
+            try:
+                tarinfo = self.tarinfo.fromtarfile(self)
+            except EOFHeaderError, e:
+                if self.ignore_zeros:
+                    self._dbg(2, "0x%X: %s" % (self.offset, e))
+                    self.offset += BLOCKSIZE
+                    continue
+            except InvalidHeaderError, e:
+                if self.ignore_zeros:
+                    self._dbg(2, "0x%X: %s" % (self.offset, e))
+                    self.offset += BLOCKSIZE
+                    continue
+                elif self.offset == 0:
+                    # A bad first header means this is not a tar at all.
+                    raise ReadError(str(e))
+            except EmptyHeaderError:
+                if self.offset == 0:
+                    raise ReadError("empty file")
+            except TruncatedHeaderError, e:
+                if self.offset == 0:
+                    raise ReadError(str(e))
+            except SubsequentHeaderError, e:
+                raise ReadError(str(e))
+            # Any swallowed error past offset 0 falls through to here
+            # with tarinfo None, which is treated as end-of-archive.
+            break
+
+        if tarinfo is not None:
+            self.members.append(tarinfo)
+        else:
+            self._loaded = True
+
+        return tarinfo
+
+ #--------------------------------------------------------------------------
+ # Little helper methods:
+
+ def _getmember(self, name, tarinfo=None, normalize=False):
+ """Find an archive member by name from bottom to top.
+ If tarinfo is given, it is used as the starting point.
+ """
+ # Ensure that all members have been loaded.
+ members = self.getmembers()
+
+ # Limit the member search list up to tarinfo.
+ if tarinfo is not None:
+ members = members[:members.index(tarinfo)]
+
+ if normalize:
+ name = os.path.normpath(name)
+
+ for member in reversed(members):
+ if normalize:
+ member_name = os.path.normpath(member.name)
+ else:
+ member_name = member.name
+
+ if name == member_name:
+ return member
+
+ def _load(self):
+ """Read through the entire archive file and look for readable
+ members.
+ """
+ while True:
+ tarinfo = self.next()
+ if tarinfo is None:
+ break
+ self._loaded = True
+
+ def _check(self, mode=None):
+ """Check if TarFile is still open, and if the operation's mode
+ corresponds to TarFile's mode.
+ """
+ if self.closed:
+ raise IOError("%s is closed" % self.__class__.__name__)
+ if mode is not None and self.mode not in mode:
+ raise IOError("bad operation for mode %r" % self.mode)
+
+ def _find_link_target(self, tarinfo):
+ """Find the target member of a symlink or hardlink member in the
+ archive.
+ """
+ if tarinfo.issym():
+ # Always search the entire archive.
+ linkname = os.path.dirname(tarinfo.name) + "/" + tarinfo.linkname
+ limit = None
+ else:
+ # Search the archive before the link, because a hard link is
+ # just a reference to an already archived file.
+ linkname = tarinfo.linkname
+ limit = tarinfo
+
+ member = self._getmember(linkname, tarinfo=limit, normalize=True)
+ if member is None:
+ raise KeyError("linkname %r not found" % linkname)
+ return member
+
+ def __iter__(self):
+ """Provide an iterator object.
+ """
+ if self._loaded:
+ return iter(self.members)
+ else:
+ return TarIter(self)
+
+ def _dbg(self, level, msg):
+ """Write debugging output to sys.stderr.
+ """
+ if level <= self.debug:
+ print >> sys.stderr, msg
+
+ def __enter__(self):
+ self._check()
+ return self
+
+ def __exit__(self, type, value, traceback):
+ if type is None:
+ self.close()
+ else:
+ # An exception occurred. We must not call close() because
+ # it would try to write end-of-archive blocks and padding.
+ if not self._extfileobj:
+ self.fileobj.close()
+ self.closed = True
+# class TarFile
+
+class TarIter:
+ """Iterator Class.
+
+ for tarinfo in TarFile(...):
+ suite...
+ """
+
+ def __init__(self, tarfile):
+ """Construct a TarIter object.
+ """
+ self.tarfile = tarfile
+ self.index = 0
+ def __iter__(self):
+ """Return iterator object.
+ """
+ return self
+ def next(self):
+ """Return the next item using TarFile's next() method.
+ When all members have been read, set TarFile as _loaded.
+ """
+ # Fix for SF #1100429: Under rare circumstances it can
+ # happen that getmembers() is called during iteration,
+ # which will cause TarIter to stop prematurely.
+ if not self.tarfile._loaded:
+ tarinfo = self.tarfile.next()
+ if not tarinfo:
+ self.tarfile._loaded = True
+ raise StopIteration
+ else:
+ try:
+ tarinfo = self.tarfile.members[self.index]
+ except IndexError:
+ raise StopIteration
+ self.index += 1
+ return tarinfo
+
+# Helper classes for sparse file support
+class _section:
+ """Base class for _data and _hole.
+ """
+ def __init__(self, offset, size):
+ self.offset = offset
+ self.size = size
+ def __contains__(self, offset):
+ return self.offset <= offset < self.offset + self.size
+
+class _data(_section):
+ """Represent a data section in a sparse file.
+ """
+ def __init__(self, offset, size, realpos):
+ _section.__init__(self, offset, size)
+ self.realpos = realpos
+
+class _hole(_section):
+ """Represent a hole section in a sparse file.
+ """
+ pass
+
+class _ringbuffer(list):
+ """Ringbuffer class which increases performance
+ over a regular list.
+ """
+ def __init__(self):
+ self.idx = 0
+ def find(self, offset):
+ idx = self.idx
+ while True:
+ item = self[idx]
+ if offset in item:
+ break
+ idx += 1
+ if idx == len(self):
+ idx = 0
+ if idx == self.idx:
+ # End of File
+ return None
+ self.idx = idx
+ return item
+
+#---------------------------------------------
+# zipfile compatible TarFile class
+#---------------------------------------------
+TAR_PLAIN = 0 # zipfile.ZIP_STORED
+TAR_GZIPPED = 8 # zipfile.ZIP_DEFLATED
+class TarFileCompat:
+ """TarFile class compatible with standard module zipfile's
+ ZipFile class.
+ """
+ def __init__(self, file, mode="r", compression=TAR_PLAIN):
+ from warnings import warnpy3k
+ warnpy3k("the TarFileCompat class has been removed in Python 3.0",
+ stacklevel=2)
+ if compression == TAR_PLAIN:
+ self.tarfile = TarFile.taropen(file, mode)
+ elif compression == TAR_GZIPPED:
+ self.tarfile = TarFile.gzopen(file, mode)
+ else:
+ raise ValueError("unknown compression constant")
+ if mode[0:1] == "r":
+ members = self.tarfile.getmembers()
+ for m in members:
+ m.filename = m.name
+ m.file_size = m.size
+ m.date_time = time.gmtime(m.mtime)[:6]
+ def namelist(self):
+ return map(lambda m: m.name, self.infolist())
+ def infolist(self):
+ return filter(lambda m: m.type in REGULAR_TYPES,
+ self.tarfile.getmembers())
+ def printdir(self):
+ self.tarfile.list()
+ def testzip(self):
+ return
+ def getinfo(self, name):
+ return self.tarfile.getmember(name)
+ def read(self, name):
+ return self.tarfile.extractfile(self.tarfile.getmember(name)).read()
+ def write(self, filename, arcname=None, compress_type=None):
+ self.tarfile.add(filename, arcname)
+ def writestr(self, zinfo, bytes):
+ try:
+ from cStringIO import StringIO
+ except ImportError:
+ from StringIO import StringIO
+ import calendar
+ tinfo = TarInfo(zinfo.filename)
+ tinfo.size = len(bytes)
+ tinfo.mtime = calendar.timegm(zinfo.date_time)
+ self.tarfile.addfile(tinfo, StringIO(bytes))
+ def close(self):
+ self.tarfile.close()
+#class TarFileCompat
+
+#--------------------
+# exported functions
+#--------------------
+def is_tarfile(name):
+ """Return True if name points to a tar archive that we
+ are able to handle, else return False.
+ """
+ try:
+ t = open(name)
+ t.close()
+ return True
+ except TarError:
+ return False
+
+bltn_open = open
+open = TarFile.open
diff --git a/mat-cli b/mat-cli
deleted file mode 100755
index 1058d46..0000000
--- a/mat-cli
+++ /dev/null
@@ -1,161 +0,0 @@
-#!/usr/bin/env python
-'''
- Metadata anonymisation toolkit - CLI edition
-'''
-
-import sys
-import xml.sax
-import optparse
-import os
-
-import hachoir_core
-
-from mat import mat
-
-
-def parse():
- '''
- Get, and parse options passed to the program
- '''
- parser = optparse.OptionParser(usage='%prog [options] files\n\
-The default behaviour is to clean files given in argument')
- options = optparse.OptionGroup(parser, 'Options')
- options.add_option('--add2archive', '-a', action='store_true',
- default=False, help='Add to output archive non-supported filetypes')
- options.add_option('--backup', '-b', action='store_true', default=False,
- help='Keep a backup copy')
- options.add_option('--force', '-f', action='store_true', default=False,
- help='Don\'t check if files are clean before cleaning')
- options.add_option('--strict', '-u', action='store_true', default=False,
- help='Strict cleaning mode : loss can occur')
-
- info = optparse.OptionGroup(parser, 'Informations')
- info.add_option('--check', '-c', action='store_true', default=False,
- help='Check if a file is free of harmful metadatas')
- info.add_option('--display', '-d', action='store_true', default=False,
- help='List all the harmful metadata of a file without removing them')
- info.add_option('--list', '-l', action='store_true', default=False,
- help='List all supported fileformat')
- info.add_option('--version', '-v', action='callback',
- callback=display_version, help='Display version and exit')
- parser.add_option_group(options)
- parser.add_option_group(info)
-
- values, arguments = parser.parse_args()
- if not arguments and values.list is False:
- # if no argument and no files are passed,
- # print help and exit
- parser.print_help()
- sys.exit(0)
- return values, arguments
-
-
-def display_version(*_):
- '''
- Display the program's version, and exit
- '''
- print('Metadata Anonymisation Toolkit version %s') % mat.__version__
- print('Hachoir version %s') % hachoir_core.__version__
- sys.exit(0)
-
-
-def list_meta(class_file, filename, force):
- '''
- Print all the metadata of 'filename' on stdout
- '''
- print('[+] File %s :' % filename)
- if force is False and class_file.is_clean():
- print('No harmful metadata found')
- else:
- meta = class_file.get_meta()
- print ('Harmful metadata found:')
- if meta is not None:
- for key, value in class_file.get_meta().iteritems():
- print('\t' + key + ' : ' + str(value))
-
-
-def is_clean(class_file, filename, force):
- '''
- Say if 'filename' is clean or not
- '''
- if class_file.is_clean():
- print('[+] %s is clean' % filename)
- else:
- print('[+] %s is not clean' % filename)
-
-
-def clean_meta(class_file, filename, force):
- '''
- Clean the file 'filename'
- '''
- print('[+] Cleaning %s' % filename)
- if force is False and class_file.is_clean():
- print('%s is already clean' % filename)
- else:
- if class_file.remove_all():
- print('%s cleaned !' % filename)
- else:
- print('Unable to clean %s', filename)
-
-def clean_meta_strict(class_file, filename, force):
- '''
- Clean the file 'filename', strict way
- '''
- print('[+] Cleaning %s' % filename)
- if force is False and class_file.is_clean():
- print('%s is already clean' % filename)
- else:
- class_file.remove_all_strict()
- print('%s cleaned' % filename)
-
-
-def list_supported():
- '''
- Print all supported fileformat, and exit
- '''
- handler = mat.XMLParser()
- parser = xml.sax.make_parser()
- parser.setContentHandler(handler)
- path = os.path.join(mat.get_sharedir(), 'FORMATS')
- with open(path, 'r') as xmlfile:
- parser.parse(xmlfile)
-
- for item in handler.list:
- print('%s (%s)' % (item['name'], item['extension']))
- print('\tsupport : ' + item['support'])
- print('\tmetadata : ' + item['metadata'])
- print('\tmethod : ' + item['method'])
- if item['support'] == 'partial':
- print('\tremaining : ' + item['remaining'])
- print('\n')
- sys.exit(0)
-
-
-def main():
- '''
- main function : get args, and launch the appropriate function
- '''
- args, filenames = parse()
-
- #func receive the function correponding to the options given as parameters
- if args.display is True: # only print metadatas
- func = list_meta
- elif args.check is True: # only check if the file is clean
- func = is_clean
- elif args.strict is True: # destructive anonymisation method
- func = clean_meta_strict
- elif args.list is True: # print the list of all supported format
- list_supported()
- else: # clean the file
- func = clean_meta
-
- for filename in filenames:
- class_file = mat.create_class_file(filename, args.backup,
- args.add2archive)
- if class_file is not None:
- func(class_file, filename, args.force)
- else:
- print('Unable to process %s' % filename)
-
-if __name__ == '__main__':
- main()
diff --git a/mat-cli.1 b/mat-cli.1
deleted file mode 100644
index 48ebf9a..0000000
--- a/mat-cli.1
+++ /dev/null
@@ -1,78 +0,0 @@
-.TH METADATA "1" "August 2011" "Metadata Anonymisation Toolkit" "User Commands"
-
-
-.SH NAME
-MAT \- Metadata Anonymisation Toolkit
-
-
-.SH SYNOPSIS
-.B mat-cli
-[\fIoptions\fR] \fIfiles\fR
-.TP
-.B mat-gui
-
-
-.SH DESCRIPTION
-The \fBMetadata Anonymisation Toolkit\fR is a lib (with a CLI and a GUI)
-created to anonymise file's \fBmetadata\fR. In essence, metadata answer who,
-what, when, where, why, and how about every face of the data that are being
-documented. They can be a \fBrisk for privacy\fR.
-
-
-.SH OPTIONS
-.TP
-\fB\-h\fR, \fB\-\-help\fR
-show this help message and exit
-.TP
-\fB\-a\fR, \fB\-\-add2archive\fR
-Add to outputed archive non\-supported filetypes
-.TP
-\fB\-b\fR, \fB\-\-backup\fR
-Keep a backup copy
-.TP
-\fB\-c\fR, \fB\-\-check\fR
-Check if a file is free of harmful metadatas
-.TP
-\fB\-d\fR, \fB\-\-display\fR
-List all the harmful meta of a file without removing them
-.TP
-\fB\-f\fR, \fB\-\-force\fR
-Don't check if files are clean before cleaning
-.TP
-\fB\-l\fR, \fB\-\-list\fR
-List all supported fileformat
-.TP
-\fB\-u\fR, \fB\-\-strict\fR
-Remove harmful meta, but loss can occure
-.TP
-\fB\-v\fR, \fB\-\-version\fR
-Display version and exit
-
-
-.SH EXAMPLES
-.TP
-\fBmat-cli \-\-display\fR mydocument.pdf
-Display the mydocument.pdf's harmful metadata
-.TP
-\fBmat-cli \-\-check *.jpg\fR
-Check all the jpg images from the current folder
-
-
-.SH NOTES
-MAT \fBonly process metadata\fR, it does \fBnot\fR handle file data.
-Blame yourself if you are traced back because of the data of your files.
-MAT is not perfect : In most of the cases, a forensic expert with a lot
-of time \fBcan trace back\fR your document.
-If you want absolute privacy, use plain-text.
-
-
-.SH AUTHOR
-Julien (\fBjvoisin\fR) Voisin , during the GSoC 2011
-
-
-.SH BUGS
-MAT does not handle watermaking/tattoo for now.
-
-
-.SH "SEE ALSO"
-exiftool, hachoir-metadata
diff --git a/mat-gui b/mat-gui
index 550cea9..db007e5 100755
--- a/mat-gui
+++ b/mat-gui
@@ -17,8 +17,8 @@ import mimetypes
import xml.sax
import urllib2
-from mat import mat
-from mat import strippers
+from lib import mat
+from lib import strippers
logging.basicConfig(level=mat.LOGGING_LEVEL)
diff --git a/mat-gui.1 b/mat-gui.1
index e350033..51115ae 120000
--- a/mat-gui.1
+++ b/mat-gui.1
@@ -1 +1 @@
-mat-cli.1
\ No newline at end of file
+mat.1
\ No newline at end of file
diff --git a/mat/__init__.py b/mat/__init__.py
deleted file mode 100644
index 8b13789..0000000
--- a/mat/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/mat/archive.py b/mat/archive.py
deleted file mode 100644
index 9993102..0000000
--- a/mat/archive.py
+++ /dev/null
@@ -1,291 +0,0 @@
-'''
- Take care of archives formats
-'''
-
-import zipfile
-import shutil
-import os
-import logging
-import tempfile
-
-import parser
-import mat
-from tarfile import tarfile
-
-
-class GenericArchiveStripper(parser.GenericParser):
- '''
- Represent a generic archive
- '''
- def __init__(self, filename, parser, mime, backup, add2archive):
- super(GenericArchiveStripper, self).__init__(filename, parser, mime,
- backup, add2archive)
- self.compression = ''
- self.add2archive = add2archive
- self.tempdir = tempfile.mkdtemp()
-
- def __del__(self):
- '''
- Remove the files inside the temp dir,
- then remove the temp dir
- '''
- for root, dirs, files in os.walk(self.tempdir):
- for item in files:
- path_file = os.path.join(root, item)
- mat.secure_remove(path_file)
- shutil.rmtree(self.tempdir)
-
- def remove_all(self):
- '''
- Call _remove_all() with in argument : "normal"
- '''
- return self._remove_all('normal')
-
- def remove_all_strict(self):
- '''
- call remove_all() with in argument : "strict"
- '''
- return self._remove_all('strict')
-
- def _remove_all(self, method):
- '''
- Remove all meta, normal way if method is "normal",
- else, use the strict way (with possible data loss)
- '''
- raise NotImplementedError
-
-
-class ZipStripper(GenericArchiveStripper):
- '''
- Represent a zip file
- '''
- def is_file_clean(self, fileinfo):
- '''
- Check if a ZipInfo object is clean of metadatas added
- by zip itself, independently of the corresponding file metadatas
- '''
- if fileinfo.comment is not '':
- return False
- elif fileinfo.date_time is not 0:
- return False
- elif fileinfo.create_system is not 0:
- return False
- elif fileinfo.create_version is not 0:
- return False
- else:
- return True
-
- def is_clean(self):
- '''
- Check if the given file is clean from harmful metadata
- '''
- zipin = zipfile.ZipFile(self.filename, 'r')
- if zipin.comment != '':
- logging.debug('%s has a comment' % self.filename)
- return False
- for item in zipin.infolist():
- #I have not found a way to remove the crap added by zipfile :/
- #if not self.is_file_clean(item):
- # logging.debug('%s from %s has compromizing zipinfo' %
- # (item.filename, self.filename))
- # return False
- zipin.extract(item, self.tempdir)
- name = os.path.join(self.tempdir, item.filename)
- if os.path.isfile(name):
- try:
- cfile = mat.create_class_file(name, False,
- self.add2archive)
- if not cfile.is_clean():
- return False
- except:
- #best solution I have found
- logging.info('%s\'s fileformat is not supported, or is a \
-harmless format' % item.filename)
- _, ext = os.path.splitext(name)
- bname = os.path.basename(item.filename)
- if ext not in parser.NOMETA:
- if bname != 'mimetype' and bname != '.rels':
- return False
- zipin.close()
- return True
-
- def get_meta(self):
- '''
- Return all the metadata of a ZipFile (don't return metadatas
- of contained files : should it ?)
- '''
- zipin = zipfile.ZipFile(self.filename, 'r')
- metadata = {}
- for field in zipin.infolist():
- zipmeta = {}
- zipmeta['comment'] = field.comment
- zipmeta['modified'] = field.date_time
- zipmeta['system'] = field.create_system
- zipmeta['zip_version'] = field.create_version
- metadata[field.filename] = zipmeta
- metadata["%s comment" % self.filename] = zipin.comment
- zipin.close()
- return metadata
-
- def _remove_all(self, method):
- '''
- So far, the zipfile module does not allow to write a ZipInfo
- object into a zipfile (and it's a shame !) : so data added
- by zipfile itself could not be removed. It's a big concern.
- Is shiping a patched version of zipfile.py a good idea ?
- '''
- zipin = zipfile.ZipFile(self.filename, 'r')
- zipout = zipfile.ZipFile(self.output, 'w', allowZip64=True)
- for item in zipin.infolist():
- zipin.extract(item, self.tempdir)
- name = os.path.join(self.tempdir, item.filename)
- if os.path.isfile(name):
- try:
- cfile = mat.create_class_file(name, False,
- self.add2archive)
- if method is 'normal':
- cfile.remove_all()
- else:
- cfile.remove_all_strict()
- logging.debug('Processing %s from %s' % (item.filename,
- self.filename))
- zipout.write(name, item.filename)
- except:
- logging.info('%s\'s format is not supported or harmless' %
- item.filename)
- _, ext = os.path.splitext(name)
- if self.add2archive or ext in parser.NOMETA:
- zipout.write(name, item.filename)
- zipout.comment = ''
- zipin.close()
- zipout.close()
- logging.info('%s treated' % self.filename)
- self.do_backup()
- return True
-
-
-class TarStripper(GenericArchiveStripper):
- '''
- Represent a tarfile archive
- '''
- def _remove(self, current_file):
- '''
- remove the meta added by tar itself to the file
- '''
- current_file.mtime = 0
- current_file.uid = 0
- current_file.gid = 0
- current_file.uname = ''
- current_file.gname = ''
- return current_file
-
- def _remove_all(self, method):
- tarin = tarfile.open(self.filename, 'r' + self.compression)
- tarout = tarfile.open(self.output, 'w' + self.compression)
- for item in tarin.getmembers():
- tarin.extract(item, self.tempdir)
- name = os.path.join(self.tempdir, item.name)
- if item.type is '0': # is item a regular file ?
- #no backup file
- try:
- cfile = mat.create_class_file(name, False,
- self.add2archive)
- if method is 'normal':
- cfile.remove_all()
- else:
- cfile.remove_all_strict()
- tarout.add(name, item.name, filter=self._remove)
- except:
- logging.info('%s\' format is not supported or harmless' %
- item.name)
- _, ext = os.path.splitext(name)
- if self.add2archive or ext in parser.NOMETA:
- tarout.add(name, item.name, filter=self._remove)
- tarin.close()
- tarout.close()
- self.do_backup()
- return True
-
- def is_file_clean(self, current_file):
- '''
- Check metadatas added by tar
- '''
- if current_file.mtime is not 0:
- return False
- elif current_file.uid is not 0:
- return False
- elif current_file.gid is not 0:
- return False
- elif current_file.uname is not '':
- return False
- elif current_file.gname is not '':
- return False
- else:
- return True
-
- def is_clean(self):
- '''
- Check if the file is clean from harmful metadatas
- '''
- tarin = tarfile.open(self.filename, 'r' + self.compression)
- for item in tarin.getmembers():
- if not self.is_file_clean(item):
- tarin.close()
- return False
- tarin.extract(item, self.tempdir)
- name = os.path.join(self.tempdir, item.name)
- if item.type is '0': # is item a regular file ?
- try:
- class_file = mat.create_class_file(name,
- False, self.add2archive) # no backup file
- if not class_file.is_clean():
- tarin.close()
- return False
- except:
- logging.error('%s\'s foramt is not supported or harmless' %
- item.filename)
- _, ext = os.path.splitext(name)
- if ext not in parser.NOMETA:
- tarin.close()
- return False
- tarin.close()
- return True
-
- def get_meta(self):
- '''
- Return a dict with all the meta of the file
- '''
- tarin = tarfile.open(self.filename, 'r' + self.compression)
- metadata = {}
- for current_file in tarin.getmembers():
- if current_file.type is '0':
- if not self.is_file_clean(current_file): # if there is meta
- current_meta = {}
- current_meta['mtime'] = current_file.mtime
- current_meta['uid'] = current_file.uid
- current_meta['gid'] = current_file.gid
- current_meta['uname'] = current_file.uname
- current_meta['gname'] = current_file.gname
- metadata[current_file.name] = current_meta
- tarin.close()
- return metadata
-
-
-class GzipStripper(TarStripper):
- '''
- Represent a tar.gz archive
- '''
- def __init__(self, filename, parser, mime, backup, add2archive):
- super(GzipStripper, self).__init__(filename, parser, mime, backup,
- add2archive)
- self.compression = ':gz'
-
-
-class Bzip2Stripper(TarStripper):
- '''
- Represents a tar.bz2 archive
- '''
- def __init__(self, filename, parser, mime, backup, add2archive):
- super(Bzip2Stripper, self).__init__(filename, parser, mime, backup,
- add2archive)
- self.compression = ':bz2'
diff --git a/mat/audio.py b/mat/audio.py
deleted file mode 100644
index ed849ee..0000000
--- a/mat/audio.py
+++ /dev/null
@@ -1,100 +0,0 @@
-'''
- Care about audio fileformat
-'''
-try:
- from mutagen.flac import FLAC
- from mutagen.oggvorbis import OggVorbis
-except ImportError:
- pass
-
-
-import parser
-import shutil
-
-
-class MpegAudioStripper(parser.GenericParser):
- '''
- Represent mpeg audio file (mp3, ...)
- '''
- def _should_remove(self, field):
- if field.name in ("id3v1", "id3v2"):
- return True
- else:
- return False
-
-
-class OggStripper(parser.GenericParser):
- '''
- Represent an ogg vorbis file
- '''
- def remove_all(self):
- if self.backup is True:
- shutil.copy2(self.filename, self.output)
- self.filename = self.output
-
- mfile = OggVorbis(self.filename)
- mfile.delete()
- mfile.save()
- return True
-
- def is_clean(self):
- '''
- Check if the "metadata" block is present in the file
- '''
- mfile = OggVorbis(self.filename)
- if mfile.tags == []:
- return True
- else:
- return False
-
- def get_meta(self):
- '''
- Return the content of the metadata block if present
- '''
- metadata = {}
- mfile = OggVorbis(self.filename)
- for key, value in mfile.tags:
- metadata[key] = value
- return metadata
-
-
-class FlacStripper(parser.GenericParser):
- '''
- Represent a Flac audio file
- '''
- def remove_all(self):
- '''
- Remove the "metadata" block from the file
- '''
- if self.backup is True:
- shutil.copy2(self.filename, self.output)
- self.filename = self.output
-
- mfile = FLAC(self.filename)
- mfile.delete()
- mfile.clear_pictures()
- mfile.save()
- return True
-
- def is_clean(self):
- '''
- Check if the "metadata" block is present in the file
- '''
- mfile = FLAC(self.filename)
- if mfile.tags is None and mfile.pictures == []:
- return True
- else:
- return False
-
- def get_meta(self):
- '''
- Return the content of the metadata block if present
- '''
- metadata = {}
- mfile = FLAC(self.filename)
- if mfile.tags is not None:
- if mfile.pictures != []:
- metadata['picture :'] = 'yes'
- for key, value in mfile.tags:
- metadata[key] = value
- return metadata
diff --git a/mat/bencode/__init__.py b/mat/bencode/__init__.py
deleted file mode 100644
index 8b13789..0000000
--- a/mat/bencode/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/mat/bencode/bencode.py b/mat/bencode/bencode.py
deleted file mode 100644
index 739ffe5..0000000
--- a/mat/bencode/bencode.py
+++ /dev/null
@@ -1,152 +0,0 @@
-# Copyright 2007 by Petru Paler
-# Copyright 2011 by Julien (jvoisin) Voisin
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-# FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-# THE SOFTWARE.
-#
-
-'''
- A quick (and also nice) lib to bencode/bdecode torrent files
-'''
-
-
-import types
-
-
-class BTFailure(Exception):
- '''Custom Exception'''
- pass
-
-
-class Bencached(object):
- '''Custom type : cached string'''
- __slots__ = ['bencoded']
-
- def __init__(self, string):
- self.bencoded = string
-
-
-def decode_int(x, f):
- '''decode an int'''
- f += 1
- newf = x.index('e', f)
- n = int(x[f:newf])
- if x[f] == '-':
- if x[f + 1] == '0':
- raise ValueError
- elif x[f] == '0' and newf != f + 1:
- raise ValueError
- return (n, newf + 1)
-
-
-def decode_string(x, f):
- '''decode a string'''
- colon = x.index(':', f)
- n = int(x[f:colon])
- if x[f] == '0' and colon != f + 1:
- raise ValueError
- colon += 1
- return (x[colon:colon + n], colon + n)
-
-
-def decode_list(x, f):
- '''decode a list'''
- result = []
- f += 1
- while x[f] != 'e':
- v, f = DECODE_FUNC[x[f]](x, f)
- result.append(v)
- return (result, f + 1)
-
-
-def decode_dict(x, f):
- '''decode a dict'''
- result = {}
- f += 1
- while x[f] != 'e':
- k, f = decode_string(x, f)
- result[k], f = DECODE_FUNC[x[f]](x, f)
- return (result, f + 1)
-
-
-def encode_bool(x, r):
- '''bencode a boolean'''
- if x:
- encode_int(1, r)
- else:
- encode_int(0, r)
-
-
-def encode_int(x, r):
- '''bencode an integer/float'''
- r.extend(('i', str(x), 'e'))
-
-
-def encode_list(x, r):
- '''bencode a list/tuple'''
- r.append('l')
- [ENCODE_FUNC[type(item)](item, r) for item in x]
- r.append('e')
-
-
-def encode_dict(x, result):
- '''bencode a dict'''
- result.append('d')
- ilist = x.items()
- ilist.sort()
- for k, v in ilist:
- result.extend((str(len(k)), ':', k))
- ENCODE_FUNC[type(v)](v, result)
- result.append('e')
-
-
-DECODE_FUNC = {}
-DECODE_FUNC.update(dict([(str(x), decode_string) for x in xrange(9)]))
-DECODE_FUNC['l'] = decode_list
-DECODE_FUNC['d'] = decode_dict
-DECODE_FUNC['i'] = decode_int
-
-
-ENCODE_FUNC = {}
-ENCODE_FUNC[Bencached] = lambda x, r: r.append(x.bencoded)
-ENCODE_FUNC[types.IntType] = encode_int
-ENCODE_FUNC[types.LongType] = encode_int
-ENCODE_FUNC[types.StringType] = lambda x, r: r.extend((str(len(x)), ':', x))
-ENCODE_FUNC[types.ListType] = encode_list
-ENCODE_FUNC[types.TupleType] = encode_list
-ENCODE_FUNC[types.DictType] = encode_dict
-ENCODE_FUNC[types.BooleanType] = encode_bool
-
-
-def bencode(string):
- '''bencode $string'''
- table = []
- ENCODE_FUNC[type(string)](string, table)
- return ''.join(table)
-
-
-def bdecode(string):
- '''decode $string'''
- try:
- result, lenght = DECODE_FUNC[string[0]](string, 0)
- except (IndexError, KeyError, ValueError):
- raise BTFailure('Not a valid bencoded string')
- if lenght != len(string):
- raise BTFailure('Invalid bencoded value (data after valid prefix)')
- return result
diff --git a/mat/exiftool.py b/mat/exiftool.py
deleted file mode 100644
index 758a094..0000000
--- a/mat/exiftool.py
+++ /dev/null
@@ -1,95 +0,0 @@
-'''
- Care about images with help of the amazing (perl) library Exiftool.
-'''
-
-import subprocess
-import parser
-
-
-class ExiftoolStripper(parser.GenericParser):
- '''
- A generic stripper class using exiftool as backend
- '''
-
- def __init__(self, filename, parser, mime, backup, add2archive):
- super(ExiftoolStripper, self).__init__(filename, parser, mime,
- backup, add2archive)
- self.allowed = ['ExifTool Version Number', 'File Name', 'Directory',
- 'File Size', 'File Modification Date/Time', 'File Permissions',
- 'File Type', 'MIME Type', 'Image Width', 'Image Height',
- 'Image Size']
- self._set_allowed()
-
- def _set_allowed(self):
- '''
- Set the allowed/harmless list of metadata
- '''
- raise NotImplementedError
-
- def remove_all(self):
- '''
- Remove all metadata with help of exiftool
- '''
- try:
- if self.backup:
- # Note: '-All=' must be followed by a known exiftool option.
- process = subprocess.Popen(['exiftool', '-m', '-All=',
- '-out', self.output, self.filename],
- stdout=open('/dev/null'))
- process.wait()
- else:
- # Note: '-All=' must be followed by a known exiftool option.
- process = subprocess.Popen(
- [ 'exiftool', '-m', '-All=', '-overwrite_original', self.filename ],
- stdout=open('/dev/null'))
- process.wait()
- return True
- except:
- return False
-
- def is_clean(self):
- '''
- Check if the file is clean with help of exiftool
- '''
- out = subprocess.Popen(['exiftool', self.filename],
- stdout=subprocess.PIPE).communicate()[0]
- out = out.split('\n')
- for i in out[:-1]:
- if i.split(':')[0].strip() not in self.allowed:
- return False
- return True
-
- def get_meta(self):
- '''
- Return every harmful meta with help of exiftool
- '''
- out = subprocess.Popen(['exiftool', self.filename],
- stdout=subprocess.PIPE).communicate()[0]
- out = out.split('\n')
- meta = {}
- for i in out[:-1]:
- key = i.split(':')[0].strip()
- if key not in self.allowed:
- meta[key] = i.split(':')[1].strip()
- return meta
-
-
-class JpegStripper(ExiftoolStripper):
- '''
- Care about jpeg files with help
- of exiftool
- '''
- def _set_allowed(self):
- self.allowed.extend(['JFIF Version', 'Resolution Unit',
- 'X Resolution', 'Y Resolution', 'Encoding Process', 'Bits Per Sample',
- 'Color Components', 'Y Cb Cr Sub Sampling'])
-
-class PngStripper(ExiftoolStripper):
- '''
- Care about png files with help
- of exiftool
- '''
- def _set_allowed(self):
- self.allowed.extend(['Bit Depth', 'Color Type', 'Compression',
- 'Filter', 'Interlace', 'Pixels Per Unit X', 'Pixels Per Unit Y',
- 'Pixel Units'])
diff --git a/mat/hachoir_editor/__init__.py b/mat/hachoir_editor/__init__.py
deleted file mode 100644
index 1835676..0000000
--- a/mat/hachoir_editor/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-from field import (
- EditorError, FakeField)
-from typed_field import (
- EditableField, EditableBits, EditableBytes,
- EditableInteger, EditableString,
- createEditableField)
-from fieldset import EditableFieldSet, NewFieldSet, createEditor
-
diff --git a/mat/hachoir_editor/field.py b/mat/hachoir_editor/field.py
deleted file mode 100644
index 6b1efe3..0000000
--- a/mat/hachoir_editor/field.py
+++ /dev/null
@@ -1,69 +0,0 @@
-from hachoir_core.error import HachoirError
-from hachoir_core.field import joinPath, MissingField
-
-class EditorError(HachoirError):
- pass
-
-class FakeField(object):
- """
- This class have API looks similar to Field API, but objects don't contain
- any value: all values are _computed_ by parent methods.
-
- Example: FakeField(editor, "abc").size calls editor._getFieldSize("abc").
- """
- is_field_set = False
-
- def __init__(self, parent, name):
- self._parent = parent
- self._name = name
-
- def _getPath(self):
- return joinPath(self._parent.path, self._name)
- path = property(_getPath)
-
- def _getName(self):
- return self._name
- name = property(_getName)
-
- def _getAddress(self):
- return self._parent._getFieldAddress(self._name)
- address = property(_getAddress)
-
- def _getSize(self):
- return self._parent.input[self._name].size
- size = property(_getSize)
-
- def _getValue(self):
- return self._parent.input[self._name].value
- value = property(_getValue)
-
- def createDisplay(self):
- # TODO: Returns new value if field is altered
- return self._parent.input[self._name].display
- display = property(createDisplay)
-
- def _getParent(self):
- return self._parent
- parent = property(_getParent)
-
- def hasValue(self):
- return self._parent.input[self._name].hasValue()
-
- def __getitem__(self, key):
- # TODO: Implement this function!
- raise MissingField(self, key)
-
- def _isAltered(self):
- return False
- is_altered = property(_isAltered)
-
- def writeInto(self, output):
- size = self.size
- addr = self._parent._getFieldInputAddress(self._name)
- input = self._parent.input
- stream = input.stream
- if size % 8:
- output.copyBitsFrom(stream, addr, size, input.endian)
- else:
- output.copyBytesFrom(stream, addr, size//8)
-
diff --git a/mat/hachoir_editor/fieldset.py b/mat/hachoir_editor/fieldset.py
deleted file mode 100644
index a74c8e2..0000000
--- a/mat/hachoir_editor/fieldset.py
+++ /dev/null
@@ -1,352 +0,0 @@
-from hachoir_core.dict import UniqKeyError
-from hachoir_core.field import MissingField, Float32, Float64, FakeArray
-from hachoir_core.compatibility import any
-from hachoir_core.i18n import _
-from typed_field import createEditableField
-from field import EditorError
-from collections import deque # Python 2.4
-import weakref # Python 2.1
-import struct
-
-class EditableFieldSet(object):
- MAX_SIZE = (1 << 40) # Arbitrary limit to catch errors
- is_field_set = True
-
- def __init__(self, parent, fieldset):
- self._parent = parent
- self.input = fieldset # original FieldSet
- self._fields = {} # cache of editable fields
- self._deleted = set() # Names of deleted fields
- self._inserted = {} # Inserted field (name => list of field,
- # where name is the name after)
-
- def array(self, key):
- # FIXME: Use cache?
- return FakeArray(self, key)
-
- def _getParent(self):
- return self._parent
- parent = property(_getParent)
-
- def _isAltered(self):
- if self._inserted:
- return True
- if self._deleted:
- return True
- return any(field.is_altered for field in self._fields.itervalues())
- is_altered = property(_isAltered)
-
- def reset(self):
- """
- Reset the field set and the input field set.
- """
- for key, field in self._fields.iteritems():
- if not field.is_altered:
- del self._fields[key]
- self.input.reset()
-
- def __len__(self):
- return len(self.input) \
- - len(self._deleted) \
- + sum( len(new) for new in self._inserted.itervalues() )
-
- def __iter__(self):
- for field in self.input:
- name = field.name
- if name in self._inserted:
- for newfield in self._inserted[name]:
- yield weakref.proxy(newfield)
- if name not in self._deleted:
- yield self[name]
- if None in self._inserted:
- for newfield in self._inserted[None]:
- yield weakref.proxy(newfield)
-
- def insertBefore(self, name, *new_fields):
- self._insert(name, new_fields, False)
-
- def insertAfter(self, name, *new_fields):
- self._insert(name, new_fields, True)
-
- def insert(self, *new_fields):
- self._insert(None, new_fields, True)
-
- def _insert(self, key, new_fields, next):
- """
- key is the name of the field before which new_fields
- will be inserted. If next is True, the fields will be inserted
- _after_ this field.
- """
- # Set unique field name
- for field in new_fields:
- if field._name.endswith("[]"):
- self.input.setUniqueFieldName(field)
-
- # Check that there is no duplicate in inserted fields
- new_names = list(field.name for field in new_fields)
- names_set = set(new_names)
- if len(names_set) != len(new_fields):
- duplicates = (name for name in names_set if 1 < new_names.count(name))
- raise UniqKeyError(_("Duplicates in inserted fields: %s") % ", ".join(duplicates))
-
- # Check that field names are not in input
- if self.input: # Write special version for NewFieldSet?
- for name in new_names:
- if name in self.input and name not in self._deleted:
- raise UniqKeyError(_("Field name '%s' already exists") % name)
-
- # Check that field names are not in inserted fields
- for fields in self._inserted.itervalues():
- for field in fields:
- if field.name in new_names:
- raise UniqKeyError(_("Field name '%s' already exists") % field.name)
-
- # Input have already inserted field?
- if key in self._inserted:
- if next:
- self._inserted[key].extend( reversed(new_fields) )
- else:
- self._inserted[key].extendleft( reversed(new_fields) )
- return
-
- # Whould like to insert in inserted fields?
- if key:
- for fields in self._inserted.itervalues():
- names = [item.name for item in fields]
- try:
- pos = names.index(key)
- except ValueError:
- continue
- if 0 <= pos:
- if next:
- pos += 1
- fields.rotate(-pos)
- fields.extendleft( reversed(new_fields) )
- fields.rotate(pos)
- return
-
- # Get next field. Use None if we are at the end.
- if next:
- index = self.input[key].index + 1
- try:
- key = self.input[index].name
- except IndexError:
- key = None
-
- # Check that field names are not in input
- if key not in self.input:
- raise MissingField(self, key)
-
- # Insert in original input
- self._inserted[key]= deque(new_fields)
-
- def _getDescription(self):
- return self.input.description
- description = property(_getDescription)
-
- def _getStream(self):
- # FIXME: This property is maybe a bad idea since address may be differents
- return self.input.stream
- stream = property(_getStream)
-
- def _getName(self):
- return self.input.name
- name = property(_getName)
-
- def _getEndian(self):
- return self.input.endian
- endian = property(_getEndian)
-
- def _getAddress(self):
- if self._parent:
- return self._parent._getFieldAddress(self.name)
- else:
- return 0
- address = property(_getAddress)
-
- def _getAbsoluteAddress(self):
- address = self.address
- current = self._parent
- while current:
- address += current.address
- current = current._parent
- return address
- absolute_address = property(_getAbsoluteAddress)
-
- def hasValue(self):
- return False
-# return self._parent.input[self.name].hasValue()
-
- def _getSize(self):
- if self.is_altered:
- return sum(field.size for field in self)
- else:
- return self.input.size
- size = property(_getSize)
-
- def _getPath(self):
- return self.input.path
- path = property(_getPath)
-
- def _getOriginalField(self, name):
- assert name in self.input
- return self.input[name]
-
- def _getFieldInputAddress(self, name):
- """
- Absolute address of a field from the input field set.
- """
- assert name in self.input
- return self.input[name].absolute_address
-
- def _getFieldAddress(self, name):
- """
- Compute relative address of a field. The operation takes care of
- deleted and resized fields.
- """
- #assert name not in self._deleted
- addr = 0
- for field in self:
- if field.name == name:
- return addr
- addr += field.size
- raise MissingField(self, name)
-
- def _getItemByPath(self, path):
- if not path[0]:
- path = path[1:]
- field = self
- for name in path:
- field = field[name]
- return field
-
- def __contains__(self, name):
- try:
- field = self[name]
- return (field is not None)
- except MissingField:
- return False
-
- def __getitem__(self, key):
- """
- Create a weak reference to an editable field (EditableField) for the
- field with specified name. If the field is removed later, using the
- editable field will raise a weakref.ReferenceError exception.
-
- May raise a MissingField error if the field doesn't exist in original
- field set or it has been deleted.
- """
- if "/" in key:
- return self._getItemByPath(key.split("/"))
- if isinstance(key, (int, long)):
- raise EditorError("Integer index are not supported")
-
- if (key in self._deleted) or (key not in self.input):
- raise MissingField(self, key)
- if key not in self._fields:
- field = self.input[key]
- if field.is_field_set:
- self._fields[key] = createEditableFieldSet(self, field)
- else:
- self._fields[key] = createEditableField(self, field)
- return weakref.proxy(self._fields[key])
-
- def __delitem__(self, name):
- """
- Remove a field from the field set. May raise an MissingField exception
- if the field has already been deleted.
- """
- parts = name.partition('/')
- if parts[2]:
- fieldset = self[parts[0]]
- del fieldset[part[2]]
- return
- if name in self._deleted:
- raise MissingField(self, name)
- self._deleted.add(name)
- if name in self._fields:
- del self._fields[name]
-
- def writeInto(self, output):
- """
- Write the content if this field set into the output stream
- (OutputStream).
- """
- if not self.is_altered:
- # Not altered: just copy bits/bytes
- input = self.input
- if input.size % 8:
- output.copyBitsFrom(input.stream,
- input.absolute_address, input.size, input.endian)
- else:
- output.copyBytesFrom(input.stream,
- input.absolute_address, input.size//8)
- else:
- # Altered: call writeInto() method of each field
- realaddr = 0
- for field in self:
- field.writeInto(output)
- realaddr += field.size
-
- def _getValue(self):
- raise EditorError('Field set "%s" has no value' % self.path)
- def _setValue(self, value):
- raise EditorError('Field set "%s" value is read only' % self.path)
- value = property(_getValue, _setValue, "Value of field")
-
-class EditableFloat(EditableFieldSet):
- _value = None
-
- def _isAltered(self):
- return (self._value is not None)
- is_altered = property(_isAltered)
-
- def writeInto(self, output):
- if self._value is not None:
- self._write(output)
- else:
- EditableFieldSet.writeInto(self, output)
-
- def _write(self, output):
- format = self.input.struct_format
- raw = struct.pack(format, self._value)
- output.writeBytes(raw)
-
- def _setValue(self, value):
- self.parent._is_altered = True
- self._value = value
- value = property(EditableFieldSet._getValue, _setValue)
-
-def createEditableFieldSet(parent, field):
- cls = field.__class__
- # FIXME: Support Float80
- if cls in (Float32, Float64):
- return EditableFloat(parent, field)
- else:
- return EditableFieldSet(parent, field)
-
-class NewFieldSet(EditableFieldSet):
- def __init__(self, parent, name):
- EditableFieldSet.__init__(self, parent, None)
- self._name = name
- self._endian = parent.endian
-
- def __iter__(self):
- if None in self._inserted:
- return iter(self._inserted[None])
- else:
- raise StopIteration()
-
- def _getName(self):
- return self._name
- name = property(_getName)
-
- def _getEndian(self):
- return self._endian
- endian = property(_getEndian)
-
- is_altered = property(lambda self: True)
-
-def createEditor(fieldset):
- return EditableFieldSet(None, fieldset)
-
diff --git a/mat/hachoir_editor/typed_field.py b/mat/hachoir_editor/typed_field.py
deleted file mode 100644
index 0f0427b..0000000
--- a/mat/hachoir_editor/typed_field.py
+++ /dev/null
@@ -1,253 +0,0 @@
-from hachoir_core.field import (
- RawBits, Bit, Bits, PaddingBits,
- RawBytes, Bytes, PaddingBytes,
- GenericString, Character,
- isInteger, isString)
-from field import FakeField
-
-class EditableField(FakeField):
- """
- Pure virtual class used to write editable field class.
- """
-
- _is_altered = False
- def __init__(self, parent, name, value=None):
- FakeField.__init__(self, parent, name)
- self._value = value
-
- def _isAltered(self):
- return self._is_altered
- is_altered = property(_isAltered)
-
- def hasValue(self):
- return True
-
- def _computeSize(self):
- raise NotImplementedError()
- def _getValue(self):
- return self._value
- def _setValue(self, value):
- self._value = value
-
- def _propGetValue(self):
- if self._value is not None:
- return self._getValue()
- else:
- return FakeField._getValue(self)
- def _propSetValue(self, value):
- self._setValue(value)
- self._is_altered = True
- value = property(_propGetValue, _propSetValue)
-
- def _getSize(self):
- if self._value is not None:
- return self._computeSize()
- else:
- return FakeField._getSize(self)
- size = property(_getSize)
-
- def _write(self, output):
- raise NotImplementedError()
-
- def writeInto(self, output):
- if self._is_altered:
- self._write(output)
- else:
- return FakeField.writeInto(self, output)
-
-class EditableFixedField(EditableField):
- """
- Editable field with fixed size.
- """
-
- def __init__(self, parent, name, value=None, size=None):
- EditableField.__init__(self, parent, name, value)
- if size is not None:
- self._size = size
- else:
- self._size = self._parent._getOriginalField(self._name).size
-
- def _getSize(self):
- return self._size
- size = property(_getSize)
-
-class EditableBits(EditableFixedField):
- def __init__(self, parent, name, *args):
- if args:
- if len(args) != 2:
- raise TypeError(
- "Wrong argument count, EditableBits constructor prototype is: "
- "(parent, name, [size, value])")
- size = args[0]
- value = args[1]
- assert isinstance(value, (int, long))
- else:
- size = None
- value = None
- EditableFixedField.__init__(self, parent, name, value, size)
- if args:
- self._setValue(args[1])
- self._is_altered = True
-
- def _setValue(self, value):
- if not(0 <= value < (1 << self._size)):
- raise ValueError("Invalid value, must be in range %s..%s"
- % (0, (1 << self._size) - 1))
- self._value = value
-
- def _write(self, output):
- output.writeBits(self._size, self._value, self._parent.endian)
-
-class EditableBytes(EditableField):
- def _setValue(self, value):
- if not value: raise ValueError(
- "Unable to set empty string to a EditableBytes field")
- self._value = value
-
- def _computeSize(self):
- return len(self._value) * 8
-
- def _write(self, output):
- output.writeBytes(self._value)
-
-class EditableString(EditableField):
- MAX_SIZE = {
- "Pascal8": (1 << 8)-1,
- "Pascal16": (1 << 16)-1,
- "Pascal32": (1 << 32)-1,
- }
-
- def __init__(self, parent, name, *args, **kw):
- if len(args) == 2:
- value = args[1]
- assert isinstance(value, str) # TODO: support Unicode
- elif not args:
- value = None
- else:
- raise TypeError(
- "Wrong argument count, EditableString constructor prototype is:"
- "(parent, name, [format, value])")
- EditableField.__init__(self, parent, name, value)
- if len(args) == 2:
- self._charset = kw.get('charset', None)
- self._format = args[0]
- if self._format in GenericString.PASCAL_FORMATS:
- self._prefix_size = GenericString.PASCAL_FORMATS[self._format]
- else:
- self._prefix_size = 0
- self._suffix_str = GenericString.staticSuffixStr(
- self._format, self._charset, self._parent.endian)
- self._is_altered = True
- else:
- orig = self._parent._getOriginalField(name)
- self._charset = orig.charset
- self._format = orig.format
- self._prefix_size = orig.content_offset
- self._suffix_str = orig.suffix_str
-
- def _setValue(self, value):
- size = len(value)
- if self._format in self.MAX_SIZE and self.MAX_SIZE[self._format] < size:
- raise ValueError("String is too big")
- self._value = value
-
- def _computeSize(self):
- return (self._prefix_size + len(self._value) + len(self._suffix_str))*8
-
- def _write(self, output):
- if self._format in GenericString.SUFFIX_FORMAT:
- output.writeBytes(self._value)
- output.writeBytes(self._suffix_str)
- elif self._format == "fixed":
- output.writeBytes(self._value)
- else:
- assert self._format in GenericString.PASCAL_FORMATS
- size = GenericString.PASCAL_FORMATS[self._format]
- output.writeInteger(len(self._value), False, size, self._parent.endian)
- output.writeBytes(self._value)
-
-class EditableCharacter(EditableFixedField):
- def __init__(self, parent, name, *args):
- if args:
- if len(args) != 3:
- raise TypeError(
- "Wrong argument count, EditableCharacter "
- "constructor prototype is: (parent, name, [value])")
- value = args[0]
- if not isinstance(value, str) or len(value) != 1:
- raise TypeError("EditableCharacter needs a character")
- else:
- value = None
- EditableFixedField.__init__(self, parent, name, value, 8)
- if args:
- self._is_altered = True
-
- def _setValue(self, value):
- if not isinstance(value, str) or len(value) != 1:
- raise TypeError("EditableCharacter needs a character")
- self._value = value
-
- def _write(self, output):
- output.writeBytes(self._value)
-
-class EditableInteger(EditableFixedField):
- VALID_VALUE_SIGNED = {
- 8: (-(1 << 8), (1 << 8)-1),
- 16: (-(1 << 15), (1 << 15)-1),
- 32: (-(1 << 31), (1 << 31)-1),
- }
- VALID_VALUE_UNSIGNED = {
- 8: (0, (1 << 8)-1),
- 16: (0, (1 << 16)-1),
- 32: (0, (1 << 32)-1)
- }
-
- def __init__(self, parent, name, *args):
- if args:
- if len(args) != 3:
- raise TypeError(
- "Wrong argument count, EditableInteger constructor prototype is: "
- "(parent, name, [signed, size, value])")
- size = args[1]
- value = args[2]
- assert isinstance(value, (int, long))
- else:
- size = None
- value = None
- EditableFixedField.__init__(self, parent, name, value, size)
- if args:
- self._signed = args[0]
- self._is_altered = True
- else:
- self._signed = self._parent._getOriginalField(self._name).signed
-
- def _setValue(self, value):
- if self._signed:
- valid = self.VALID_VALUE_SIGNED
- else:
- valid = self.VALID_VALUE_UNSIGNED
- minval, maxval = valid[self._size]
- if not(minval <= value <= maxval):
- raise ValueError("Invalid value, must be in range %s..%s"
- % (minval, maxval))
- self._value = value
-
- def _write(self, output):
- output.writeInteger(
- self.value, self._signed, self._size//8, self._parent.endian)
-
-def createEditableField(fieldset, field):
- if isInteger(field):
- cls = EditableInteger
- elif isString(field):
- cls = EditableString
- elif field.__class__ in (RawBytes, Bytes, PaddingBytes):
- cls = EditableBytes
- elif field.__class__ in (RawBits, Bits, Bit, PaddingBits):
- cls = EditableBits
- elif field.__class__ == Character:
- cls = EditableCharacter
- else:
- cls = FakeField
- return cls(fieldset, field.name)
-
diff --git a/mat/images.py b/mat/images.py
deleted file mode 100644
index 3eb3544..0000000
--- a/mat/images.py
+++ /dev/null
@@ -1,48 +0,0 @@
-'''
- Takes care about pictures formats
-'''
-
-import parser
-
-
-class JpegStripper(parser.GenericParser):
- '''
- represents a jpeg file
- remaining :
- http://www.sno.phy.queensu.ca/~phil/exiftool/TagNames/CanonRaw.html
- '''
- def _should_remove(self, field):
- '''
- return True if the field is compromizing
- '''
- name = field.name
- if name.startswith('comment'):
- return True
- elif name in ('photoshop', 'exif', 'adobe', 'app12'):
- return True
- elif name in ('icc'): # should we remove the icc profile ?
- return True
- else:
- return False
-
-
-class PngStripper(parser.GenericParser):
- '''
- represents a png file
- see : http://www.sno.phy.queensu.ca/~phil/exiftool/TagNames/PNG.html
- '''
- def _should_remove(self, field):
- '''
- return True if the field is compromizing
- '''
- name = field.name
- if name.startswith('text['): # textual meta
- return True
- elif name.startswith('utf8_text['): # uncompressed adobe crap
- return True
- elif name.startswith('compt_text['): # compressed adobe crap
- return True
- elif name == "time": # timestamp
- return True
- else:
- return False
diff --git a/mat/mat.py b/mat/mat.py
deleted file mode 100644
index 53d02d8..0000000
--- a/mat/mat.py
+++ /dev/null
@@ -1,150 +0,0 @@
-#!/usr/bin/env python
-
-'''
- Metadata anonymisation toolkit library
-'''
-
-import os
-import subprocess
-import logging
-import mimetypes
-import xml.sax
-
-import hachoir_core.cmd_line
-import hachoir_parser
-
-import strippers
-
-__version__ = '0.2.2'
-__author__ = 'jvoisin'
-
-#Silence
-LOGGING_LEVEL = logging.CRITICAL
-hachoir_core.config.quiet = True
-fname = ''
-
-#Verbose
-#LOGGING_LEVEL = logging.DEBUG
-#hachoir_core.config.quiet = False
-#logname = 'report.log'
-
-logging.basicConfig(filename=fname, level=LOGGING_LEVEL)
-
-
-def get_sharedir():
- '''
- An ugly hack to find where is the "FORMATS" file.
- '''
- if os.path.isfile('FORMATS'):
- return ''
- elif os.path.exists('/usr/local/share/mat/'):
- return '/usr/local/share/mat/'
- elif os.path.exists('/usr/share/mat/'):
- return '/usr/share/mat'
-
-
-class XMLParser(xml.sax.handler.ContentHandler):
- '''
- Parse the supported format xml, and return a corresponding
- list of dict
- '''
- def __init__(self):
- self.dict = {}
- self.list = []
- self.content, self.key = '', ''
- self.between = False
-
- def startElement(self, name, attrs):
- '''
- Called when entering into xml balise
- '''
- self.between = True
- self.key = name
- self.content = ''
-
- def endElement(self, name):
- '''
- Called when exiting a xml balise
- '''
- if name == 'format': # exiting a fileformat section
- self.list.append(self.dict.copy())
- self.dict.clear()
- else:
- content = self.content.replace('\s', ' ')
- self.dict[self.key] = content
- self.between = False
-
- def characters(self, characters):
- '''
- Concatenate the content between opening and closing balises
- '''
- if self.between:
- self.content += characters
-
-
-def secure_remove(filename):
- '''
- securely remove the file
- '''
- removed = False
- try:
- subprocess.call(['shred', '--remove', filename])
- removed = True
- except:
- logging.error('Unable to securely remove %s' % filename)
-
- if removed is False:
- try:
- os.remove(filename)
- except:
- logging.error('Unable to remove %s' % filename)
-
-
-def create_class_file(name, backup, add2archive):
- '''
- return a $FILETYPEStripper() class,
- corresponding to the filetype of the given file
- '''
- if not os.path.isfile(name):
- # check if the file exists
- logging.error('%s is not a valid file' % name)
- return None
-
- if not os.access(name, os.R_OK):
- #check read permissions
- logging.error('%s is is not readable' % name)
- return None
-
- if not os.access(name, os.W_OK):
- #check write permission
- logging.error('%s is not writtable' % name)
- return None
-
- filename = ''
- try:
- filename = hachoir_core.cmd_line.unicodeFilename(name)
- except TypeError: # get rid of "decoding Unicode is not supported"
- filename = name
-
- parser = hachoir_parser.createParser(filename)
- if not parser:
- logging.info('Unable to parse %s' % filename)
- return None
-
- mime = parser.mime_type
-
- if mime == 'application/zip': # some formats are zipped stuff
- mime = mimetypes.guess_type(name)[0]
-
- if mime.startswith('application/vnd.oasis.opendocument'):
- mime = 'application/opendocument' # opendocument fileformat
- elif mime.startswith('application/vnd.openxmlformats-officedocument'):
- mime = 'application/officeopenxml' # office openxml
-
- try:
- stripper_class = strippers.STRIPPERS[mime]
- except KeyError:
- logging.info('Don\'t have stripper for %s format' % mime)
- return None
-
- return stripper_class(filename, parser, mime, backup, add2archive)
diff --git a/mat/misc.py b/mat/misc.py
deleted file mode 100644
index d084861..0000000
--- a/mat/misc.py
+++ /dev/null
@@ -1,63 +0,0 @@
-'''
- Care about misc formats
-'''
-
-import parser
-
-from bencode import bencode
-
-
-class TorrentStripper(parser.GenericParser):
- '''
- Represent a torrent file with the help
- of the bencode lib from Petru Paler
- '''
- def __init__(self, filename, parser, mime, backup, add2archive):
- super(TorrentStripper, self).__init__(filename, parser, mime,
- backup, add2archive)
- self.fields = ['comment', 'creation date', 'created by']
-
- def is_clean(self):
- '''
- Check if the file is clean from harmful metadatas
- '''
- with open(self.filename, 'r') as f:
- decoded = bencode.bdecode(f.read())
- for key in self.fields:
- try:
- if decoded[key] != '':
- return False
- except:
- pass
- return True
-
- def get_meta(self):
- '''
- Return a dict with all the meta of the file
- '''
- metadata = {}
- with open(self.filename, 'r') as f:
- decoded = bencode.bdecode(f.read())
- for key in self.fields:
- try:
- if decoded[key] != '':
- metadata[key] = decoded[key]
- except:
- pass
- return metadata
-
- def remove_all(self):
- '''
- Remove all the files that are compromizing
- '''
- with open(self.filename, 'r') as f:
- decoded = bencode.bdecode(f.read())
- for key in self.fields:
- try:
- decoded[key] = ''
- except:
- pass
- with open(self.output, 'w') as f: # encode the decoded torrent
- f.write(bencode.bencode(decoded)) # and write it in self.output
- self.do_backup()
- return True
diff --git a/mat/office.py b/mat/office.py
deleted file mode 100644
index e1d738e..0000000
--- a/mat/office.py
+++ /dev/null
@@ -1,305 +0,0 @@
-'''
- Care about office's formats
-'''
-
-import os
-import logging
-import zipfile
-import fileinput
-import subprocess
-import xml.dom.minidom as minidom
-
-try:
- import cairo
- import poppler
-except ImportError:
- pass
-
-import mat
-import parser
-import archive
-
-class OpenDocumentStripper(archive.GenericArchiveStripper):
- '''
- An open document file is a zip, with xml file into.
- The one that interest us is meta.xml
- '''
-
- def get_meta(self):
- '''
- Return a dict with all the meta of the file by
- trying to read the meta.xml file.
- '''
- zipin = zipfile.ZipFile(self.filename, 'r')
- metadata = {}
- try:
- content = zipin.read('meta.xml')
- dom1 = minidom.parseString(content)
- elements = dom1.getElementsByTagName('office:meta')
- for i in elements[0].childNodes:
- if i.tagName != 'meta:document-statistic':
- nodename = ''.join([k for k in i.nodeName.split(':')[1:]])
- metadata[nodename] = ''.join([j.data for j in i.childNodes])
- else:
- # thank you w3c for not providing a nice
- # method to get all attributes from a node
- pass
- zipin.close()
- except KeyError: # no meta.xml file found
- logging.debug('%s has no opendocument metadata' % self.filename)
- return metadata
-
- def _remove_all(self, method):
- '''
- FIXME ?
- There is a patch implementing the Zipfile.remove()
- method here : http://bugs.python.org/issue6818
- '''
- zipin = zipfile.ZipFile(self.filename, 'r')
- zipout = zipfile.ZipFile(self.output, 'w', allowZip64=True)
-
- for item in zipin.namelist():
- name = os.path.join(self.tempdir, item)
- _, ext = os.path.splitext(name)
-
- if item.endswith('manifest.xml'):
- # contain the list of all files present in the archive
- zipin.extract(item, self.tempdir)
- for line in fileinput.input(name, inplace=1):
- #remove the line which contains "meta.xml"
- line = line.strip()
- if not 'meta.xml' in line:
- print line
- zipout.write(name, item)
-
- elif ext in parser.NOMETA or item == 'mimetype':
- #keep NOMETA files, and the "manifest" file
- if item != 'meta.xml': # contains the metadata
- zipin.extract(item, self.tempdir)
- zipout.write(name, item)
-
- else:
- zipin.extract(item, self.tempdir)
- if os.path.isfile(name):
- try:
- cfile = mat.create_class_file(name, False,
- self.add2archive)
- if method == 'normal':
- cfile.remove_all()
- else:
- cfile.remove_all_strict()
- logging.debug('Processing %s from %s' % (item,
- self.filename))
- zipout.write(name, item)
- except:
- logging.info('%s\' fileformat is not supported' % item)
- if self.add2archive:
- zipout.write(name, item)
- zipout.comment = ''
- logging.info('%s treated' % self.filename)
- zipin.close()
- zipout.close()
- self.do_backup()
- return True
-
- def is_clean(self):
- '''
- Check if the file is clean from harmful metadatas
- '''
- zipin = zipfile.ZipFile(self.filename, 'r')
- try:
- zipin.getinfo('meta.xml')
- except KeyError: # no meta.xml in the file
- czf = archive.ZipStripper(self.filename, self.parser,
- 'application/zip', self.backup, self.add2archive)
- if czf.is_clean():
- zipin.close()
- return True
- zipin.close()
- return False
-
-
-class PdfStripper(parser.GenericParser):
- '''
- Represent a PDF file
- '''
- def __init__(self, filename, parser, mime, backup, add2archive):
- super(PdfStripper, self).__init__(filename, parser, mime, backup,
- add2archive)
- uri = 'file://' + os.path.abspath(self.filename)
- self.password = None
- self.document = poppler.document_new_from_file(uri, self.password)
- self.meta_list = ('title', 'author', 'subject', 'keywords', 'creator',
- 'producer', 'metadata')
-
- def is_clean(self):
- '''
- Check if the file is clean from harmful metadatas
- '''
- for key in self.meta_list:
- if self.document.get_property(key) is not None and \
- self.document.get_property(key) != '':
- return False
- return True
-
-
- def remove_all(self):
- '''
- Remove supperficial
- '''
- return self._remove_meta()
-
-
- def remove_all_strict(self):
- '''
- Opening the PDF with poppler, then doing a render
- on a cairo pdfsurface for each pages.
- Thanks to Lunar^for the idea.
- http://cairographics.org/documentation/pycairo/2/
- python-poppler is not documented at all : have fun ;)
- '''
- page = self.document.get_page(0)
- page_width, page_height = page.get_size()
- surface = cairo.PDFSurface(self.output, page_width, page_height)
- context = cairo.Context(surface) # context draws on the surface
- logging.debug('PDF rendering of %s' % self.filename)
- for pagenum in xrange(self.document.get_n_pages()):
- page = self.document.get_page(pagenum)
- context.translate(0, 0)
- page.render(context) # render the page on context
- context.show_page() # draw context on surface
- surface.finish()
- return self._remove_meta()
-
- def _remove_meta(self):
- '''
- Remove superficial/external metadata
- from a PDF file, using exiftool,
- of pdfrw if exiftool is not installed
- '''
- processed = False
- try:# try with pdfrw
- import pdfrw
- #For now, poppler cannot write meta, so we must use pdfrw
- logging.debug('Removing %s\'s superficial metadata' % self.filename)
- trailer = pdfrw.PdfReader(self.output)
- trailer.Info.Producer = trailer.Author = trailer.Info.Creator = None
- writer = pdfrw.PdfWriter()
- writer.trailer = trailer
- writer.write(self.output)
- self.do_backup()
- processed = True
- except:
- pass
-
- try: # try with exiftool
- subprocess.Popen('exiftool', stdout=open('/dev/null'))
- import exiftool
- # Note: '-All=' must be followed by a known exiftool option.
- if self.backup:
- process = subprocess.Popen(['exiftool', '-m', '-All=',
- '-out', self.output, self.filename], stdout=open('/dev/null'))
- process.wait()
- else:
- # Note: '-All=' must be followed by a known exiftool option.
- process = subprocess.Popen(
- ['exiftool', '-All=', '-overwrite_original', self.filename],
- stdout=open('/dev/null'))
- process.wait()
- processed = True
- except:
- pass
-
- if processed is False:
- logging.error('Please install either pdfrw, or exiftool to\
- fully handle PDF files')
- return processed
-
- def get_meta(self):
- '''
- Return a dict with all the meta of the file
- '''
- metadata = {}
- for key in self.meta_list:
- if self.document.get_property(key) is not None and \
- self.document.get_property(key) != '':
- metadata[key] = self.document.get_property(key)
- return metadata
-
-
-class OpenXmlStripper(archive.GenericArchiveStripper):
- '''
- Represent an office openxml document, which is like
- an opendocument format, with some tricky stuff added.
- It contains mostly xml, but can have media blobs, crap, ...
- (I don't like this format.)
- '''
- def _remove_all(self, method):
- '''
- FIXME ?
- There is a patch implementing the Zipfile.remove()
- method here : http://bugs.python.org/issue6818
- '''
- zipin = zipfile.ZipFile(self.filename, 'r')
- zipout = zipfile.ZipFile(self.output, 'w',
- allowZip64=True)
- for item in zipin.namelist():
- name = os.path.join(self.tempdir, item)
- _, ext = os.path.splitext(name)
- if item.startswith('docProps/'): # metadatas
- pass
- elif ext in parser.NOMETA or item == '.rels':
- #keep parser.NOMETA files, and the file named ".rels"
- zipin.extract(item, self.tempdir)
- zipout.write(name, item)
- else:
- zipin.extract(item, self.tempdir)
- if os.path.isfile(name): # don't care about folders
- try:
- cfile = mat.create_class_file(name, False,
- self.add2archive)
- if method == 'normal':
- cfile.remove_all()
- else:
- cfile.remove_all_strict()
- logging.debug('Processing %s from %s' % (item,
- self.filename))
- zipout.write(name, item)
- except:
- logging.info('%s\' fileformat is not supported' % item)
- if self.add2archive:
- zipout.write(name, item)
- zipout.comment = ''
- logging.info('%s treated' % self.filename)
- zipin.close()
- zipout.close()
- self.do_backup()
- return True
-
- def is_clean(self):
- '''
- Check if the file is clean from harmful metadatas
- '''
- zipin = zipfile.ZipFile(self.filename, 'r')
- for item in zipin.namelist():
- if item.startswith('docProps/'):
- return False
- zipin.close()
- czf = archive.ZipStripper(self.filename, self.parser,
- 'application/zip', self.backup, self.add2archive)
- if not czf.is_clean():
- return False
- else:
- return True
-
- def get_meta(self):
- '''
- Return a dict with all the meta of the file
- '''
- zipin = zipfile.ZipFile(self.filename, 'r')
- metadata = {}
- for item in zipin.namelist():
- if item.startswith('docProps/'):
- metadata[item] = 'harmful content'
- zipin.close()
- return metadata
diff --git a/mat/parser.py b/mat/parser.py
deleted file mode 100644
index 6dc5d0b..0000000
--- a/mat/parser.py
+++ /dev/null
@@ -1,130 +0,0 @@
-'''
- Parent class of all parser
-'''
-
-import hachoir_core
-import hachoir_editor
-
-import os
-
-import mat
-
-NOMETA = ('.bmp', '.rdf', '.txt', '.xml', '.rels')
-#bmp : image
-#rdf : text
-#txt : plain text
-#xml : formated text
-#rels : openxml foramted text
-
-
-FIELD = object()
-
-class GenericParser(object):
- '''
- Parent class of all parsers
- '''
- def __init__(self, filename, parser, mime, backup, add2archive):
- self.filename = ''
- self.parser = parser
- self.mime = mime
- self.backup = backup
- self.editor = hachoir_editor.createEditor(parser)
- self.realname = filename
- try:
- self.filename = hachoir_core.cmd_line.unicodeFilename(filename)
- except TypeError: # get rid of "decoding Unicode is not supported"
- self.filename = filename
- basename, ext = os.path.splitext(filename)
- self.output = basename + '.cleaned' + ext
- self.basename = os.path.basename(filename) # only filename
-
- def is_clean(self):
- '''
- Check if the file is clean from harmful metadatas
- '''
- for field in self.editor:
- if self._should_remove(field):
- return self._is_clean(self.editor)
- return True
-
- def _is_clean(self, fieldset):
- for field in fieldset:
- remove = self._should_remove(field)
- if remove is True:
- return False
- if remove is FIELD:
- if not self._is_clean(field):
- return False
- return True
-
- def remove_all(self):
- '''
- Remove all the files that are compromizing
- '''
- state = self._remove_all(self.editor)
- hachoir_core.field.writeIntoFile(self.editor, self.output)
- self.do_backup()
- return state
-
- def _remove_all(self, fieldset):
- try:
- for field in fieldset:
- remove = self._should_remove(field)
- if remove is True:
- self._remove(fieldset, field.name)
- if remove is FIELD:
- self._remove_all(field)
- return True
- except:
- return False
-
- def remove_all_strict(self):
- '''
- If the remove_all() is not efficient enough,
- this method is implemented :
- It is efficient, but destructive.
- In a perfect world, with nice fileformat,
- this method would not exist.
- '''
- self.remove_all()
-
- def _remove(self, fieldset, field):
- '''
- Delete the given field
- '''
- del fieldset[field]
-
- def get_meta(self):
- '''
- Return a dict with all the meta of the file
- '''
- metadata = {}
- self._get_meta(self.editor, metadata)
- return metadata
-
- def _get_meta(self, fieldset, metadata):
- for field in fieldset:
- remove = self._should_remove(field)
- if remove is True:
- try:
- metadata[field.name] = field.value
- except:
- metadata[field.name] = 'harmful content'
- if remove is FIELD:
- self._get_meta(field)
-
- def _should_remove(self, key):
- '''
- return True if the field is compromizing
- abstract method
- '''
- raise NotImplementedError
-
- def do_backup(self):
- '''
- Do a backup of the file if asked,
- and change his creation/access date
- '''
- if self.backup is False:
- mat.secure_remove(self.filename)
- os.rename(self.output, self.filename)
diff --git a/mat/strippers.py b/mat/strippers.py
deleted file mode 100644
index 7d27874..0000000
--- a/mat/strippers.py
+++ /dev/null
@@ -1,48 +0,0 @@
-'''
- Manage which fileformat can be processed
-'''
-
-import images
-import audio
-import office
-import archive
-import misc
-import subprocess
-
-STRIPPERS = {
- 'application/x-tar': archive.TarStripper,
- 'application/x-gzip': archive.GzipStripper,
- 'application/x-bzip2': archive.Bzip2Stripper,
- 'application/zip': archive.ZipStripper,
- 'audio/mpeg': audio.MpegAudioStripper,
- 'application/x-bittorrent': misc.TorrentStripper,
- 'application/opendocument': office.OpenDocumentStripper,
- 'application/officeopenxml': office.OpenXmlStripper,
-}
-
-try: # PDF support
- import poppler
- import cairo
- STRIPPERS['application/x-pdf'] = office.PdfStripper
- STRIPPERS['application/pdf'] = office.PdfStripper
-except ImportError:
- print('Unable to import python-poppler and/or python-cairo: no PDF \
- support')
-
-try: # mutangen-python : audio format support
- import mutagen
- STRIPPERS['audio/x-flac'] = audio.FlacStripper
- STRIPPERS['audio/vorbis'] = audio.OggStripper
-except ImportError:
- print('Unable to import python-mutagen: limited audio format support')
-
-try: # check if exiftool is installed on the system
- subprocess.Popen('exiftool', stdout=open('/dev/null'))
- import exiftool
- STRIPPERS['image/jpeg'] = exiftool.JpegStripper
- STRIPPERS['image/png'] = exiftool.PngStripper
-except: # if exiftool is not installed, use hachoir
- print('Unable to find exiftool: limited images support')
- STRIPPERS['image/jpeg'] = images.JpegStripper
- STRIPPERS['image/png'] = images.PngStripper
-
diff --git a/mat/tarfile/__init__.py b/mat/tarfile/__init__.py
deleted file mode 100644
index 8b13789..0000000
--- a/mat/tarfile/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/mat/tarfile/tarfile.py b/mat/tarfile/tarfile.py
deleted file mode 100644
index a40f9fc..0000000
--- a/mat/tarfile/tarfile.py
+++ /dev/null
@@ -1,2593 +0,0 @@
-# -*- coding: iso-8859-1 -*-
-#-------------------------------------------------------------------
-# tarfile.py
-#-------------------------------------------------------------------
-# Copyright (C) 2002 Lars Gustäbel
-# All rights reserved.
-#
-# Permission is hereby granted, free of charge, to any person
-# obtaining a copy of this software and associated documentation
-# files (the "Software"), to deal in the Software without
-# restriction, including without limitation the rights to use,
-# copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following
-# conditions:
-#
-# The above copyright notice and this permission notice shall be
-# included in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-# OTHER DEALINGS IN THE SOFTWARE.
-#
-"""Read from and write to tar format archives.
-"""
-
-__version__ = "$Revision$"
-# $Source$
-
-version = "0.9.0"
-__author__ = "Lars Gustäbel (lars@gustaebel.de)"
-__date__ = "$Date$"
-__cvsid__ = "$Id$"
-__credits__ = "Gustavo Niemeyer, Niels Gustäbel, Richard Townsend."
-
-#---------
-# Imports
-#---------
-import sys
-import os
-import shutil
-import stat
-import errno
-import time
-import struct
-import copy
-import re
-import operator
-
-try:
- import grp, pwd
-except ImportError:
- grp = pwd = None
-
-# from tarfile import *
-__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError"]
-
-#---------------------------------------------------------
-# tar constants
-#---------------------------------------------------------
-NUL = "\0" # the null character
-BLOCKSIZE = 512 # length of processing blocks
-RECORDSIZE = BLOCKSIZE * 20 # length of records
-GNU_MAGIC = "ustar \0" # magic gnu tar string
-POSIX_MAGIC = "ustar\x0000" # magic posix tar string
-
-LENGTH_NAME = 100 # maximum length of a filename
-LENGTH_LINK = 100 # maximum length of a linkname
-LENGTH_PREFIX = 155 # maximum length of the prefix field
-
-REGTYPE = "0" # regular file
-AREGTYPE = "\0" # regular file
-LNKTYPE = "1" # link (inside tarfile)
-SYMTYPE = "2" # symbolic link
-CHRTYPE = "3" # character special device
-BLKTYPE = "4" # block special device
-DIRTYPE = "5" # directory
-FIFOTYPE = "6" # fifo special device
-CONTTYPE = "7" # contiguous file
-
-GNUTYPE_LONGNAME = "L" # GNU tar longname
-GNUTYPE_LONGLINK = "K" # GNU tar longlink
-GNUTYPE_SPARSE = "S" # GNU tar sparse file
-
-XHDTYPE = "x" # POSIX.1-2001 extended header
-XGLTYPE = "g" # POSIX.1-2001 global header
-SOLARIS_XHDTYPE = "X" # Solaris extended header
-
-USTAR_FORMAT = 0 # POSIX.1-1988 (ustar) format
-GNU_FORMAT = 1 # GNU tar format
-PAX_FORMAT = 2 # POSIX.1-2001 (pax) format
-DEFAULT_FORMAT = GNU_FORMAT
-
-#---------------------------------------------------------
-# tarfile constants
-#---------------------------------------------------------
-# File types that tarfile supports:
-SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE,
- SYMTYPE, DIRTYPE, FIFOTYPE,
- CONTTYPE, CHRTYPE, BLKTYPE,
- GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
- GNUTYPE_SPARSE)
-
-# File types that will be treated as a regular file.
-REGULAR_TYPES = (REGTYPE, AREGTYPE,
- CONTTYPE, GNUTYPE_SPARSE)
-
-# File types that are part of the GNU tar format.
-GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
- GNUTYPE_SPARSE)
-
-# Fields from a pax header that override a TarInfo attribute.
-PAX_FIELDS = ("path", "linkpath", "size", "mtime",
- "uid", "gid", "uname", "gname")
-
-# Fields in a pax header that are numbers, all other fields
-# are treated as strings.
-PAX_NUMBER_FIELDS = {
- "atime": float,
- "ctime": float,
- "mtime": float,
- "uid": int,
- "gid": int,
- "size": int
-}
-
-#---------------------------------------------------------
-# Bits used in the mode field, values in octal.
-#---------------------------------------------------------
-S_IFLNK = 0120000 # symbolic link
-S_IFREG = 0100000 # regular file
-S_IFBLK = 0060000 # block device
-S_IFDIR = 0040000 # directory
-S_IFCHR = 0020000 # character device
-S_IFIFO = 0010000 # fifo
-
-TSUID = 04000 # set UID on execution
-TSGID = 02000 # set GID on execution
-TSVTX = 01000 # reserved
-
-TUREAD = 0400 # read by owner
-TUWRITE = 0200 # write by owner
-TUEXEC = 0100 # execute/search by owner
-TGREAD = 0040 # read by group
-TGWRITE = 0020 # write by group
-TGEXEC = 0010 # execute/search by group
-TOREAD = 0004 # read by other
-TOWRITE = 0002 # write by other
-TOEXEC = 0001 # execute/search by other
-
-#---------------------------------------------------------
-# initialization
-#---------------------------------------------------------
-ENCODING = sys.getfilesystemencoding()
-if ENCODING is None:
- ENCODING = sys.getdefaultencoding()
-
-#---------------------------------------------------------
-# Some useful functions
-#---------------------------------------------------------
-
-def stn(s, length):
- """Convert a python string to a null-terminated string buffer.
- """
- return s[:length] + (length - len(s)) * NUL
-
-def nts(s):
- """Convert a null-terminated string field to a python string.
- """
- # Use the string up to the first null char.
- p = s.find("\0")
- if p == -1:
- return s
- return s[:p]
-
-def nti(s):
- """Convert a number field to a python number.
- """
- # There are two possible encodings for a number field, see
- # itn() below.
- if s[0] != chr(0200):
- try:
- n = int(nts(s) or "0", 8)
- except ValueError:
- raise InvalidHeaderError("invalid header")
- else:
- n = 0L
- for i in xrange(len(s) - 1):
- n <<= 8
- n += ord(s[i + 1])
- return n
-
-def itn(n, digits=8, format=DEFAULT_FORMAT):
- """Convert a python number to a number field.
- """
- # POSIX 1003.1-1988 requires numbers to be encoded as a string of
- # octal digits followed by a null-byte, this allows values up to
- # (8**(digits-1))-1. GNU tar allows storing numbers greater than
- # that if necessary. A leading 0200 byte indicates this particular
- # encoding, the following digits-1 bytes are a big-endian
- # representation. This allows values up to (256**(digits-1))-1.
- if 0 <= n < 8 ** (digits - 1):
- s = "%0*o" % (digits - 1, n) + NUL
- else:
- if format != GNU_FORMAT or n >= 256 ** (digits - 1):
- raise ValueError("overflow in number field")
-
- if n < 0:
- # XXX We mimic GNU tar's behaviour with negative numbers,
- # this could raise OverflowError.
- n = struct.unpack("L", struct.pack("l", n))[0]
-
- s = ""
- for i in xrange(digits - 1):
- s = chr(n & 0377) + s
- n >>= 8
- s = chr(0200) + s
- return s
-
-def uts(s, encoding, errors):
- """Convert a unicode object to a string.
- """
- if errors == "utf-8":
- # An extra error handler similar to the -o invalid=UTF-8 option
- # in POSIX.1-2001. Replace untranslatable characters with their
- # UTF-8 representation.
- try:
- return s.encode(encoding, "strict")
- except UnicodeEncodeError:
- x = []
- for c in s:
- try:
- x.append(c.encode(encoding, "strict"))
- except UnicodeEncodeError:
- x.append(c.encode("utf8"))
- return "".join(x)
- else:
- return s.encode(encoding, errors)
-
-def calc_chksums(buf):
- """Calculate the checksum for a member's header by summing up all
- characters except for the chksum field which is treated as if
- it was filled with spaces. According to the GNU tar sources,
- some tars (Sun and NeXT) calculate chksum with signed char,
- which will be different if there are chars in the buffer with
- the high bit set. So we calculate two checksums, unsigned and
- signed.
- """
- unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512]))
- signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) + struct.unpack("356b", buf[156:512]))
- return unsigned_chksum, signed_chksum
-
-def copyfileobj(src, dst, length=None):
- """Copy length bytes from fileobj src to fileobj dst.
- If length is None, copy the entire content.
- """
- if length == 0:
- return
- if length is None:
- shutil.copyfileobj(src, dst)
- return
-
- BUFSIZE = 16 * 1024
- blocks, remainder = divmod(length, BUFSIZE)
- for b in xrange(blocks):
- buf = src.read(BUFSIZE)
- if len(buf) < BUFSIZE:
- raise IOError("end of file reached")
- dst.write(buf)
-
- if remainder != 0:
- buf = src.read(remainder)
- if len(buf) < remainder:
- raise IOError("end of file reached")
- dst.write(buf)
- return
-
-filemode_table = (
- ((S_IFLNK, "l"),
- (S_IFREG, "-"),
- (S_IFBLK, "b"),
- (S_IFDIR, "d"),
- (S_IFCHR, "c"),
- (S_IFIFO, "p")),
-
- ((TUREAD, "r"),),
- ((TUWRITE, "w"),),
- ((TUEXEC|TSUID, "s"),
- (TSUID, "S"),
- (TUEXEC, "x")),
-
- ((TGREAD, "r"),),
- ((TGWRITE, "w"),),
- ((TGEXEC|TSGID, "s"),
- (TSGID, "S"),
- (TGEXEC, "x")),
-
- ((TOREAD, "r"),),
- ((TOWRITE, "w"),),
- ((TOEXEC|TSVTX, "t"),
- (TSVTX, "T"),
- (TOEXEC, "x"))
-)
-
-def filemode(mode):
- """Convert a file's mode to a string of the form
- -rwxrwxrwx.
- Used by TarFile.list()
- """
- perm = []
- for table in filemode_table:
- for bit, char in table:
- if mode & bit == bit:
- perm.append(char)
- break
- else:
- perm.append("-")
- return "".join(perm)
-
-class TarError(Exception):
- """Base exception."""
- pass
-class ExtractError(TarError):
- """General exception for extract errors."""
- pass
-class ReadError(TarError):
- """Exception for unreadble tar archives."""
- pass
-class CompressionError(TarError):
- """Exception for unavailable compression methods."""
- pass
-class StreamError(TarError):
- """Exception for unsupported operations on stream-like TarFiles."""
- pass
-class HeaderError(TarError):
- """Base exception for header errors."""
- pass
-class EmptyHeaderError(HeaderError):
- """Exception for empty headers."""
- pass
-class TruncatedHeaderError(HeaderError):
- """Exception for truncated headers."""
- pass
-class EOFHeaderError(HeaderError):
- """Exception for end of file headers."""
- pass
-class InvalidHeaderError(HeaderError):
- """Exception for invalid headers."""
- pass
-class SubsequentHeaderError(HeaderError):
- """Exception for missing and invalid extended headers."""
- pass
-
-#---------------------------
-# internal stream interface
-#---------------------------
-class _LowLevelFile:
- """Low-level file object. Supports reading and writing.
- It is used instead of a regular file object for streaming
- access.
- """
-
- def __init__(self, name, mode):
- mode = {
- "r": os.O_RDONLY,
- "w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
- }[mode]
- if hasattr(os, "O_BINARY"):
- mode |= os.O_BINARY
- self.fd = os.open(name, mode, 0666)
-
- def close(self):
- os.close(self.fd)
-
- def read(self, size):
- return os.read(self.fd, size)
-
- def write(self, s):
- os.write(self.fd, s)
-
-class _Stream:
- """Class that serves as an adapter between TarFile and
- a stream-like object. The stream-like object only
- needs to have a read() or write() method and is accessed
- blockwise. Use of gzip or bzip2 compression is possible.
- A stream-like object could be for example: sys.stdin,
- sys.stdout, a socket, a tape device etc.
-
- _Stream is intended to be used only internally.
- """
-
- def __init__(self, name, mode, comptype, fileobj, bufsize):
- """Construct a _Stream object.
- """
- self._extfileobj = True
- if fileobj is None:
- fileobj = _LowLevelFile(name, mode)
- self._extfileobj = False
-
- if comptype == '*':
- # Enable transparent compression detection for the
- # stream interface
- fileobj = _StreamProxy(fileobj)
- comptype = fileobj.getcomptype()
-
- self.name = name or ""
- self.mode = mode
- self.comptype = comptype
- self.fileobj = fileobj
- self.bufsize = bufsize
- self.buf = ""
- self.pos = 0L
- self.closed = False
-
- if comptype == "gz":
- try:
- import zlib
- except ImportError:
- raise CompressionError("zlib module is not available")
- self.zlib = zlib
- self.crc = zlib.crc32("") & 0xffffffffL
- if mode == "r":
- self._init_read_gz()
- else:
- self._init_write_gz()
-
- if comptype == "bz2":
- try:
- import bz2
- except ImportError:
- raise CompressionError("bz2 module is not available")
- if mode == "r":
- self.dbuf = ""
- self.cmp = bz2.BZ2Decompressor()
- else:
- self.cmp = bz2.BZ2Compressor()
-
- def __del__(self):
- if hasattr(self, "closed") and not self.closed:
- self.close()
-
- def _init_write_gz(self):
- """Initialize for writing with gzip compression.
- """
- self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED,
- -self.zlib.MAX_WBITS,
- self.zlib.DEF_MEM_LEVEL,
- 0)
- timestamp = struct.pack(" self.bufsize:
- self.fileobj.write(self.buf[:self.bufsize])
- self.buf = self.buf[self.bufsize:]
-
- def close(self):
- """Close the _Stream object. No operation should be
- done on it afterwards.
- """
- if self.closed:
- return
-
- if self.mode == "w" and self.comptype != "tar":
- self.buf += self.cmp.flush()
-
- if self.mode == "w" and self.buf:
- self.fileobj.write(self.buf)
- self.buf = ""
- if self.comptype == "gz":
- # The native zlib crc is an unsigned 32-bit integer, but
- # the Python wrapper implicitly casts that to a signed C
- # long. So, on a 32-bit box self.crc may "look negative",
- # while the same crc on a 64-bit box may "look positive".
- # To avoid irksome warnings from the `struct` module, force
- # it to look positive on all boxes.
- self.fileobj.write(struct.pack("= 0:
- blocks, remainder = divmod(pos - self.pos, self.bufsize)
- for i in xrange(blocks):
- self.read(self.bufsize)
- self.read(remainder)
- else:
- raise StreamError("seeking backwards is not allowed")
- return self.pos
-
- def read(self, size=None):
- """Return the next size number of bytes from the stream.
- If size is not defined, return all bytes of the stream
- up to EOF.
- """
- if size is None:
- t = []
- while True:
- buf = self._read(self.bufsize)
- if not buf:
- break
- t.append(buf)
- buf = "".join(t)
- else:
- buf = self._read(size)
- self.pos += len(buf)
- return buf
-
- def _read(self, size):
- """Return size bytes from the stream.
- """
- if self.comptype == "tar":
- return self.__read(size)
-
- c = len(self.dbuf)
- t = [self.dbuf]
- while c < size:
- buf = self.__read(self.bufsize)
- if not buf:
- break
- try:
- buf = self.cmp.decompress(buf)
- except IOError:
- raise ReadError("invalid compressed data")
- t.append(buf)
- c += len(buf)
- t = "".join(t)
- self.dbuf = t[size:]
- return t[:size]
-
- def __read(self, size):
- """Return size bytes from stream. If internal buffer is empty,
- read another block from the stream.
- """
- c = len(self.buf)
- t = [self.buf]
- while c < size:
- buf = self.fileobj.read(self.bufsize)
- if not buf:
- break
- t.append(buf)
- c += len(buf)
- t = "".join(t)
- self.buf = t[size:]
- return t[:size]
-# class _Stream
-
-class _StreamProxy(object):
- """Small proxy class that enables transparent compression
- detection for the Stream interface (mode 'r|*').
- """
-
- def __init__(self, fileobj):
- self.fileobj = fileobj
- self.buf = self.fileobj.read(BLOCKSIZE)
-
- def read(self, size):
- self.read = self.fileobj.read
- return self.buf
-
- def getcomptype(self):
- if self.buf.startswith("\037\213\010"):
- return "gz"
- if self.buf.startswith("BZh91"):
- return "bz2"
- return "tar"
-
- def close(self):
- self.fileobj.close()
-# class StreamProxy
-
-class _BZ2Proxy(object):
- """Small proxy class that enables external file object
- support for "r:bz2" and "w:bz2" modes. This is actually
- a workaround for a limitation in bz2 module's BZ2File
- class which (unlike gzip.GzipFile) has no support for
- a file object argument.
- """
-
- blocksize = 16 * 1024
-
- def __init__(self, fileobj, mode):
- self.fileobj = fileobj
- self.mode = mode
- self.name = getattr(self.fileobj, "name", None)
- self.init()
-
- def init(self):
- import bz2
- self.pos = 0
- if self.mode == "r":
- self.bz2obj = bz2.BZ2Decompressor()
- self.fileobj.seek(0)
- self.buf = ""
- else:
- self.bz2obj = bz2.BZ2Compressor()
-
- def read(self, size):
- b = [self.buf]
- x = len(self.buf)
- while x < size:
- raw = self.fileobj.read(self.blocksize)
- if not raw:
- break
- data = self.bz2obj.decompress(raw)
- b.append(data)
- x += len(data)
- self.buf = "".join(b)
-
- buf = self.buf[:size]
- self.buf = self.buf[size:]
- self.pos += len(buf)
- return buf
-
- def seek(self, pos):
- if pos < self.pos:
- self.init()
- self.read(pos - self.pos)
-
- def tell(self):
- return self.pos
-
- def write(self, data):
- self.pos += len(data)
- raw = self.bz2obj.compress(data)
- self.fileobj.write(raw)
-
- def close(self):
- if self.mode == "w":
- raw = self.bz2obj.flush()
- self.fileobj.write(raw)
-# class _BZ2Proxy
-
-#------------------------
-# Extraction file object
-#------------------------
-class _FileInFile(object):
- """A thin wrapper around an existing file object that
- provides a part of its data as an individual file
- object.
- """
-
- def __init__(self, fileobj, offset, size, sparse=None):
- self.fileobj = fileobj
- self.offset = offset
- self.size = size
- self.sparse = sparse
- self.position = 0
-
- def tell(self):
- """Return the current file position.
- """
- return self.position
-
- def seek(self, position):
- """Seek to a position in the file.
- """
- self.position = position
-
- def read(self, size=None):
- """Read data from the file.
- """
- if size is None:
- size = self.size - self.position
- else:
- size = min(size, self.size - self.position)
-
- if self.sparse is None:
- return self.readnormal(size)
- else:
- return self.readsparse(size)
-
- def readnormal(self, size):
- """Read operation for regular files.
- """
- self.fileobj.seek(self.offset + self.position)
- self.position += size
- return self.fileobj.read(size)
-
- def readsparse(self, size):
- """Read operation for sparse files.
- """
- data = []
- while size > 0:
- buf = self.readsparsesection(size)
- if not buf:
- break
- size -= len(buf)
- data.append(buf)
- return "".join(data)
-
- def readsparsesection(self, size):
- """Read a single section of a sparse file.
- """
- section = self.sparse.find(self.position)
-
- if section is None:
- return ""
-
- size = min(size, section.offset + section.size - self.position)
-
- if isinstance(section, _data):
- realpos = section.realpos + self.position - section.offset
- self.fileobj.seek(self.offset + realpos)
- self.position += size
- return self.fileobj.read(size)
- else:
- self.position += size
- return NUL * size
-#class _FileInFile
-
-
-class ExFileObject(object):
- """File-like object for reading an archive member.
- Is returned by TarFile.extractfile().
- """
- blocksize = 1024
-
- def __init__(self, tarfile, tarinfo):
- self.fileobj = _FileInFile(tarfile.fileobj,
- tarinfo.offset_data,
- tarinfo.size,
- getattr(tarinfo, "sparse", None))
- self.name = tarinfo.name
- self.mode = "r"
- self.closed = False
- self.size = tarinfo.size
-
- self.position = 0
- self.buffer = ""
-
- def read(self, size=None):
- """Read at most size bytes from the file. If size is not
- present or None, read all data until EOF is reached.
- """
- if self.closed:
- raise ValueError("I/O operation on closed file")
-
- buf = ""
- if self.buffer:
- if size is None:
- buf = self.buffer
- self.buffer = ""
- else:
- buf = self.buffer[:size]
- self.buffer = self.buffer[size:]
-
- if size is None:
- buf += self.fileobj.read()
- else:
- buf += self.fileobj.read(size - len(buf))
-
- self.position += len(buf)
- return buf
-
- def readline(self, size=-1):
- """Read one entire line from the file. If size is present
- and non-negative, return a string with at most that
- size, which may be an incomplete line.
- """
- if self.closed:
- raise ValueError("I/O operation on closed file")
-
- if "\n" in self.buffer:
- pos = self.buffer.find("\n") + 1
- else:
- buffers = [self.buffer]
- while True:
- buf = self.fileobj.read(self.blocksize)
- buffers.append(buf)
- if not buf or "\n" in buf:
- self.buffer = "".join(buffers)
- pos = self.buffer.find("\n") + 1
- if pos == 0:
- # no newline found.
- pos = len(self.buffer)
- break
-
- if size != -1:
- pos = min(size, pos)
-
- buf = self.buffer[:pos]
- self.buffer = self.buffer[pos:]
- self.position += len(buf)
- return buf
-
- def readlines(self):
- """Return a list with all remaining lines.
- """
- result = []
- while True:
- line = self.readline()
- if not line: break
- result.append(line)
- return result
-
- def tell(self):
- """Return the current file position.
- """
- if self.closed:
- raise ValueError("I/O operation on closed file")
-
- return self.position
-
- def seek(self, pos, whence=os.SEEK_SET):
- """Seek to a position in the file.
- """
- if self.closed:
- raise ValueError("I/O operation on closed file")
-
- if whence == os.SEEK_SET:
- self.position = min(max(pos, 0), self.size)
- elif whence == os.SEEK_CUR:
- if pos < 0:
- self.position = max(self.position + pos, 0)
- else:
- self.position = min(self.position + pos, self.size)
- elif whence == os.SEEK_END:
- self.position = max(min(self.size + pos, self.size), 0)
- else:
- raise ValueError("Invalid argument")
-
- self.buffer = ""
- self.fileobj.seek(self.position)
-
- def close(self):
- """Close the file object.
- """
- self.closed = True
-
- def __iter__(self):
- """Get an iterator over the file's lines.
- """
- while True:
- line = self.readline()
- if not line:
- break
- yield line
-#class ExFileObject
-
-#------------------
-# Exported Classes
-#------------------
-class TarInfo(object):
- """Informational class which holds the details about an
- archive member given by a tar header block.
- TarInfo objects are returned by TarFile.getmember(),
- TarFile.getmembers() and TarFile.gettarinfo() and are
- usually created internally.
- """
-
- def __init__(self, name=""):
- """Construct a TarInfo object. name is the optional name
- of the member.
- """
- self.name = name # member name
- self.mode = 0644 # file permissions
- self.uid = 0 # user id
- self.gid = 0 # group id
- self.size = 0 # file size
- self.mtime = 0 # modification time
- self.chksum = 0 # header checksum
- self.type = REGTYPE # member type
- self.linkname = "" # link name
- self.uname = "" # user name
- self.gname = "" # group name
- self.devmajor = 0 # device major number
- self.devminor = 0 # device minor number
-
- self.offset = 0 # the tar header starts here
- self.offset_data = 0 # the file's data starts here
-
- self.pax_headers = {} # pax header information
-
- # In pax headers the "name" and "linkname" field are called
- # "path" and "linkpath".
- def _getpath(self):
- return self.name
- def _setpath(self, name):
- self.name = name
- path = property(_getpath, _setpath)
-
- def _getlinkpath(self):
- return self.linkname
- def _setlinkpath(self, linkname):
- self.linkname = linkname
- linkpath = property(_getlinkpath, _setlinkpath)
-
- def __repr__(self):
- return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self))
-
- def get_info(self, encoding, errors):
- """Return the TarInfo's attributes as a dictionary.
- """
- info = {
- "name": self.name,
- "mode": self.mode & 07777,
- "uid": self.uid,
- "gid": self.gid,
- "size": self.size,
- "mtime": self.mtime,
- "chksum": self.chksum,
- "type": self.type,
- "linkname": self.linkname,
- "uname": self.uname,
- "gname": self.gname,
- "devmajor": self.devmajor,
- "devminor": self.devminor
- }
-
- if info["type"] == DIRTYPE and not info["name"].endswith("/"):
- info["name"] += "/"
-
- for key in ("name", "linkname", "uname", "gname"):
- if type(info[key]) is unicode:
- info[key] = info[key].encode(encoding, errors)
-
- return info
-
- def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="strict"):
- """Return a tar header as a string of 512 byte blocks.
- """
- info = self.get_info(encoding, errors)
-
- if format == USTAR_FORMAT:
- return self.create_ustar_header(info)
- elif format == GNU_FORMAT:
- return self.create_gnu_header(info)
- elif format == PAX_FORMAT:
- return self.create_pax_header(info, encoding, errors)
- else:
- raise ValueError("invalid format")
-
- def create_ustar_header(self, info):
- """Return the object as a ustar header block.
- """
- info["magic"] = POSIX_MAGIC
-
- if len(info["linkname"]) > LENGTH_LINK:
- raise ValueError("linkname is too long")
-
- if len(info["name"]) > LENGTH_NAME:
- info["prefix"], info["name"] = self._posix_split_name(info["name"])
-
- return self._create_header(info, USTAR_FORMAT)
-
- def create_gnu_header(self, info):
- """Return the object as a GNU header block sequence.
- """
- info["magic"] = GNU_MAGIC
-
- buf = ""
- if len(info["linkname"]) > LENGTH_LINK:
- buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK)
-
- if len(info["name"]) > LENGTH_NAME:
- buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME)
-
- return buf + self._create_header(info, GNU_FORMAT)
-
- def create_pax_header(self, info, encoding, errors):
- """Return the object as a ustar header block. If it cannot be
- represented this way, prepend a pax extended header sequence
- with supplement information.
- """
- info["magic"] = POSIX_MAGIC
- pax_headers = self.pax_headers.copy()
-
- # Test string fields for values that exceed the field length or cannot
- # be represented in ASCII encoding.
- for name, hname, length in (
- ("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK),
- ("uname", "uname", 32), ("gname", "gname", 32)):
-
- if hname in pax_headers:
- # The pax header has priority.
- continue
-
- val = info[name].decode(encoding, errors)
-
- # Try to encode the string as ASCII.
- try:
- val.encode("ascii")
- except UnicodeEncodeError:
- pax_headers[hname] = val
- continue
-
- if len(info[name]) > length:
- pax_headers[hname] = val
-
- # Test number fields for values that exceed the field limit or values
- # that like to be stored as float.
- for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)):
- if name in pax_headers:
- # The pax header has priority. Avoid overflow.
- info[name] = 0
- continue
-
- val = info[name]
- if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float):
- pax_headers[name] = unicode(val)
- info[name] = 0
-
- # Create a pax extended header if necessary.
- if pax_headers:
- buf = self._create_pax_generic_header(pax_headers)
- else:
- buf = ""
-
- return buf + self._create_header(info, USTAR_FORMAT)
-
- @classmethod
- def create_pax_global_header(cls, pax_headers):
- """Return the object as a pax global header block sequence.
- """
- return cls._create_pax_generic_header(pax_headers, type=XGLTYPE)
-
- def _posix_split_name(self, name):
- """Split a name longer than 100 chars into a prefix
- and a name part.
- """
- prefix = name[:LENGTH_PREFIX + 1]
- while prefix and prefix[-1] != "/":
- prefix = prefix[:-1]
-
- name = name[len(prefix):]
- prefix = prefix[:-1]
-
- if not prefix or len(name) > LENGTH_NAME:
- raise ValueError("name is too long")
- return prefix, name
-
- @staticmethod
- def _create_header(info, format):
- """Return a header block. info is a dictionary with file
- information, format must be one of the *_FORMAT constants.
- """
- parts = [
- stn(info.get("name", ""), 100),
- itn(info.get("mode", 0) & 07777, 8, format),
- itn(info.get("uid", 0), 8, format),
- itn(info.get("gid", 0), 8, format),
- itn(info.get("size", 0), 12, format),
- itn(info.get("mtime", 0), 12, format),
- " ", # checksum field
- info.get("type", REGTYPE),
- stn(info.get("linkname", ""), 100),
- stn(info.get("magic", POSIX_MAGIC), 8),
- stn(info.get("uname", ""), 32),
- stn(info.get("gname", ""), 32),
- itn(info.get("devmajor", 0), 8, format),
- itn(info.get("devminor", 0), 8, format),
- stn(info.get("prefix", ""), 155)
- ]
-
- buf = struct.pack("%ds" % BLOCKSIZE, "".join(parts))
- chksum = calc_chksums(buf[-BLOCKSIZE:])[0]
- buf = buf[:-364] + "%06o\0" % chksum + buf[-357:]
- return buf
-
- @staticmethod
- def _create_payload(payload):
- """Return the string payload filled with zero bytes
- up to the next 512 byte border.
- """
- blocks, remainder = divmod(len(payload), BLOCKSIZE)
- if remainder > 0:
- payload += (BLOCKSIZE - remainder) * NUL
- return payload
-
- @classmethod
- def _create_gnu_long_header(cls, name, type):
- """Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence
- for name.
- """
- name += NUL
-
- info = {}
- info["name"] = "././@LongLink"
- info["type"] = type
- info["size"] = len(name)
- info["magic"] = GNU_MAGIC
-
- # create extended header + name blocks.
- return cls._create_header(info, USTAR_FORMAT) + \
- cls._create_payload(name)
-
- @classmethod
- def _create_pax_generic_header(cls, pax_headers, type=XHDTYPE):
- """Return a POSIX.1-2001 extended or global header sequence
- that contains a list of keyword, value pairs. The values
- must be unicode objects.
- """
- records = []
- for keyword, value in pax_headers.iteritems():
- keyword = keyword.encode("utf8")
- value = value.encode("utf8")
- l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n'
- n = p = 0
- while True:
- n = l + len(str(p))
- if n == p:
- break
- p = n
- records.append("%d %s=%s\n" % (p, keyword, value))
- records = "".join(records)
-
- # We use a hardcoded "././@PaxHeader" name like star does
- # instead of the one that POSIX recommends.
- info = {}
- info["name"] = "././@PaxHeader"
- info["type"] = type
- info["size"] = len(records)
- info["magic"] = POSIX_MAGIC
-
- # Create pax header + record blocks.
- return cls._create_header(info, USTAR_FORMAT) + \
- cls._create_payload(records)
-
- @classmethod
- def frombuf(cls, buf):
- """Construct a TarInfo object from a 512 byte string buffer.
- """
- if len(buf) == 0:
- raise EmptyHeaderError("empty header")
- if len(buf) != BLOCKSIZE:
- raise TruncatedHeaderError("truncated header")
- if buf.count(NUL) == BLOCKSIZE:
- raise EOFHeaderError("end of file header")
-
- chksum = nti(buf[148:156])
- if chksum not in calc_chksums(buf):
- raise InvalidHeaderError("bad checksum")
-
- obj = cls()
- obj.buf = buf
- obj.name = nts(buf[0:100])
- obj.mode = nti(buf[100:108])
- obj.uid = nti(buf[108:116])
- obj.gid = nti(buf[116:124])
- obj.size = nti(buf[124:136])
- obj.mtime = nti(buf[136:148])
- obj.chksum = chksum
- obj.type = buf[156:157]
- obj.linkname = nts(buf[157:257])
- obj.uname = nts(buf[265:297])
- obj.gname = nts(buf[297:329])
- obj.devmajor = nti(buf[329:337])
- obj.devminor = nti(buf[337:345])
- prefix = nts(buf[345:500])
-
- # Old V7 tar format represents a directory as a regular
- # file with a trailing slash.
- if obj.type == AREGTYPE and obj.name.endswith("/"):
- obj.type = DIRTYPE
-
- # Remove redundant slashes from directories.
- if obj.isdir():
- obj.name = obj.name.rstrip("/")
-
- # Reconstruct a ustar longname.
- if prefix and obj.type not in GNU_TYPES:
- obj.name = prefix + "/" + obj.name
- return obj
-
- @classmethod
- def fromtarfile(cls, tarfile):
- """Return the next TarInfo object from TarFile object
- tarfile.
- """
- buf = tarfile.fileobj.read(BLOCKSIZE)
- obj = cls.frombuf(buf)
- obj.offset = tarfile.fileobj.tell() - BLOCKSIZE
- return obj._proc_member(tarfile)
-
- #--------------------------------------------------------------------------
- # The following are methods that are called depending on the type of a
- # member. The entry point is _proc_member() which can be overridden in a
- # subclass to add custom _proc_*() methods. A _proc_*() method MUST
- # implement the following
- # operations:
- # 1. Set self.offset_data to the position where the data blocks begin,
- # if there is data that follows.
- # 2. Set tarfile.offset to the position where the next member's header will
- # begin.
- # 3. Return self or another valid TarInfo object.
- def _proc_member(self, tarfile):
- """Choose the right processing method depending on
- the type and call it.
- """
- if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK):
- return self._proc_gnulong(tarfile)
- elif self.type == GNUTYPE_SPARSE:
- return self._proc_sparse(tarfile)
- elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE):
- return self._proc_pax(tarfile)
- else:
- return self._proc_builtin(tarfile)
-
- def _proc_builtin(self, tarfile):
- """Process a builtin type or an unknown type which
- will be treated as a regular file.
- """
- self.offset_data = tarfile.fileobj.tell()
- offset = self.offset_data
- if self.isreg() or self.type not in SUPPORTED_TYPES:
- # Skip the following data blocks.
- offset += self._block(self.size)
- tarfile.offset = offset
-
- # Patch the TarInfo object with saved global
- # header information.
- self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors)
-
- return self
-
- def _proc_gnulong(self, tarfile):
- """Process the blocks that hold a GNU longname
- or longlink member.
- """
- buf = tarfile.fileobj.read(self._block(self.size))
-
- # Fetch the next header and process it.
- try:
- next = self.fromtarfile(tarfile)
- except HeaderError:
- raise SubsequentHeaderError("missing or bad subsequent header")
-
- # Patch the TarInfo object from the next header with
- # the longname information.
- next.offset = self.offset
- if self.type == GNUTYPE_LONGNAME:
- next.name = nts(buf)
- elif self.type == GNUTYPE_LONGLINK:
- next.linkname = nts(buf)
-
- return next
-
- def _proc_sparse(self, tarfile):
- """Process a GNU sparse header plus extra headers.
- """
- buf = self.buf
- sp = _ringbuffer()
- pos = 386
- lastpos = 0L
- realpos = 0L
- # There are 4 possible sparse structs in the
- # first header.
- for i in xrange(4):
- try:
- offset = nti(buf[pos:pos + 12])
- numbytes = nti(buf[pos + 12:pos + 24])
- except ValueError:
- break
- if offset > lastpos:
- sp.append(_hole(lastpos, offset - lastpos))
- sp.append(_data(offset, numbytes, realpos))
- realpos += numbytes
- lastpos = offset + numbytes
- pos += 24
-
- isextended = ord(buf[482])
- origsize = nti(buf[483:495])
-
- # If the isextended flag is given,
- # there are extra headers to process.
- while isextended == 1:
- buf = tarfile.fileobj.read(BLOCKSIZE)
- pos = 0
- for i in xrange(21):
- try:
- offset = nti(buf[pos:pos + 12])
- numbytes = nti(buf[pos + 12:pos + 24])
- except ValueError:
- break
- if offset > lastpos:
- sp.append(_hole(lastpos, offset - lastpos))
- sp.append(_data(offset, numbytes, realpos))
- realpos += numbytes
- lastpos = offset + numbytes
- pos += 24
- isextended = ord(buf[504])
-
- if lastpos < origsize:
- sp.append(_hole(lastpos, origsize - lastpos))
-
- self.sparse = sp
-
- self.offset_data = tarfile.fileobj.tell()
- tarfile.offset = self.offset_data + self._block(self.size)
- self.size = origsize
-
- return self
-
- def _proc_pax(self, tarfile):
- """Process an extended or global header as described in
- POSIX.1-2001.
- """
- # Read the header information.
- buf = tarfile.fileobj.read(self._block(self.size))
-
- # A pax header stores supplemental information for either
- # the following file (extended) or all following files
- # (global).
- if self.type == XGLTYPE:
- pax_headers = tarfile.pax_headers
- else:
- pax_headers = tarfile.pax_headers.copy()
-
- # Parse pax header information. A record looks like that:
- # "%d %s=%s\n" % (length, keyword, value). length is the size
- # of the complete record including the length field itself and
- # the newline. keyword and value are both UTF-8 encoded strings.
- regex = re.compile(r"(\d+) ([^=]+)=", re.U)
- pos = 0
- while True:
- match = regex.match(buf, pos)
- if not match:
- break
-
- length, keyword = match.groups()
- length = int(length)
- value = buf[match.end(2) + 1:match.start(1) + length - 1]
-
- keyword = keyword.decode("utf8")
- value = value.decode("utf8")
-
- pax_headers[keyword] = value
- pos += length
-
- # Fetch the next header.
- try:
- next = self.fromtarfile(tarfile)
- except HeaderError:
- raise SubsequentHeaderError("missing or bad subsequent header")
-
- if self.type in (XHDTYPE, SOLARIS_XHDTYPE):
- # Patch the TarInfo object with the extended header info.
- next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors)
- next.offset = self.offset
-
- if "size" in pax_headers:
- # If the extended header replaces the size field,
- # we need to recalculate the offset where the next
- # header starts.
- offset = next.offset_data
- if next.isreg() or next.type not in SUPPORTED_TYPES:
- offset += next._block(next.size)
- tarfile.offset = offset
-
- return next
-
- def _apply_pax_info(self, pax_headers, encoding, errors):
- """Replace fields with supplemental information from a previous
- pax extended or global header.
- """
- for keyword, value in pax_headers.iteritems():
- if keyword not in PAX_FIELDS:
- continue
-
- if keyword == "path":
- value = value.rstrip("/")
-
- if keyword in PAX_NUMBER_FIELDS:
- try:
- value = PAX_NUMBER_FIELDS[keyword](value)
- except ValueError:
- value = 0
- else:
- value = uts(value, encoding, errors)
-
- setattr(self, keyword, value)
-
- self.pax_headers = pax_headers.copy()
-
- def _block(self, count):
- """Round up a byte count by BLOCKSIZE and return it,
- e.g. _block(834) => 1024.
- """
- blocks, remainder = divmod(count, BLOCKSIZE)
- if remainder:
- blocks += 1
- return blocks * BLOCKSIZE
-
- def isreg(self):
- return self.type in REGULAR_TYPES
- def isfile(self):
- return self.isreg()
- def isdir(self):
- return self.type == DIRTYPE
- def issym(self):
- return self.type == SYMTYPE
- def islnk(self):
- return self.type == LNKTYPE
- def ischr(self):
- return self.type == CHRTYPE
- def isblk(self):
- return self.type == BLKTYPE
- def isfifo(self):
- return self.type == FIFOTYPE
- def issparse(self):
- return self.type == GNUTYPE_SPARSE
- def isdev(self):
- return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE)
-# class TarInfo
-
-class TarFile(object):
- """The TarFile Class provides an interface to tar archives.
- """
-
- debug = 0 # May be set from 0 (no msgs) to 3 (all msgs)
-
- dereference = False # If true, add content of linked file to the
- # tar file, else the link.
-
- ignore_zeros = False # If true, skips empty or invalid blocks and
- # continues processing.
-
- errorlevel = 1 # If 0, fatal errors only appear in debug
- # messages (if debug >= 0). If > 0, errors
- # are passed to the caller as exceptions.
-
- format = DEFAULT_FORMAT # The format to use when creating an archive.
-
- encoding = ENCODING # Encoding for 8-bit character strings.
-
- errors = None # Error handler for unicode conversion.
-
- tarinfo = TarInfo # The default TarInfo class to use.
-
- fileobject = ExFileObject # The default ExFileObject class to use.
-
- def __init__(self, name=None, mode="r", fileobj=None, format=None,
- tarinfo=None, dereference=None, ignore_zeros=None, encoding=None,
- errors=None, pax_headers=None, debug=None, errorlevel=None):
- """Open an (uncompressed) tar archive `name'. `mode' is either 'r' to
- read from an existing archive, 'a' to append data to an existing
- file or 'w' to create a new file overwriting an existing one. `mode'
- defaults to 'r'.
- If `fileobj' is given, it is used for reading or writing data. If it
- can be determined, `mode' is overridden by `fileobj's mode.
- `fileobj' is not closed, when TarFile is closed.
- """
- if len(mode) > 1 or mode not in "raw":
- raise ValueError("mode must be 'r', 'a' or 'w'")
- self.mode = mode
- self._mode = {"r": "rb", "a": "r+b", "w": "wb"}[mode]
-
- if not fileobj:
- if self.mode == "a" and not os.path.exists(name):
- # Create nonexistent files in append mode.
- self.mode = "w"
- self._mode = "wb"
- fileobj = bltn_open(name, self._mode)
- self._extfileobj = False
- else:
- if name is None and hasattr(fileobj, "name"):
- name = fileobj.name
- if hasattr(fileobj, "mode"):
- self._mode = fileobj.mode
- self._extfileobj = True
- self.name = os.path.abspath(name) if name else None
- self.fileobj = fileobj
-
- # Init attributes.
- if format is not None:
- self.format = format
- if tarinfo is not None:
- self.tarinfo = tarinfo
- if dereference is not None:
- self.dereference = dereference
- if ignore_zeros is not None:
- self.ignore_zeros = ignore_zeros
- if encoding is not None:
- self.encoding = encoding
-
- if errors is not None:
- self.errors = errors
- elif mode == "r":
- self.errors = "utf-8"
- else:
- self.errors = "strict"
-
- if pax_headers is not None and self.format == PAX_FORMAT:
- self.pax_headers = pax_headers
- else:
- self.pax_headers = {}
-
- if debug is not None:
- self.debug = debug
- if errorlevel is not None:
- self.errorlevel = errorlevel
-
- # Init datastructures.
- self.closed = False
- self.members = [] # list of members as TarInfo objects
- self._loaded = False # flag if all members have been read
- self.offset = self.fileobj.tell()
- # current position in the archive file
- self.inodes = {} # dictionary caching the inodes of
- # archive members already added
-
- try:
- if self.mode == "r":
- self.firstmember = None
- self.firstmember = self.next()
-
- if self.mode == "a":
- # Move to the end of the archive,
- # before the first empty block.
- while True:
- self.fileobj.seek(self.offset)
- try:
- tarinfo = self.tarinfo.fromtarfile(self)
- self.members.append(tarinfo)
- except EOFHeaderError:
- self.fileobj.seek(self.offset)
- break
- except HeaderError, e:
- raise ReadError(str(e))
-
- if self.mode in "aw":
- self._loaded = True
-
- if self.pax_headers:
- buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy())
- self.fileobj.write(buf)
- self.offset += len(buf)
- except:
- if not self._extfileobj:
- self.fileobj.close()
- self.closed = True
- raise
-
- def _getposix(self):
- return self.format == USTAR_FORMAT
- def _setposix(self, value):
- import warnings
- warnings.warn("use the format attribute instead", DeprecationWarning,
- 2)
- if value:
- self.format = USTAR_FORMAT
- else:
- self.format = GNU_FORMAT
- posix = property(_getposix, _setposix)
-
- #--------------------------------------------------------------------------
- # Below are the classmethods which act as alternate constructors to the
- # TarFile class. The open() method is the only one that is needed for
- # public use; it is the "super"-constructor and is able to select an
- # adequate "sub"-constructor for a particular compression using the mapping
- # from OPEN_METH.
- #
- # This concept allows one to subclass TarFile without losing the comfort of
- # the super-constructor. A sub-constructor is registered and made available
- # by adding it to the mapping in OPEN_METH.
-
- @classmethod
- def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs):
- """Open a tar archive for reading, writing or appending. Return
- an appropriate TarFile class.
-
- mode:
- 'r' or 'r:*' open for reading with transparent compression
- 'r:' open for reading exclusively uncompressed
- 'r:gz' open for reading with gzip compression
- 'r:bz2' open for reading with bzip2 compression
- 'a' or 'a:' open for appending, creating the file if necessary
- 'w' or 'w:' open for writing without compression
- 'w:gz' open for writing with gzip compression
- 'w:bz2' open for writing with bzip2 compression
-
- 'r|*' open a stream of tar blocks with transparent compression
- 'r|' open an uncompressed stream of tar blocks for reading
- 'r|gz' open a gzip compressed stream of tar blocks
- 'r|bz2' open a bzip2 compressed stream of tar blocks
- 'w|' open an uncompressed stream for writing
- 'w|gz' open a gzip compressed stream for writing
- 'w|bz2' open a bzip2 compressed stream for writing
- """
-
- if not name and not fileobj:
- raise ValueError("nothing to open")
-
- if mode in ("r", "r:*"):
- # Find out which *open() is appropriate for opening the file.
- for comptype in cls.OPEN_METH:
- func = getattr(cls, cls.OPEN_METH[comptype])
- if fileobj is not None:
- saved_pos = fileobj.tell()
- try:
- return func(name, "r", fileobj, **kwargs)
- except (ReadError, CompressionError), e:
- if fileobj is not None:
- fileobj.seek(saved_pos)
- continue
- raise ReadError("file could not be opened successfully")
-
- elif ":" in mode:
- filemode, comptype = mode.split(":", 1)
- filemode = filemode or "r"
- comptype = comptype or "tar"
-
- # Select the *open() function according to
- # given compression.
- if comptype in cls.OPEN_METH:
- func = getattr(cls, cls.OPEN_METH[comptype])
- else:
- raise CompressionError("unknown compression type %r" % comptype)
- return func(name, filemode, fileobj, **kwargs)
-
- elif "|" in mode:
- filemode, comptype = mode.split("|", 1)
- filemode = filemode or "r"
- comptype = comptype or "tar"
-
- if filemode not in "rw":
- raise ValueError("mode must be 'r' or 'w'")
-
- t = cls(name, filemode,
- _Stream(name, filemode, comptype, fileobj, bufsize),
- **kwargs)
- t._extfileobj = False
- return t
-
- elif mode in "aw":
- return cls.taropen(name, mode, fileobj, **kwargs)
-
- raise ValueError("undiscernible mode")
-
- @classmethod
- def taropen(cls, name, mode="r", fileobj=None, **kwargs):
- """Open uncompressed tar archive name for reading or writing.
- """
- if len(mode) > 1 or mode not in "raw":
- raise ValueError("mode must be 'r', 'a' or 'w'")
- return cls(name, mode, fileobj, **kwargs)
-
- @classmethod
- def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
- """Open gzip compressed tar archive name for reading or writing.
- Appending is not allowed.
- """
- if len(mode) > 1 or mode not in "rw":
- raise ValueError("mode must be 'r' or 'w'")
-
- try:
- import gzip
- gzip.GzipFile
- except (ImportError, AttributeError):
- raise CompressionError("gzip module is not available")
-
- if fileobj is None:
- fileobj = bltn_open(name, mode + "b")
-
- try:
- t = cls.taropen(name, mode,
- gzip.GzipFile(name, mode, compresslevel, fileobj),
- **kwargs)
- except IOError:
- raise ReadError("not a gzip file")
- t._extfileobj = False
- return t
-
- @classmethod
- def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
- """Open bzip2 compressed tar archive name for reading or writing.
- Appending is not allowed.
- """
- if len(mode) > 1 or mode not in "rw":
- raise ValueError("mode must be 'r' or 'w'.")
-
- try:
- import bz2
- except ImportError:
- raise CompressionError("bz2 module is not available")
-
- if fileobj is not None:
- fileobj = _BZ2Proxy(fileobj, mode)
- else:
- fileobj = bz2.BZ2File(name, mode, compresslevel=compresslevel)
-
- try:
- t = cls.taropen(name, mode, fileobj, **kwargs)
- except (IOError, EOFError):
- raise ReadError("not a bzip2 file")
- t._extfileobj = False
- return t
-
- # All *open() methods are registered here.
- OPEN_METH = {
- "tar": "taropen", # uncompressed tar
- "gz": "gzopen", # gzip compressed tar
- "bz2": "bz2open" # bzip2 compressed tar
- }
-
- #--------------------------------------------------------------------------
- # The public methods which TarFile provides:
-
- def close(self):
- """Close the TarFile. In write-mode, two finishing zero blocks are
- appended to the archive.
- """
- if self.closed:
- return
-
- if self.mode in "aw":
- self.fileobj.write(NUL * (BLOCKSIZE * 2))
- self.offset += (BLOCKSIZE * 2)
- # fill up the end with zero-blocks
- # (like option -b20 for tar does)
- blocks, remainder = divmod(self.offset, RECORDSIZE)
- if remainder > 0:
- self.fileobj.write(NUL * (RECORDSIZE - remainder))
-
- if not self._extfileobj:
- self.fileobj.close()
- self.closed = True
-
- def getmember(self, name):
- """Return a TarInfo object for member `name'. If `name' can not be
- found in the archive, KeyError is raised. If a member occurs more
- than once in the archive, its last occurrence is assumed to be the
- most up-to-date version.
- """
- tarinfo = self._getmember(name)
- if tarinfo is None:
- raise KeyError("filename %r not found" % name)
- return tarinfo
-
- def getmembers(self):
- """Return the members of the archive as a list of TarInfo objects. The
- list has the same order as the members in the archive.
- """
- self._check()
- if not self._loaded: # if we want to obtain a list of
- self._load() # all members, we first have to
- # scan the whole archive.
- return self.members
-
- def getnames(self):
- """Return the members of the archive as a list of their names. It has
- the same order as the list returned by getmembers().
- """
- return [tarinfo.name for tarinfo in self.getmembers()]
-
- def gettarinfo(self, name=None, arcname=None, fileobj=None):
- """Create a TarInfo object for either the file `name' or the file
- object `fileobj' (using os.fstat on its file descriptor). You can
- modify some of the TarInfo's attributes before you add it using
- addfile(). If given, `arcname' specifies an alternative name for the
- file in the archive.
- """
- self._check("aw")
-
- # When fileobj is given, replace name by
- # fileobj's real name.
- if fileobj is not None:
- name = fileobj.name
-
- # Building the name of the member in the archive.
- # Backward slashes are converted to forward slashes,
- # Absolute paths are turned to relative paths.
- if arcname is None:
- arcname = name
- drv, arcname = os.path.splitdrive(arcname)
- arcname = arcname.replace(os.sep, "/")
- arcname = arcname.lstrip("/")
-
- # Now, fill the TarInfo object with
- # information specific for the file.
- tarinfo = self.tarinfo()
- tarinfo.tarfile = self
-
- # Use os.stat or os.lstat, depending on platform
- # and if symlinks shall be resolved.
- if fileobj is None:
- if hasattr(os, "lstat") and not self.dereference:
- statres = os.lstat(name)
- else:
- statres = os.stat(name)
- else:
- statres = os.fstat(fileobj.fileno())
- linkname = ""
-
- stmd = statres.st_mode
- if stat.S_ISREG(stmd):
- inode = (statres.st_ino, statres.st_dev)
- if not self.dereference and statres.st_nlink > 1 and \
- inode in self.inodes and arcname != self.inodes[inode]:
- # Is it a hardlink to an already
- # archived file?
- type = LNKTYPE
- linkname = self.inodes[inode]
- else:
- # The inode is added only if its valid.
- # For win32 it is always 0.
- type = REGTYPE
- if inode[0]:
- self.inodes[inode] = arcname
- elif stat.S_ISDIR(stmd):
- type = DIRTYPE
- elif stat.S_ISFIFO(stmd):
- type = FIFOTYPE
- elif stat.S_ISLNK(stmd):
- type = SYMTYPE
- linkname = os.readlink(name)
- elif stat.S_ISCHR(stmd):
- type = CHRTYPE
- elif stat.S_ISBLK(stmd):
- type = BLKTYPE
- else:
- return None
-
- # Fill the TarInfo object with all
- # information we can get.
- tarinfo.name = arcname
- tarinfo.mode = stmd
- tarinfo.uid = statres.st_uid
- tarinfo.gid = statres.st_gid
- if type == REGTYPE:
- tarinfo.size = statres.st_size
- else:
- tarinfo.size = 0L
- tarinfo.mtime = statres.st_mtime
- tarinfo.type = type
- tarinfo.linkname = linkname
- if pwd:
- try:
- tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
- except KeyError:
- pass
- if grp:
- try:
- tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
- except KeyError:
- pass
-
- if type in (CHRTYPE, BLKTYPE):
- if hasattr(os, "major") and hasattr(os, "minor"):
- tarinfo.devmajor = os.major(statres.st_rdev)
- tarinfo.devminor = os.minor(statres.st_rdev)
- return tarinfo
-
- def list(self, verbose=True):
- """Print a table of contents to sys.stdout. If `verbose' is False, only
- the names of the members are printed. If it is True, an `ls -l'-like
- output is produced.
- """
- self._check()
-
- for tarinfo in self:
- if verbose:
- print filemode(tarinfo.mode),
- print "%s/%s" % (tarinfo.uname or tarinfo.uid,
- tarinfo.gname or tarinfo.gid),
- if tarinfo.ischr() or tarinfo.isblk():
- print "%10s" % ("%d,%d" \
- % (tarinfo.devmajor, tarinfo.devminor)),
- else:
- print "%10d" % tarinfo.size,
- print "%d-%02d-%02d %02d:%02d:%02d" \
- % time.localtime(tarinfo.mtime)[:6],
-
- print tarinfo.name + ("/" if tarinfo.isdir() else ""),
-
- if verbose:
- if tarinfo.issym():
- print "->", tarinfo.linkname,
- if tarinfo.islnk():
- print "link to", tarinfo.linkname,
- print
-
- def add(self, name, arcname=None, recursive=True, exclude=None, filter=None):
- """Add the file `name' to the archive. `name' may be any type of file
- (directory, fifo, symbolic link, etc.). If given, `arcname'
- specifies an alternative name for the file in the archive.
- Directories are added recursively by default. This can be avoided by
- setting `recursive' to False. `exclude' is a function that should
- return True for each filename to be excluded. `filter' is a function
- that expects a TarInfo object argument and returns the changed
- TarInfo object, if it returns None the TarInfo object will be
- excluded from the archive.
- """
- self._check("aw")
-
- if arcname is None:
- arcname = name
-
- # Exclude pathnames.
- if exclude is not None:
- import warnings
- warnings.warn("use the filter argument instead",
- DeprecationWarning, 2)
- if exclude(name):
- self._dbg(2, "tarfile: Excluded %r" % name)
- return
-
- # Skip if somebody tries to archive the archive...
- if self.name is not None and os.path.abspath(name) == self.name:
- self._dbg(2, "tarfile: Skipped %r" % name)
- return
-
- self._dbg(1, name)
-
- # Create a TarInfo object from the file.
- tarinfo = self.gettarinfo(name, arcname)
-
- if tarinfo is None:
- self._dbg(1, "tarfile: Unsupported type %r" % name)
- return
-
- # Change or exclude the TarInfo object.
- if filter is not None:
- tarinfo = filter(tarinfo)
- if tarinfo is None:
- self._dbg(2, "tarfile: Excluded %r" % name)
- return
-
- # Append the tar header and data to the archive.
- if tarinfo.isreg():
- f = bltn_open(name, "rb")
- self.addfile(tarinfo, f)
- f.close()
-
- elif tarinfo.isdir():
- self.addfile(tarinfo)
- if recursive:
- for f in os.listdir(name):
- self.add(os.path.join(name, f), os.path.join(arcname, f),
- recursive, exclude, filter)
-
- else:
- self.addfile(tarinfo)
-
- def addfile(self, tarinfo, fileobj=None):
- """Add the TarInfo object `tarinfo' to the archive. If `fileobj' is
- given, tarinfo.size bytes are read from it and added to the archive.
- You can create TarInfo objects using gettarinfo().
- On Windows platforms, `fileobj' should always be opened with mode
- 'rb' to avoid irritation about the file size.
- """
- self._check("aw")
-
- tarinfo = copy.copy(tarinfo)
-
- buf = tarinfo.tobuf(self.format, self.encoding, self.errors)
- self.fileobj.write(buf)
- self.offset += len(buf)
-
- # If there's data to follow, append it.
- if fileobj is not None:
- copyfileobj(fileobj, self.fileobj, tarinfo.size)
- blocks, remainder = divmod(tarinfo.size, BLOCKSIZE)
- if remainder > 0:
- self.fileobj.write(NUL * (BLOCKSIZE - remainder))
- blocks += 1
- self.offset += blocks * BLOCKSIZE
-
- self.members.append(tarinfo)
-
- def extractall(self, path=".", members=None):
- """Extract all members from the archive to the current working
- directory and set owner, modification time and permissions on
- directories afterwards. `path' specifies a different directory
- to extract to. `members' is optional and must be a subset of the
- list returned by getmembers().
- """
- directories = []
-
- if members is None:
- members = self
-
- for tarinfo in members:
- if tarinfo.isdir():
- # Extract directories with a safe mode.
- directories.append(tarinfo)
- tarinfo = copy.copy(tarinfo)
- tarinfo.mode = 0700
- self.extract(tarinfo, path)
-
- # Reverse sort directories.
- directories.sort(key=operator.attrgetter('name'))
- directories.reverse()
-
- # Set correct owner, mtime and filemode on directories.
- for tarinfo in directories:
- dirpath = os.path.join(path, tarinfo.name)
- try:
- self.chown(tarinfo, dirpath)
- self.utime(tarinfo, dirpath)
- self.chmod(tarinfo, dirpath)
- except ExtractError, e:
- if self.errorlevel > 1:
- raise
- else:
- self._dbg(1, "tarfile: %s" % e)
-
- def extract(self, member, path=""):
- """Extract a member from the archive to the current working directory,
- using its full name. Its file information is extracted as accurately
- as possible. `member' may be a filename or a TarInfo object. You can
- specify a different directory using `path'.
- """
- self._check("r")
-
- if isinstance(member, basestring):
- tarinfo = self.getmember(member)
- else:
- tarinfo = member
-
- # Prepare the link target for makelink().
- if tarinfo.islnk():
- tarinfo._link_target = os.path.join(path, tarinfo.linkname)
-
- try:
- self._extract_member(tarinfo, os.path.join(path, tarinfo.name))
- except EnvironmentError, e:
- if self.errorlevel > 0:
- raise
- else:
- if e.filename is None:
- self._dbg(1, "tarfile: %s" % e.strerror)
- else:
- self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename))
- except ExtractError, e:
- if self.errorlevel > 1:
- raise
- else:
- self._dbg(1, "tarfile: %s" % e)
-
- def extractfile(self, member):
- """Extract a member from the archive as a file object. `member' may be
- a filename or a TarInfo object. If `member' is a regular file, a
- file-like object is returned. If `member' is a link, a file-like
- object is constructed from the link's target. If `member' is none of
- the above, None is returned.
- The file-like object is read-only and provides the following
- methods: read(), readline(), readlines(), seek() and tell()
- """
- self._check("r")
-
- if isinstance(member, basestring):
- tarinfo = self.getmember(member)
- else:
- tarinfo = member
-
- if tarinfo.isreg():
- return self.fileobject(self, tarinfo)
-
- elif tarinfo.type not in SUPPORTED_TYPES:
- # If a member's type is unknown, it is treated as a
- # regular file.
- return self.fileobject(self, tarinfo)
-
- elif tarinfo.islnk() or tarinfo.issym():
- if isinstance(self.fileobj, _Stream):
- # A small but ugly workaround for the case that someone tries
- # to extract a (sym)link as a file-object from a non-seekable
- # stream of tar blocks.
- raise StreamError("cannot extract (sym)link as file object")
- else:
- # A (sym)link's file object is its target's file object.
- return self.extractfile(self._find_link_target(tarinfo))
- else:
- # If there's no data associated with the member (directory, chrdev,
- # blkdev, etc.), return None instead of a file object.
- return None
-
- def _extract_member(self, tarinfo, targetpath):
- """Extract the TarInfo object tarinfo to a physical
- file called targetpath.
- """
- # Fetch the TarInfo object for the given name
- # and build the destination pathname, replacing
- # forward slashes to platform specific separators.
- targetpath = targetpath.rstrip("/")
- targetpath = targetpath.replace("/", os.sep)
-
- # Create all upper directories.
- upperdirs = os.path.dirname(targetpath)
- if upperdirs and not os.path.exists(upperdirs):
- # Create directories that are not part of the archive with
- # default permissions.
- os.makedirs(upperdirs)
-
- if tarinfo.islnk() or tarinfo.issym():
- self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname))
- else:
- self._dbg(1, tarinfo.name)
-
- if tarinfo.isreg():
- self.makefile(tarinfo, targetpath)
- elif tarinfo.isdir():
- self.makedir(tarinfo, targetpath)
- elif tarinfo.isfifo():
- self.makefifo(tarinfo, targetpath)
- elif tarinfo.ischr() or tarinfo.isblk():
- self.makedev(tarinfo, targetpath)
- elif tarinfo.islnk() or tarinfo.issym():
- self.makelink(tarinfo, targetpath)
- elif tarinfo.type not in SUPPORTED_TYPES:
- self.makeunknown(tarinfo, targetpath)
- else:
- self.makefile(tarinfo, targetpath)
-
- self.chown(tarinfo, targetpath)
- if not tarinfo.issym():
- self.chmod(tarinfo, targetpath)
- self.utime(tarinfo, targetpath)
-
- #--------------------------------------------------------------------------
- # Below are the different file methods. They are called via
- # _extract_member() when extract() is called. They can be replaced in a
- # subclass to implement other functionality.
-
- def makedir(self, tarinfo, targetpath):
- """Make a directory called targetpath.
- """
- try:
- # Use a safe mode for the directory, the real mode is set
- # later in _extract_member().
- os.mkdir(targetpath, 0700)
- except EnvironmentError, e:
- if e.errno != errno.EEXIST:
- raise
-
- def makefile(self, tarinfo, targetpath):
- """Make a file called targetpath.
- """
- source = self.extractfile(tarinfo)
- target = bltn_open(targetpath, "wb")
- copyfileobj(source, target)
- source.close()
- target.close()
-
- def makeunknown(self, tarinfo, targetpath):
- """Make a file from a TarInfo object with an unknown type
- at targetpath.
- """
- self.makefile(tarinfo, targetpath)
- self._dbg(1, "tarfile: Unknown file type %r, " \
- "extracted as regular file." % tarinfo.type)
-
- def makefifo(self, tarinfo, targetpath):
- """Make a fifo called targetpath.
- """
- if hasattr(os, "mkfifo"):
- os.mkfifo(targetpath)
- else:
- raise ExtractError("fifo not supported by system")
-
- def makedev(self, tarinfo, targetpath):
- """Make a character or block device called targetpath.
- """
- if not hasattr(os, "mknod") or not hasattr(os, "makedev"):
- raise ExtractError("special devices not supported by system")
-
- mode = tarinfo.mode
- if tarinfo.isblk():
- mode |= stat.S_IFBLK
- else:
- mode |= stat.S_IFCHR
-
- os.mknod(targetpath, mode,
- os.makedev(tarinfo.devmajor, tarinfo.devminor))
-
- def makelink(self, tarinfo, targetpath):
- """Make a (symbolic) link called targetpath. If it cannot be created
- (platform limitation), we try to make a copy of the referenced file
- instead of a link.
- """
- if hasattr(os, "symlink") and hasattr(os, "link"):
- # For systems that support symbolic and hard links.
- if tarinfo.issym():
- if os.path.lexists(targetpath):
- os.unlink(targetpath)
- os.symlink(tarinfo.linkname, targetpath)
- else:
- # See extract().
- if os.path.exists(tarinfo._link_target):
- if os.path.lexists(targetpath):
- os.unlink(targetpath)
- os.link(tarinfo._link_target, targetpath)
- else:
- self._extract_member(self._find_link_target(tarinfo), targetpath)
- else:
- try:
- self._extract_member(self._find_link_target(tarinfo), targetpath)
- except KeyError:
- raise ExtractError("unable to resolve link inside archive")
-
- def chown(self, tarinfo, targetpath):
- """Set owner of targetpath according to tarinfo.
- """
- if pwd and hasattr(os, "geteuid") and os.geteuid() == 0:
- # We have to be root to do so.
- try:
- g = grp.getgrnam(tarinfo.gname)[2]
- except KeyError:
- try:
- g = grp.getgrgid(tarinfo.gid)[2]
- except KeyError:
- g = os.getgid()
- try:
- u = pwd.getpwnam(tarinfo.uname)[2]
- except KeyError:
- try:
- u = pwd.getpwuid(tarinfo.uid)[2]
- except KeyError:
- u = os.getuid()
- try:
- if tarinfo.issym() and hasattr(os, "lchown"):
- os.lchown(targetpath, u, g)
- else:
- if sys.platform != "os2emx":
- os.chown(targetpath, u, g)
- except EnvironmentError, e:
- raise ExtractError("could not change owner")
-
- def chmod(self, tarinfo, targetpath):
- """Set file permissions of targetpath according to tarinfo.
- """
- if hasattr(os, 'chmod'):
- try:
- os.chmod(targetpath, tarinfo.mode)
- except EnvironmentError, e:
- raise ExtractError("could not change mode")
-
- def utime(self, tarinfo, targetpath):
- """Set modification time of targetpath according to tarinfo.
- """
- if not hasattr(os, 'utime'):
- return
- try:
- os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime))
- except EnvironmentError, e:
- raise ExtractError("could not change modification time")
-
- #--------------------------------------------------------------------------
- def next(self):
- """Return the next member of the archive as a TarInfo object, when
- TarFile is opened for reading. Return None if there is no more
- available.
- """
- self._check("ra")
- if self.firstmember is not None:
- m = self.firstmember
- self.firstmember = None
- return m
-
- # Read the next block.
- self.fileobj.seek(self.offset)
- tarinfo = None
- while True:
- try:
- tarinfo = self.tarinfo.fromtarfile(self)
- except EOFHeaderError, e:
- if self.ignore_zeros:
- self._dbg(2, "0x%X: %s" % (self.offset, e))
- self.offset += BLOCKSIZE
- continue
- except InvalidHeaderError, e:
- if self.ignore_zeros:
- self._dbg(2, "0x%X: %s" % (self.offset, e))
- self.offset += BLOCKSIZE
- continue
- elif self.offset == 0:
- raise ReadError(str(e))
- except EmptyHeaderError:
- if self.offset == 0:
- raise ReadError("empty file")
- except TruncatedHeaderError, e:
- if self.offset == 0:
- raise ReadError(str(e))
- except SubsequentHeaderError, e:
- raise ReadError(str(e))
- break
-
- if tarinfo is not None:
- self.members.append(tarinfo)
- else:
- self._loaded = True
-
- return tarinfo
-
- #--------------------------------------------------------------------------
- # Little helper methods:
-
- def _getmember(self, name, tarinfo=None, normalize=False):
- """Find an archive member by name from bottom to top.
- If tarinfo is given, it is used as the starting point.
- """
- # Ensure that all members have been loaded.
- members = self.getmembers()
-
- # Limit the member search list up to tarinfo.
- if tarinfo is not None:
- members = members[:members.index(tarinfo)]
-
- if normalize:
- name = os.path.normpath(name)
-
- for member in reversed(members):
- if normalize:
- member_name = os.path.normpath(member.name)
- else:
- member_name = member.name
-
- if name == member_name:
- return member
-
- def _load(self):
- """Read through the entire archive file and look for readable
- members.
- """
- while True:
- tarinfo = self.next()
- if tarinfo is None:
- break
- self._loaded = True
-
- def _check(self, mode=None):
- """Check if TarFile is still open, and if the operation's mode
- corresponds to TarFile's mode.
- """
- if self.closed:
- raise IOError("%s is closed" % self.__class__.__name__)
- if mode is not None and self.mode not in mode:
- raise IOError("bad operation for mode %r" % self.mode)
-
- def _find_link_target(self, tarinfo):
- """Find the target member of a symlink or hardlink member in the
- archive.
- """
- if tarinfo.issym():
- # Always search the entire archive.
- linkname = os.path.dirname(tarinfo.name) + "/" + tarinfo.linkname
- limit = None
- else:
- # Search the archive before the link, because a hard link is
- # just a reference to an already archived file.
- linkname = tarinfo.linkname
- limit = tarinfo
-
- member = self._getmember(linkname, tarinfo=limit, normalize=True)
- if member is None:
- raise KeyError("linkname %r not found" % linkname)
- return member
-
- def __iter__(self):
- """Provide an iterator object.
- """
- if self._loaded:
- return iter(self.members)
- else:
- return TarIter(self)
-
- def _dbg(self, level, msg):
- """Write debugging output to sys.stderr.
- """
- if level <= self.debug:
- print >> sys.stderr, msg
-
- def __enter__(self):
- self._check()
- return self
-
- def __exit__(self, type, value, traceback):
- if type is None:
- self.close()
- else:
- # An exception occurred. We must not call close() because
- # it would try to write end-of-archive blocks and padding.
- if not self._extfileobj:
- self.fileobj.close()
- self.closed = True
-# class TarFile
-
-class TarIter:
- """Iterator Class.
-
- for tarinfo in TarFile(...):
- suite...
- """
-
- def __init__(self, tarfile):
- """Construct a TarIter object.
- """
- self.tarfile = tarfile
- self.index = 0
- def __iter__(self):
- """Return iterator object.
- """
- return self
- def next(self):
- """Return the next item using TarFile's next() method.
- When all members have been read, set TarFile as _loaded.
- """
- # Fix for SF #1100429: Under rare circumstances it can
- # happen that getmembers() is called during iteration,
- # which will cause TarIter to stop prematurely.
- if not self.tarfile._loaded:
- tarinfo = self.tarfile.next()
- if not tarinfo:
- self.tarfile._loaded = True
- raise StopIteration
- else:
- try:
- tarinfo = self.tarfile.members[self.index]
- except IndexError:
- raise StopIteration
- self.index += 1
- return tarinfo
-
-# Helper classes for sparse file support
-class _section:
- """Base class for _data and _hole.
- """
- def __init__(self, offset, size):
- self.offset = offset
- self.size = size
- def __contains__(self, offset):
- return self.offset <= offset < self.offset + self.size
-
-class _data(_section):
- """Represent a data section in a sparse file.
- """
- def __init__(self, offset, size, realpos):
- _section.__init__(self, offset, size)
- self.realpos = realpos
-
-class _hole(_section):
- """Represent a hole section in a sparse file.
- """
- pass
-
-class _ringbuffer(list):
- """Ringbuffer class which increases performance
- over a regular list.
- """
- def __init__(self):
- self.idx = 0
- def find(self, offset):
- idx = self.idx
- while True:
- item = self[idx]
- if offset in item:
- break
- idx += 1
- if idx == len(self):
- idx = 0
- if idx == self.idx:
- # End of File
- return None
- self.idx = idx
- return item
-
-#---------------------------------------------
-# zipfile compatible TarFile class
-#---------------------------------------------
-TAR_PLAIN = 0 # zipfile.ZIP_STORED
-TAR_GZIPPED = 8 # zipfile.ZIP_DEFLATED
-class TarFileCompat:
- """TarFile class compatible with standard module zipfile's
- ZipFile class.
- """
- def __init__(self, file, mode="r", compression=TAR_PLAIN):
- from warnings import warnpy3k
- warnpy3k("the TarFileCompat class has been removed in Python 3.0",
- stacklevel=2)
- if compression == TAR_PLAIN:
- self.tarfile = TarFile.taropen(file, mode)
- elif compression == TAR_GZIPPED:
- self.tarfile = TarFile.gzopen(file, mode)
- else:
- raise ValueError("unknown compression constant")
- if mode[0:1] == "r":
- members = self.tarfile.getmembers()
- for m in members:
- m.filename = m.name
- m.file_size = m.size
- m.date_time = time.gmtime(m.mtime)[:6]
- def namelist(self):
- return map(lambda m: m.name, self.infolist())
- def infolist(self):
- return filter(lambda m: m.type in REGULAR_TYPES,
- self.tarfile.getmembers())
- def printdir(self):
- self.tarfile.list()
- def testzip(self):
- return
- def getinfo(self, name):
- return self.tarfile.getmember(name)
- def read(self, name):
- return self.tarfile.extractfile(self.tarfile.getmember(name)).read()
- def write(self, filename, arcname=None, compress_type=None):
- self.tarfile.add(filename, arcname)
- def writestr(self, zinfo, bytes):
- try:
- from cStringIO import StringIO
- except ImportError:
- from StringIO import StringIO
- import calendar
- tinfo = TarInfo(zinfo.filename)
- tinfo.size = len(bytes)
- tinfo.mtime = calendar.timegm(zinfo.date_time)
- self.tarfile.addfile(tinfo, StringIO(bytes))
- def close(self):
- self.tarfile.close()
-#class TarFileCompat
-
-#--------------------
-# exported functions
-#--------------------
-def is_tarfile(name):
- """Return True if name points to a tar archive that we
- are able to handle, else return False.
- """
- try:
- t = open(name)
- t.close()
- return True
- except TarError:
- return False
-
-bltn_open = open
-open = TarFile.open
diff --git a/setup.py b/setup.py
index 7847163..77ba00d 100755
--- a/setup.py
+++ b/setup.py
@@ -7,7 +7,7 @@ import subprocess
from distutils.core import setup
-from mat import mat
+from lib import mat
#Remove MANIFEST file, since distutils
#doesn't properly update it when
@@ -20,7 +20,7 @@ def l10n():
'''
Compile .po files to .mo
'''
- for language in glob.glob('locale/*'):
+ for language in glob.glob('locale/*/'):
fpath = os.path.join(language, 'LC_MESSAGES', 'mat-gui.po')
output = fpath[:-2] + 'mo'
subprocess.call(['msgfmt', fpath, '-o', output])
@@ -36,8 +36,8 @@ setup(
platforms = 'linux',
license = 'GPLv2',
url = 'https://mat.boum.org',
- packages = ['mat', 'mat.hachoir_editor', 'mat.bencode', 'mat.tarfile'],
- scripts = ['mat-cli', 'mat-gui'],
+ packages = ['lib', 'lib.hachoir_editor', 'lib.bencode', 'lib.tarfile'],
+ scripts = ['mat', 'mat-gui'],
data_files = [
( 'share/applications', ['mat.desktop'] ),
( 'share/mat', ['FORMATS'] ),
diff --git a/test/clitest.py b/test/clitest.py
index 9232745..5b0f0c3 100644
--- a/test/clitest.py
+++ b/test/clitest.py
@@ -8,7 +8,7 @@ import subprocess
import sys
sys.path.append('..')
-from mat import mat
+from lib import mat
import test
@@ -19,14 +19,14 @@ class TestRemovecli(test.MATTest):
def test_remove(self):
'''make sure that the cli remove all compromizing meta'''
for _, dirty in self.file_list:
- subprocess.call(['../mat-cli', dirty])
+ subprocess.call(['../mat', dirty])
current_file = mat.create_class_file(dirty, False, True)
self.assertTrue(current_file.is_clean())
def test_remove_empty(self):
'''Test removal with clean files'''
for clean, _ in self.file_list:
- subprocess.call(['../mat-cli', clean])
+ subprocess.call(['../mat', clean])
current_file = mat.create_class_file(clean, False, True)
self.assertTrue(current_file.is_clean())
@@ -38,7 +38,7 @@ class TestListcli(test.MATTest):
def test_list_clean(self):
'''check if get_meta returns meta'''
for clean, _ in self.file_list:
- proc = subprocess.Popen(['../mat-cli', '-d', clean],
+ proc = subprocess.Popen(['../mat', '-d', clean],
stdout=subprocess.PIPE)
stdout, _ = proc.communicate()
self.assertEqual(stdout.strip('\n'), "[+] File %s :\nNo harmful \
@@ -47,7 +47,7 @@ metadata found" % clean)
def test_list_dirty(self):
'''check if get_meta returns all the expected meta'''
for _, dirty in self.file_list:
- proc = subprocess.Popen(['../mat-cli', '-d', dirty],
+ proc = subprocess.Popen(['../mat', '-d', dirty],
stdout=subprocess.PIPE)
stdout, _ = proc.communicate()
self.assertNotEqual(stdout, "[+] File %s" % dirty)
@@ -60,7 +60,7 @@ class TestisCleancli(test.MATTest):
def test_clean(self):
'''test is_clean on clean files'''
for clean, _ in self.file_list:
- proc = subprocess.Popen(['../mat-cli', '-c', clean],
+ proc = subprocess.Popen(['../mat', '-c', clean],
stdout=subprocess.PIPE)
stdout, _ = proc.communicate()
self.assertEqual(stdout.strip('\n'), '[+] %s is clean' % clean)
@@ -68,7 +68,7 @@ class TestisCleancli(test.MATTest):
def test_dirty(self):
'''test is_clean on dirty files'''
for _, dirty in self.file_list:
- proc = subprocess.Popen(['../mat-cli', '-c', dirty],
+ proc = subprocess.Popen(['../mat', '-c', dirty],
stdout=subprocess.PIPE)
stdout, _ = proc.communicate()
self.assertEqual(stdout.strip('\n'), '[+] %s is not clean' % dirty)
@@ -79,19 +79,19 @@ class TestFileAttributes(unittest.TestCase):
test various stuffs about files (readable, writable, exist, ...)
'''
def test_not_readable(self):
- proc = subprocess.Popen(['../mat-cli', 'not_readable'],
+ proc = subprocess.Popen(['../mat', 'not_readable'],
stdout=subprocess.PIPE)
stdout, _ = proc.communicate()
self.assertEqual(stdout.strip('\n'), 'Unable to pocess %s' % 'not_readable')
def test_not_writtable(self):
- proc = subprocess.Popen(['../mat-cli', 'not_writtable'],
+ proc = subprocess.Popen(['../mat', 'not_writtable'],
stdout=subprocess.PIPE)
stdout, _ = proc.communicate()
self.assertEqual(stdout.strip('\n'), 'Unable to pocess %s' % 'not_writtable')
def test_not_exist(self):
- proc = subprocess.Popen(['../mat-cli', 'ilikecookies'],
+ proc = subprocess.Popen(['../mat', 'ilikecookies'],
stdout=subprocess.PIPE)
stdout, _ = proc.communicate()
self.assertEqual(stdout.strip('\n'), 'Unable to pocess %s' % 'ilikecookies')
diff --git a/test/libtest.py b/test/libtest.py
index 6217b69..9ac12c0 100644
--- a/test/libtest.py
+++ b/test/libtest.py
@@ -8,7 +8,7 @@ import unittest
import test
import sys
sys.path.append('..')
-from mat import mat
+from lib import mat
class TestRemovelib(test.MATTest):
--
cgit v1.3