summaryrefslogtreecommitdiff
path: root/lib
diff options
context:
space:
mode:
authorjvoisin2012-02-01 22:56:04 +0100
committerjvoisin2012-02-01 22:56:04 +0100
commit544fe9bf1782a027b3f31bf4c10a050d783e32ac (patch)
treea8dd60b9ae45efea4875fdb827070531f0199717 /lib
parent9ea6dc6960cebfa70d18ba8ee49d775ea91c9b34 (diff)
Rename mat-cli to mat-gui
Diffstat (limited to 'lib')
-rw-r--r--lib/FORMATS94
-rw-r--r--lib/__init__.py1
-rw-r--r--lib/archive.py291
-rw-r--r--lib/audio.py100
-rw-r--r--lib/bencode/__init__.py1
-rw-r--r--lib/bencode/bencode.py152
-rw-r--r--lib/exiftool.py95
-rw-r--r--lib/hachoir_editor/__init__.py8
-rw-r--r--lib/hachoir_editor/field.py69
-rw-r--r--lib/hachoir_editor/fieldset.py352
-rw-r--r--lib/hachoir_editor/typed_field.py253
-rw-r--r--lib/images.py48
-rw-r--r--lib/mat.py150
-rw-r--r--lib/misc.py63
-rw-r--r--lib/office.py305
-rw-r--r--lib/parser.py130
-rw-r--r--lib/strippers.py48
-rw-r--r--lib/tarfile/__init__.py1
-rw-r--r--lib/tarfile/tarfile.py2593
19 files changed, 4754 insertions, 0 deletions
diff --git a/lib/FORMATS b/lib/FORMATS
new file mode 100644
index 0000000..c497524
--- /dev/null
+++ b/lib/FORMATS
@@ -0,0 +1,94 @@
1<xml>
2 <format>
3 <name>Portable Network Graphics</name>
4 <extension>.png</extension>
5 <support>full</support>
6 <metadata>textual metadata + date</metadata>
7 <method>removal of harmful fields is done with hachoir</method>
8 </format>
9
10 <format>
11 <name>Jpeg</name>
12 <extension>.jpeg, .jpg</extension>
13 <support>full</support>
14 <metadata>comment + exif/photoshop/adobe</metadata>
15 <method>removal of harmful fields is done with hachoir</method>
16 </format>
17
18 <format>
19 <name>Open Document</name>
20 <extension>.odt, .odx, .ods, ...</extension>
21 <support>full</support>
22 <metadata>a meta.xml file</metadata>
23 <method>removal of the meta.xml file</method>
24 </format>
25
26 <format>
27 <name>Office Openxml</name>
28 <extension>.docx, .pptx, .xlsx, ...</extension>
29 <support>full</support>
    <metadata>a docProps folder containing xml metadata files</metadata>
31 <method>removal of the docProps folder</method>
32 </format>
33
34 <format>
35 <name>Portable Document Fileformat</name>
36 <extension>.pdf</extension>
37 <support>full</support>
38 <metadata>a lot</metadata>
39 <method>rendering of the pdf file on a cairo surface with the help of
40 poppler in order to remove all the internal metadata,
41 then removal of the remaining metadata fields of the pdf itself with
42 pdfrw (the next version of python-cairo will support metadata,
43 so we should get rid of pdfrw)</method>
44 </format>
45
46 <format>
47 <name>Tape ARchive</name>
48 <extension>.tar, .tar.bz2, .tar.gz</extension>
49 <support>full</support>
    <metadata>metadata from the file itself, metadata from the files contained
    in the archive, and metadata added by tar to the file at the
    creation of the archive</metadata>
    <method>extraction of each file, treatment of the file, add treated file
    to a new archive, right before the add, remove the metadata added by tar
    itself. When the new archive is complete, remove all its metadata.</method>
56 </format>
57
58 <format>
59 <name>Zip</name>
60 <extension>.zip</extension>
    <support>partial</support>
    <metadata>metadata from the file itself, metadata from the files contained
    in the archive, and metadata added by zip to the file when added to
    the archive.
    </metadata>
    <method>extraction of each file, treatment of the file, add treated file
    to a new archive. When the new archive is complete, remove all its metadata</method>
68 <remaining>metadata added by zip itself to internal files</remaining>
69 </format>
70
71 <format>
72 <name>MPEG Audio</name>
73 <extension>.mp3, .mp2, .mp1</extension>
74 <support>full</support>
75 <metadata>id3</metadata>
76 <method>removal of harmful fields is done with hachoir</method>
77 </format>
78
79 <format>
80 <name>Ogg Vorbis</name>
81 <extension>.ogg</extension>
82 <support>full</support>
83 <metadata>Vorbis</metadata>
84 <method>removal of harmful fields is done with mutagen</method>
85 </format>
86
87 <format>
88 <name>Free Lossless Audio Codec</name>
89 <extension>.flac</extension>
90 <support>full</support>
91 <metadata>Flac, Vorbis</metadata>
92 <method>removal of harmful fields is done with mutagen</method>
93 </format>
94</xml>
diff --git a/lib/__init__.py b/lib/__init__.py
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/lib/__init__.py
@@ -0,0 +1 @@
diff --git a/lib/archive.py b/lib/archive.py
new file mode 100644
index 0000000..9993102
--- /dev/null
+++ b/lib/archive.py
@@ -0,0 +1,291 @@
1'''
2 Take care of archives formats
3'''
4
5import zipfile
6import shutil
7import os
8import logging
9import tempfile
10
11import parser
12import mat
13from tarfile import tarfile
14
15
class GenericArchiveStripper(parser.GenericParser):
    '''
    Represent a generic archive: common base class for the zip/tar
    strippers below.

    Members are extracted into a private temporary directory; subclasses
    implement _remove_all() to build a cleaned copy of the archive.
    '''
    def __init__(self, filename, parser, mime, backup, add2archive):
        # add2archive: if True, files whose format is unsupported (or
        # harmless) are still re-added to the cleaned archive.
        super(GenericArchiveStripper, self).__init__(filename, parser, mime,
                backup, add2archive)
        self.compression = ''  # tarfile mode suffix ('', ':gz', ':bz2')
        self.add2archive = add2archive
        self.tempdir = tempfile.mkdtemp()

    def __del__(self):
        '''
        Remove the files inside the temp dir,
        then remove the temp dir
        '''
        # secure_remove() wipes file content before unlinking, so the
        # extracted (possibly sensitive) data does not linger on disk.
        for root, dirs, files in os.walk(self.tempdir):
            for item in files:
                path_file = os.path.join(root, item)
                mat.secure_remove(path_file)
        shutil.rmtree(self.tempdir)

    def remove_all(self):
        '''
        Call _remove_all() with in argument : "normal"
        '''
        return self._remove_all('normal')

    def remove_all_strict(self):
        '''
        call _remove_all() with in argument : "strict"
        '''
        return self._remove_all('strict')

    def _remove_all(self, method):
        '''
        Remove all meta, normal way if method is "normal",
        else, use the strict way (with possible data loss).
        Must be implemented by subclasses.
        '''
        raise NotImplementedError
56
57
class ZipStripper(GenericArchiveStripper):
    '''
    Represent a zip file
    '''
    def is_file_clean(self, fileinfo):
        '''
        Check if a ZipInfo object is clean of metadatas added
        by zip itself, independently of the corresponding file metadatas.

        Returns False as soon as one zip-level field carries data.
        '''
        # Fixed: use value comparison; "is"/"is not" on literals only
        # worked by accident of CPython's interning.
        if fileinfo.comment != '':
            return False
        elif fileinfo.date_time != 0:
            # NOTE(review): date_time is a 6-tuple, so this test is
            # always true — confirm the intended "unset" value.
            return False
        elif fileinfo.create_system != 0:
            return False
        elif fileinfo.create_version != 0:
            return False
        else:
            return True

    def is_clean(self):
        '''
        Check if the given file is clean from harmful metadata:
        the zip-level comment, plus the metadata of every supported
        file contained in the archive.
        '''
        zipin = zipfile.ZipFile(self.filename, 'r')
        try:
            if zipin.comment != '':
                logging.debug('%s has a comment' % self.filename)
                return False
            for item in zipin.infolist():
                #I have not found a way to remove the crap added by zipfile :/
                #if not self.is_file_clean(item):
                #    logging.debug('%s from %s has compromizing zipinfo' %
                #        (item.filename, self.filename))
                #    return False
                zipin.extract(item, self.tempdir)
                name = os.path.join(self.tempdir, item.filename)
                if os.path.isfile(name):
                    try:
                        cfile = mat.create_class_file(name, False,
                            self.add2archive)
                        if not cfile.is_clean():
                            return False
                    except Exception:
                        # Fixed: was a bare "except:" (also swallowed
                        # SystemExit/KeyboardInterrupt). Unsupported members
                        # count as dirty unless they are known harmless.
                        logging.info('%s\'s fileformat is not supported, or is a \
harmless format' % item.filename)
                        _, ext = os.path.splitext(name)
                        bname = os.path.basename(item.filename)
                        if ext not in parser.NOMETA:
                            if bname != 'mimetype' and bname != '.rels':
                                return False
            return True
        finally:
            # Fixed: the handle leaked on every early return.
            zipin.close()

    def get_meta(self):
        '''
        Return all the metadata of a ZipFile (don't return metadatas
        of contained files : should it ?)
        '''
        zipin = zipfile.ZipFile(self.filename, 'r')
        metadata = {}
        for field in zipin.infolist():
            zipmeta = {}
            zipmeta['comment'] = field.comment
            zipmeta['modified'] = field.date_time
            zipmeta['system'] = field.create_system
            zipmeta['zip_version'] = field.create_version
            metadata[field.filename] = zipmeta
        metadata["%s comment" % self.filename] = zipin.comment
        zipin.close()
        return metadata

    def _remove_all(self, method):
        '''
        So far, the zipfile module does not allow to write a ZipInfo
        object into a zipfile (and it's a shame !) : so data added
        by zipfile itself could not be removed. It's a big concern.
        Is shiping a patched version of zipfile.py a good idea ?
        '''
        zipin = zipfile.ZipFile(self.filename, 'r')
        zipout = zipfile.ZipFile(self.output, 'w', allowZip64=True)
        for item in zipin.infolist():
            zipin.extract(item, self.tempdir)
            name = os.path.join(self.tempdir, item.filename)
            if os.path.isfile(name):
                try:
                    cfile = mat.create_class_file(name, False,
                        self.add2archive)
                    # Fixed: was "method is 'normal'" (identity test on a
                    # string literal).
                    if method == 'normal':
                        cfile.remove_all()
                    else:
                        cfile.remove_all_strict()
                    logging.debug('Processing %s from %s' % (item.filename,
                        self.filename))
                    zipout.write(name, item.filename)
                except Exception:
                    logging.info('%s\'s format is not supported or harmless' %
                        item.filename)
                    _, ext = os.path.splitext(name)
                    if self.add2archive or ext in parser.NOMETA:
                        zipout.write(name, item.filename)
        zipout.comment = ''
        zipin.close()
        zipout.close()
        logging.info('%s treated' % self.filename)
        self.do_backup()
        return True
165
166
class TarStripper(GenericArchiveStripper):
    '''
    Represent a tarfile archive
    '''
    def _remove(self, current_file):
        '''
        remove the meta added by tar itself to the file
        (used as the filter= callback of TarFile.add)
        '''
        current_file.mtime = 0
        current_file.uid = 0
        current_file.gid = 0
        current_file.uname = ''
        current_file.gname = ''
        return current_file

    def _remove_all(self, method):
        '''
        Clean every supported member, then re-add it (through the
        _remove() filter) to a fresh archive written to self.output.
        '''
        tarin = tarfile.open(self.filename, 'r' + self.compression)
        tarout = tarfile.open(self.output, 'w' + self.compression)
        for item in tarin.getmembers():
            tarin.extract(item, self.tempdir)
            name = os.path.join(self.tempdir, item.name)
            # Fixed: was "item.type is '0'" (identity test on a literal).
            if item.type == tarfile.REGTYPE:  # is item a regular file ?
                #no backup file
                try:
                    cfile = mat.create_class_file(name, False,
                        self.add2archive)
                    # Fixed: was "method is 'normal'".
                    if method == 'normal':
                        cfile.remove_all()
                    else:
                        cfile.remove_all_strict()
                    tarout.add(name, item.name, filter=self._remove)
                except Exception:
                    logging.info('%s\' format is not supported or harmless' %
                        item.name)
                    _, ext = os.path.splitext(name)
                    if self.add2archive or ext in parser.NOMETA:
                        tarout.add(name, item.name, filter=self._remove)
        tarin.close()
        tarout.close()
        self.do_backup()
        return True

    def is_file_clean(self, current_file):
        '''
        Check metadatas added by tar: returns False as soon as one
        tar-level field of the TarInfo carries data.
        '''
        # Fixed: value comparisons instead of "is not" on int/str
        # literals (only worked by accident of CPython interning).
        if current_file.mtime != 0:
            return False
        elif current_file.uid != 0:
            return False
        elif current_file.gid != 0:
            return False
        elif current_file.uname != '':
            return False
        elif current_file.gname != '':
            return False
        else:
            return True

    def is_clean(self):
        '''
        Check if the file is clean from harmful metadatas
        (tar-level fields and metadata of the contained files)
        '''
        tarin = tarfile.open(self.filename, 'r' + self.compression)
        for item in tarin.getmembers():
            if not self.is_file_clean(item):
                tarin.close()
                return False
            tarin.extract(item, self.tempdir)
            name = os.path.join(self.tempdir, item.name)
            if item.type == tarfile.REGTYPE:  # is item a regular file ?
                try:
                    class_file = mat.create_class_file(name,
                        False, self.add2archive)  # no backup file
                    if not class_file.is_clean():
                        tarin.close()
                        return False
                except Exception:
                    # Fixed: TarInfo has no "filename" attribute — the old
                    # "item.filename" raised inside this handler; also fixed
                    # the "foramt" typo.
                    logging.error('%s\'s format is not supported or harmless' %
                        item.name)
                    _, ext = os.path.splitext(name)
                    if ext not in parser.NOMETA:
                        tarin.close()
                        return False
        tarin.close()
        return True

    def get_meta(self):
        '''
        Return a dict with all the tar-level meta of the file,
        keyed by member name
        '''
        tarin = tarfile.open(self.filename, 'r' + self.compression)
        metadata = {}
        for current_file in tarin.getmembers():
            if current_file.type == tarfile.REGTYPE:
                if not self.is_file_clean(current_file):  # if there is meta
                    current_meta = {}
                    current_meta['mtime'] = current_file.mtime
                    current_meta['uid'] = current_file.uid
                    current_meta['gid'] = current_file.gid
                    current_meta['uname'] = current_file.uname
                    current_meta['gname'] = current_file.gname
                    metadata[current_file.name] = current_meta
        tarin.close()
        return metadata
272
273
class GzipStripper(TarStripper):
    '''
    Represent a tar.gz archive
    '''
    def __init__(self, filename, parser, mime, backup, add2archive):
        super(GzipStripper, self).__init__(
            filename, parser, mime, backup, add2archive)
        self.compression = ':gz'  # suffix appended to tarfile's open mode
282
283
class Bzip2Stripper(TarStripper):
    '''
    Represents a tar.bz2 archive
    '''
    def __init__(self, filename, parser, mime, backup, add2archive):
        super(Bzip2Stripper, self).__init__(
            filename, parser, mime, backup, add2archive)
        self.compression = ':bz2'  # suffix appended to tarfile's open mode
diff --git a/lib/audio.py b/lib/audio.py
new file mode 100644
index 0000000..ed849ee
--- /dev/null
+++ b/lib/audio.py
@@ -0,0 +1,100 @@
1'''
2 Care about audio fileformat
3'''
4try:
5 from mutagen.flac import FLAC
6 from mutagen.oggvorbis import OggVorbis
7except ImportError:
8 pass
9
10
11import parser
12import shutil
13
14
class MpegAudioStripper(parser.GenericParser):
    '''
    Represent mpeg audio file (mp3, ...)
    '''
    def _should_remove(self, field):
        # Only the id3 containers hold user-supplied metadata.
        return field.name in ("id3v1", "id3v2")
24
25
class OggStripper(parser.GenericParser):
    '''
    Represent an ogg vorbis file
    '''
    def remove_all(self):
        '''
        Delete every Vorbis comment from the file
        '''
        if self.backup is True:
            # work on a copy, keep the original untouched
            shutil.copy2(self.filename, self.output)
            self.filename = self.output

        audio = OggVorbis(self.filename)
        audio.delete()
        audio.save()
        return True

    def is_clean(self):
        '''
        Check if the "metadata" block is present in the file
        '''
        return OggVorbis(self.filename).tags == []

    def get_meta(self):
        '''
        Return the content of the metadata block if present
        '''
        audio = OggVorbis(self.filename)
        return dict((key, value) for key, value in audio.tags)
59
60
class FlacStripper(parser.GenericParser):
    '''
    Represent a Flac audio file
    '''
    def remove_all(self):
        '''
        Remove the "metadata" block from the file
        '''
        if self.backup is True:
            # work on a copy, keep the original untouched
            shutil.copy2(self.filename, self.output)
            self.filename = self.output

        audio = FLAC(self.filename)
        audio.delete()
        audio.clear_pictures()
        audio.save()
        return True

    def is_clean(self):
        '''
        Check if the "metadata" block is present in the file
        '''
        audio = FLAC(self.filename)
        return audio.tags is None and audio.pictures == []

    def get_meta(self):
        '''
        Return the content of the metadata block if present
        '''
        audio = FLAC(self.filename)
        meta = {}
        if audio.tags is not None:
            if audio.pictures != []:
                # embedded cover art is only flagged, not dumped
                meta['picture :'] = 'yes'
            for key, value in audio.tags:
                meta[key] = value
        return meta
diff --git a/lib/bencode/__init__.py b/lib/bencode/__init__.py
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/lib/bencode/__init__.py
@@ -0,0 +1 @@
diff --git a/lib/bencode/bencode.py b/lib/bencode/bencode.py
new file mode 100644
index 0000000..739ffe5
--- /dev/null
+++ b/lib/bencode/bencode.py
@@ -0,0 +1,152 @@
1# Copyright 2007 by Petru Paler
2# Copyright 2011 by Julien (jvoisin) Voisin
3#
4# Permission is hereby granted, free of charge, to any person obtaining a copy
5# of this software and associated documentation files (the "Software"), to deal
6# in the Software without restriction, including without limitation the rights
7# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8# copies of the Software, and to permit persons to whom the Software is
9# furnished to do so, subject to the following conditions:
10#
11# The above copyright notice and this permission notice shall be included in
12# all copies or substantial portions of the Software.
13#
14# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19# FROM,
20# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
21# THE SOFTWARE.
22#
23
24'''
25 A quick (and also nice) lib to bencode/bdecode torrent files
26'''
27
28
29import types
30
31
class BTFailure(Exception):
    '''Custom Exception: raised by bdecode() on malformed input'''
    pass
35
36
class Bencached(object):
    '''Custom type : cached string'''
    # __slots__: instances only ever wrap one pre-encoded string
    __slots__ = ['bencoded']

    def __init__(self, string):
        # string: an already-bencoded value, emitted verbatim by bencode()
        self.bencoded = string
43
44
def decode_int(x, f):
    '''decode an int: "i<digits>e" starting at offset f'''
    start = f + 1
    end = x.index('e', start)
    value = int(x[start:end])
    # reject non-canonical encodings: "-0" and leading zeros
    if x[start] == '-' and x[start + 1] == '0':
        raise ValueError
    if x[start] == '0' and end != start + 1:
        raise ValueError
    return (value, end + 1)
56
57
def decode_string(x, f):
    '''decode a string: "<length>:<bytes>" starting at offset f'''
    sep = x.index(':', f)
    length = int(x[f:sep])
    # a length prefix with a leading zero is not canonical bencode
    if x[f] == '0' and sep != f + 1:
        raise ValueError
    start = sep + 1
    return (x[start:start + length], start + length)
66
67
def decode_list(x, f):
    '''decode a list'''
    values = []
    pos = f + 1
    while x[pos] != 'e':  # 'e' terminates the list
        item, pos = DECODE_FUNC[x[pos]](x, pos)
        values.append(item)
    return (values, pos + 1)
76
77
def decode_dict(x, f):
    '''decode a dict'''
    values = {}
    pos = f + 1
    while x[pos] != 'e':  # 'e' terminates the dict
        key, pos = decode_string(x, pos)  # keys are always strings
        values[key], pos = DECODE_FUNC[x[pos]](x, pos)
    return (values, pos + 1)
86
87
def encode_bool(x, r):
    '''bencode a boolean as the integer 1 or 0'''
    encode_int(1 if x else 0, r)
94
95
def encode_int(x, r):
    '''bencode an integer/float: append "i<value>e" to the chunk list r'''
    r.append('i')
    r.append(str(x))
    r.append('e')
99
100
def encode_list(x, r):
    '''bencode a list/tuple: "l<items>e" appended to the chunk list r'''
    r.append('l')
    # Fixed idiom: the previous list comprehension was used purely for
    # its side effects and built a throwaway list of Nones.
    for item in x:
        ENCODE_FUNC[type(item)](item, r)
    r.append('e')
106
107
def encode_dict(x, result):
    '''bencode a dict: "d<key><value>...e", keys in sorted order'''
    result.append('d')
    for key, value in sorted(x.items()):
        # keys are strings and are encoded inline
        result.extend((str(len(key)), ':', key))
        ENCODE_FUNC[type(value)](value, result)
    result.append('e')
117
118
# Dispatch tables: the first character of the encoded data (for
# decoding) or the python type (for encoding) selects the handler.
DECODE_FUNC = {}
# BUG FIX: xrange(9) only registered the digits 0-8, so any string
# whose length starts with the digit 9 (e.g. '9:...') raised a
# KeyError and was reported as "Not a valid bencoded string".
DECODE_FUNC.update(dict([(str(x), decode_string) for x in xrange(10)]))
DECODE_FUNC['l'] = decode_list
DECODE_FUNC['d'] = decode_dict
DECODE_FUNC['i'] = decode_int


ENCODE_FUNC = {}
ENCODE_FUNC[Bencached] = lambda x, r: r.append(x.bencoded)
ENCODE_FUNC[types.IntType] = encode_int
ENCODE_FUNC[types.LongType] = encode_int
ENCODE_FUNC[types.StringType] = lambda x, r: r.extend((str(len(x)), ':', x))
ENCODE_FUNC[types.ListType] = encode_list
ENCODE_FUNC[types.TupleType] = encode_list
ENCODE_FUNC[types.DictType] = encode_dict
ENCODE_FUNC[types.BooleanType] = encode_bool
135
136
def bencode(string):
    '''bencode $string'''
    chunks = []
    ENCODE_FUNC[type(string)](string, chunks)
    return ''.join(chunks)
142
143
def bdecode(string):
    '''decode $string'''
    try:
        result, length = DECODE_FUNC[string[0]](string, 0)
    except (IndexError, KeyError, ValueError):
        raise BTFailure('Not a valid bencoded string')
    if length != len(string):
        # trailing garbage after a syntactically valid value
        raise BTFailure('Invalid bencoded value (data after valid prefix)')
    return result
diff --git a/lib/exiftool.py b/lib/exiftool.py
new file mode 100644
index 0000000..758a094
--- /dev/null
+++ b/lib/exiftool.py
@@ -0,0 +1,95 @@
1'''
2 Care about images with help of the amazing (perl) library Exiftool.
3'''
4
5import subprocess
6import parser
7
8
class ExiftoolStripper(parser.GenericParser):
    '''
    A generic stripper class using exiftool as backend
    '''

    def __init__(self, filename, parser, mime, backup, add2archive):
        super(ExiftoolStripper, self).__init__(filename, parser, mime,
                backup, add2archive)
        # Fields considered harmless: they describe the file itself,
        # not its author or origin.
        self.allowed = ['ExifTool Version Number', 'File Name', 'Directory',
            'File Size', 'File Modification Date/Time', 'File Permissions',
            'File Type', 'MIME Type', 'Image Width', 'Image Height',
            'Image Size']
        self._set_allowed()

    def _set_allowed(self):
        '''
        Set the allowed/harmless list of metadata.
        Must be implemented by subclasses.
        '''
        raise NotImplementedError

    def remove_all(self):
        '''
        Remove all metadata with help of exiftool.
        Returns False if exiftool could not be run.
        '''
        try:
            # Fixed: /dev/null was opened for *reading* (so the child's
            # writes to stdout failed) and the handle leaked.
            devnull = open('/dev/null', 'w')
            try:
                if self.backup:
                    # Note: '-All=' must be followed by a known exiftool option.
                    process = subprocess.Popen(['exiftool', '-m', '-All=',
                        '-out', self.output, self.filename],
                        stdout=devnull)
                else:
                    # Note: '-All=' must be followed by a known exiftool option.
                    process = subprocess.Popen(['exiftool', '-m', '-All=',
                        '-overwrite_original', self.filename],
                        stdout=devnull)
                process.wait()
            finally:
                devnull.close()
            return True
        except Exception:  # exiftool missing or not executable
            return False

    def _get_output_lines(self):
        '''
        Run exiftool on the file and return its "Key : Value" output
        lines (the trailing empty line included).
        '''
        out = subprocess.Popen(['exiftool', self.filename],
            stdout=subprocess.PIPE).communicate()[0]
        return out.split('\n')

    def is_clean(self):
        '''
        Check if the file is clean with help of exiftool
        '''
        for line in self._get_output_lines()[:-1]:  # last item is ''
            if line.split(':')[0].strip() not in self.allowed:
                return False
        return True

    def get_meta(self):
        '''
        Return every harmful meta with help of exiftool
        '''
        meta = {}
        for line in self._get_output_lines()[:-1]:
            key = line.split(':')[0].strip()
            if key not in self.allowed:
                meta[key] = line.split(':')[1].strip()
        return meta
75
76
class JpegStripper(ExiftoolStripper):
    '''
    Care about jpeg files with help
    of exiftool
    '''
    def _set_allowed(self):
        # purely structural jpeg fields: they cannot identify anyone
        harmless = ['JFIF Version', 'Resolution Unit', 'X Resolution',
                    'Y Resolution', 'Encoding Process', 'Bits Per Sample',
                    'Color Components', 'Y Cb Cr Sub Sampling']
        self.allowed.extend(harmless)
86
class PngStripper(ExiftoolStripper):
    '''
    Care about png files with help
    of exiftool
    '''
    def _set_allowed(self):
        # purely structural png fields: they cannot identify anyone
        harmless = ['Bit Depth', 'Color Type', 'Compression', 'Filter',
                    'Interlace', 'Pixels Per Unit X', 'Pixels Per Unit Y',
                    'Pixel Units']
        self.allowed.extend(harmless)
diff --git a/lib/hachoir_editor/__init__.py b/lib/hachoir_editor/__init__.py
new file mode 100644
index 0000000..1835676
--- /dev/null
+++ b/lib/hachoir_editor/__init__.py
@@ -0,0 +1,8 @@
1from field import (
2 EditorError, FakeField)
3from typed_field import (
4 EditableField, EditableBits, EditableBytes,
5 EditableInteger, EditableString,
6 createEditableField)
7from fieldset import EditableFieldSet, NewFieldSet, createEditor
8
diff --git a/lib/hachoir_editor/field.py b/lib/hachoir_editor/field.py
new file mode 100644
index 0000000..6b1efe3
--- /dev/null
+++ b/lib/hachoir_editor/field.py
@@ -0,0 +1,69 @@
1from hachoir_core.error import HachoirError
2from hachoir_core.field import joinPath, MissingField
3
class EditorError(HachoirError):
    '''Base exception raised by the hachoir editor layer.'''
    pass
6
class FakeField(object):
    """
    This class has an API similar to the Field API, but objects don't contain
    any value: all values are _computed_ by parent methods.

    Example: FakeField(editor, "abc").size calls editor._getFieldSize("abc").
    """
    is_field_set = False

    def __init__(self, parent, name):
        # parent: the editable field set owning this field
        # name: name of the corresponding field in parent's input set
        self._parent = parent
        self._name = name

    def _getPath(self):
        return joinPath(self._parent.path, self._name)
    path = property(_getPath)

    def _getName(self):
        return self._name
    name = property(_getName)

    def _getAddress(self):
        # relative address, recomputed by the parent (accounts for
        # deleted/resized siblings)
        return self._parent._getFieldAddress(self._name)
    address = property(_getAddress)

    def _getSize(self):
        # size in bits, read from the original (input) field
        return self._parent.input[self._name].size
    size = property(_getSize)

    def _getValue(self):
        return self._parent.input[self._name].value
    value = property(_getValue)

    def createDisplay(self):
        # TODO: Returns new value if field is altered
        return self._parent.input[self._name].display
    display = property(createDisplay)

    def _getParent(self):
        return self._parent
    parent = property(_getParent)

    def hasValue(self):
        return self._parent.input[self._name].hasValue()

    def __getitem__(self, key):
        # TODO: Implement this function!
        raise MissingField(self, key)

    def _isAltered(self):
        # a FakeField never stores a value of its own
        return False
    is_altered = property(_isAltered)

    def writeInto(self, output):
        # Raw copy of the original field's bits/bytes from the input
        # stream into the output stream.
        size = self.size
        addr = self._parent._getFieldInputAddress(self._name)
        input = self._parent.input
        stream = input.stream
        if size % 8:
            # not byte-aligned: bit-level, endian-aware copy
            output.copyBitsFrom(stream, addr, size, input.endian)
        else:
            output.copyBytesFrom(stream, addr, size//8)
69
diff --git a/lib/hachoir_editor/fieldset.py b/lib/hachoir_editor/fieldset.py
new file mode 100644
index 0000000..a74c8e2
--- /dev/null
+++ b/lib/hachoir_editor/fieldset.py
@@ -0,0 +1,352 @@
1from hachoir_core.dict import UniqKeyError
2from hachoir_core.field import MissingField, Float32, Float64, FakeArray
3from hachoir_core.compatibility import any
4from hachoir_core.i18n import _
5from typed_field import createEditableField
6from field import EditorError
7from collections import deque # Python 2.4
8import weakref # Python 2.1
9import struct
10
class EditableFieldSet(object):
    '''
    Editable proxy around an input (hachoir parser) field set.

    The input set is never modified: alterations are tracked in
    _fields (cache of editable wrappers), _deleted (removed names)
    and _inserted (new fields, keyed by the name they precede) and
    merged on the fly by __iter__()/writeInto().
    '''
    MAX_SIZE = (1 << 40)  # Arbitrary limit to catch errors
    is_field_set = True

    def __init__(self, parent, fieldset):
        self._parent = parent
        self.input = fieldset  # original FieldSet
        self._fields = {}  # cache of editable fields
        self._deleted = set()  # Names of deleted fields
        self._inserted = {}  # Inserted field (name => list of field,
                             # where name is the name after)

    def array(self, key):
        # FIXME: Use cache?
        return FakeArray(self, key)

    def _getParent(self):
        return self._parent
    parent = property(_getParent)

    def _isAltered(self):
        if self._inserted:
            return True
        if self._deleted:
            return True
        return any(field.is_altered for field in self._fields.itervalues())
    is_altered = property(_isAltered)

    def reset(self):
        """
        Reset the field set and the input field set.
        """
        # Fixed: deleting from the dict while iterating iteritems()
        # raises a RuntimeError; items() takes a snapshot.
        for key, field in self._fields.items():
            if not field.is_altered:
                del self._fields[key]
        self.input.reset()

    def __len__(self):
        return len(self.input) \
            - len(self._deleted) \
            + sum( len(new) for new in self._inserted.itervalues() )

    def __iter__(self):
        # merged view: inserted fields come before the field whose name
        # keys them, deleted fields are skipped, and the None key holds
        # fields appended at the very end
        for field in self.input:
            name = field.name
            if name in self._inserted:
                for newfield in self._inserted[name]:
                    yield weakref.proxy(newfield)
            if name not in self._deleted:
                yield self[name]
        if None in self._inserted:
            for newfield in self._inserted[None]:
                yield weakref.proxy(newfield)

    def insertBefore(self, name, *new_fields):
        self._insert(name, new_fields, False)

    def insertAfter(self, name, *new_fields):
        self._insert(name, new_fields, True)

    def insert(self, *new_fields):
        self._insert(None, new_fields, True)

    def _insert(self, key, new_fields, next):
        """
        key is the name of the field before which new_fields
        will be inserted. If next is True, the fields will be inserted
        _after_ this field.
        """
        # Set unique field name
        for field in new_fields:
            if field._name.endswith("[]"):
                self.input.setUniqueFieldName(field)

        # Check that there is no duplicate in inserted fields
        new_names = list(field.name for field in new_fields)
        names_set = set(new_names)
        if len(names_set) != len(new_fields):
            duplicates = (name for name in names_set if 1 < new_names.count(name))
            raise UniqKeyError(_("Duplicates in inserted fields: %s") % ", ".join(duplicates))

        # Check that field names are not in input
        if self.input:  # Write special version for NewFieldSet?
            for name in new_names:
                if name in self.input and name not in self._deleted:
                    raise UniqKeyError(_("Field name '%s' already exists") % name)

        # Check that field names are not in inserted fields
        for fields in self._inserted.itervalues():
            for field in fields:
                if field.name in new_names:
                    raise UniqKeyError(_("Field name '%s' already exists") % field.name)

        # Input have already inserted field?
        if key in self._inserted:
            if next:
                self._inserted[key].extend( reversed(new_fields) )
            else:
                self._inserted[key].extendleft( reversed(new_fields) )
            return

        # Would like to insert in inserted fields?
        if key:
            for fields in self._inserted.itervalues():
                names = [item.name for item in fields]
                try:
                    pos = names.index(key)
                except ValueError:
                    continue
                if 0 <= pos:
                    if next:
                        pos += 1
                    fields.rotate(-pos)
                    fields.extendleft( reversed(new_fields) )
                    fields.rotate(pos)
                    return

        # Get next field. Use None if we are at the end.
        if next:
            # NOTE(review): when called from insert(), key is None here
            # and self.input[None] looks like it would fail — confirm
            # against hachoir's FieldSet indexing semantics.
            index = self.input[key].index + 1
            try:
                key = self.input[index].name
            except IndexError:
                key = None

        # Check that field names are not in input
        if key not in self.input:
            raise MissingField(self, key)

        # Insert in original input
        self._inserted[key] = deque(new_fields)

    def _getDescription(self):
        return self.input.description
    description = property(_getDescription)

    def _getStream(self):
        # FIXME: This property is maybe a bad idea since address may be differents
        return self.input.stream
    stream = property(_getStream)

    def _getName(self):
        return self.input.name
    name = property(_getName)

    def _getEndian(self):
        return self.input.endian
    endian = property(_getEndian)

    def _getAddress(self):
        if self._parent:
            return self._parent._getFieldAddress(self.name)
        else:
            return 0
    address = property(_getAddress)

    def _getAbsoluteAddress(self):
        address = self.address
        current = self._parent
        while current:
            address += current.address
            current = current._parent
        return address
    absolute_address = property(_getAbsoluteAddress)

    def hasValue(self):
        return False
#        return self._parent.input[self.name].hasValue()

    def _getSize(self):
        # an altered set must be measured field by field, since fields
        # may have been added, removed or resized
        if self.is_altered:
            return sum(field.size for field in self)
        else:
            return self.input.size
    size = property(_getSize)

    def _getPath(self):
        return self.input.path
    path = property(_getPath)

    def _getOriginalField(self, name):
        assert name in self.input
        return self.input[name]

    def _getFieldInputAddress(self, name):
        """
        Absolute address of a field from the input field set.
        """
        assert name in self.input
        return self.input[name].absolute_address

    def _getFieldAddress(self, name):
        """
        Compute relative address of a field. The operation takes care of
        deleted and resized fields.
        """
        #assert name not in self._deleted
        addr = 0
        for field in self:
            if field.name == name:
                return addr
            addr += field.size
        raise MissingField(self, name)

    def _getItemByPath(self, path):
        if not path[0]:
            path = path[1:]
        field = self
        for name in path:
            field = field[name]
        return field

    def __contains__(self, name):
        try:
            field = self[name]
            return (field is not None)
        except MissingField:
            return False

    def __getitem__(self, key):
        """
        Create a weak reference to an editable field (EditableField) for the
        field with specified name. If the field is removed later, using the
        editable field will raise a weakref.ReferenceError exception.

        May raise a MissingField error if the field doesn't exist in original
        field set or it has been deleted.
        """
        if "/" in key:
            return self._getItemByPath(key.split("/"))
        if isinstance(key, (int, long)):
            raise EditorError("Integer index are not supported")

        if (key in self._deleted) or (key not in self.input):
            raise MissingField(self, key)
        if key not in self._fields:
            field = self.input[key]
            if field.is_field_set:
                self._fields[key] = createEditableFieldSet(self, field)
            else:
                self._fields[key] = createEditableField(self, field)
        return weakref.proxy(self._fields[key])

    def __delitem__(self, name):
        """
        Remove a field from the field set. May raise an MissingField exception
        if the field has already been deleted.
        """
        parts = name.partition('/')
        if parts[2]:
            fieldset = self[parts[0]]
            # Fixed: "part[2]" was an undefined name (NameError) —
            # recursive deletion through a path never worked.
            del fieldset[parts[2]]
            return
        if name in self._deleted:
            raise MissingField(self, name)
        self._deleted.add(name)
        if name in self._fields:
            del self._fields[name]

    def writeInto(self, output):
        """
        Write the content if this field set into the output stream
        (OutputStream).
        """
        if not self.is_altered:
            # Not altered: just copy bits/bytes
            input = self.input
            if input.size % 8:
                output.copyBitsFrom(input.stream,
                    input.absolute_address, input.size, input.endian)
            else:
                output.copyBytesFrom(input.stream,
                    input.absolute_address, input.size//8)
        else:
            # Altered: call writeInto() method of each field
            # (removed the dead "realaddr" accumulator)
            for field in self:
                field.writeInto(output)

    def _getValue(self):
        raise EditorError('Field set "%s" has no value' % self.path)
    def _setValue(self, value):
        raise EditorError('Field set "%s" value is read only' % self.path)
    value = property(_getValue, _setValue, "Value of field")
296
class EditableFloat(EditableFieldSet):
    # Floats are parsed by hachoir as small field sets, but are edited
    # as a whole: once _value is set, it overrides the input bytes.
    _value = None

    def _isAltered(self):
        return (self._value is not None)
    is_altered = property(_isAltered)

    def writeInto(self, output):
        if self._value is not None:
            self._write(output)
        else:
            # unchanged: fall back to the raw copy of the input bytes
            EditableFieldSet.writeInto(self, output)

    def _write(self, output):
        # re-pack the new value using the same struct format as the
        # input field (e.g. '<f' / '<d')
        format = self.input.struct_format
        raw = struct.pack(format, self._value)
        output.writeBytes(raw)

    def _setValue(self, value):
        # propagate the dirty flag so the parent re-measures/re-writes
        self.parent._is_altered = True
        self._value = value
    value = property(EditableFieldSet._getValue, _setValue)
319
def createEditableFieldSet(parent, field):
    '''Pick the editable wrapper class matching the input field type.'''
    # FIXME: Support Float80
    # exact class match on purpose (no isinstance): subclasses keep the
    # generic wrapper, as in the original dispatch
    if field.__class__ in (Float32, Float64):
        return EditableFloat(parent, field)
    return EditableFieldSet(parent, field)
327
class NewFieldSet(EditableFieldSet):
    '''
    Field set created from scratch: it has no input field set, only
    inserted fields (all stored under the None key).
    '''
    def __init__(self, parent, name):
        EditableFieldSet.__init__(self, parent, None)
        self._name = name
        self._endian = parent.endian

    def __iter__(self):
        if None in self._inserted:
            return iter(self._inserted[None])
        else:
            # Fixed: raising StopIteration from a non-generator
            # __iter__ propagates to the caller instead of producing
            # an empty iteration.
            return iter(())

    def _getName(self):
        return self._name
    name = property(_getName)

    def _getEndian(self):
        return self._endian
    endian = property(_getEndian)

    # a brand new field set always has to be written out
    is_altered = property(lambda self: True)
349
def createEditor(fieldset):
    '''Return a top-level editor (no parent) wrapping *fieldset*.'''
    return EditableFieldSet(None, fieldset)
352
diff --git a/lib/hachoir_editor/typed_field.py b/lib/hachoir_editor/typed_field.py
new file mode 100644
index 0000000..0f0427b
--- /dev/null
+++ b/lib/hachoir_editor/typed_field.py
@@ -0,0 +1,253 @@
1from hachoir_core.field import (
2 RawBits, Bit, Bits, PaddingBits,
3 RawBytes, Bytes, PaddingBytes,
4 GenericString, Character,
5 isInteger, isString)
6from field import FakeField
7
class EditableField(FakeField):
    """
    Pure virtual class used to write editable field class.

    A subclass stores a replacement value in self._value; as long as no
    value was assigned, value/size/writeInto all fall back to the
    original (FakeField) behaviour.
    """

    # Set to True once the user assigns through the .value property.
    _is_altered = False
    def __init__(self, parent, name, value=None):
        FakeField.__init__(self, parent, name)
        self._value = value

    def _isAltered(self):
        return self._is_altered
    is_altered = property(_isAltered)

    def hasValue(self):
        return True

    def _computeSize(self):
        # Size (in bits) of the replacement value; subclasses must implement.
        raise NotImplementedError()
    def _getValue(self):
        return self._value
    def _setValue(self, value):
        self._value = value

    def _propGetValue(self):
        # Prefer the user-assigned value, else read the original field.
        if self._value is not None:
            return self._getValue()
        else:
            return FakeField._getValue(self)
    def _propSetValue(self, value):
        self._setValue(value)
        self._is_altered = True
    value = property(_propGetValue, _propSetValue)

    def _getSize(self):
        # Same fallback rule as value: replacement set => computed size,
        # otherwise the original field's size.
        if self._value is not None:
            return self._computeSize()
        else:
            return FakeField._getSize(self)
    size = property(_getSize)

    def _write(self, output):
        # Serialize the replacement value; subclasses must implement.
        raise NotImplementedError()

    def writeInto(self, output):
        if self._is_altered:
            self._write(output)
        else:
            return FakeField.writeInto(self, output)
57
class EditableFixedField(EditableField):
    """
    Editable field whose size (in bits) never changes.
    """

    def __init__(self, parent, name, value=None, size=None):
        EditableField.__init__(self, parent, name, value)
        if size is None:
            # Default to the size of the original field being replaced.
            size = self._parent._getOriginalField(self._name).size
        self._size = size

    def _getSize(self):
        return self._size
    size = property(_getSize)
73
class EditableBits(EditableFixedField):
    """Editable fixed-size unsigned bit field.

    Constructor prototype: (parent, name, [size, value]).
    """
    def __init__(self, parent, name, *args):
        if not args:
            size = None
            value = None
        elif len(args) == 2:
            size, value = args
            assert isinstance(value, (int, long))
        else:
            raise TypeError(
                "Wrong argument count, EditableBits constructor prototype is: "
                "(parent, name, [size, value])")
        EditableFixedField.__init__(self, parent, name, value, size)
        if args:
            # Route the initial value through _setValue for range checking.
            self._setValue(args[1])
            self._is_altered = True

    def _setValue(self, value):
        # Unsigned: the value must fit in self._size bits.
        if not(0 <= value < (1 << self._size)):
            raise ValueError("Invalid value, must be in range %s..%s"
                % (0, (1 << self._size) - 1))
        self._value = value

    def _write(self, output):
        output.writeBits(self._size, self._value, self._parent.endian)
100
class EditableBytes(EditableField):
    """Editable variable-length byte string."""

    def _setValue(self, value):
        if not value:
            raise ValueError(
                "Unable to set empty string to a EditableBytes field")
        self._value = value

    def _computeSize(self):
        # Size is expressed in bits, hence the factor of eight.
        return 8 * len(self._value)

    def _write(self, output):
        output.writeBytes(self._value)
112
class EditableString(EditableField):
    """Editable character string (fixed, suffix-terminated, or Pascal).

    Constructor prototype: (parent, name, [format, value]); a *charset*
    keyword may be given when creating a brand-new string.
    """
    # Maximum payload length for length-prefixed (Pascal) strings.
    MAX_SIZE = {
        "Pascal8": (1 << 8)-1,
        "Pascal16": (1 << 16)-1,
        "Pascal32": (1 << 32)-1,
    }

    def __init__(self, parent, name, *args, **kw):
        if len(args) == 2:
            value = args[1]
            assert isinstance(value, str) # TODO: support Unicode
        elif not args:
            value = None
        else:
            raise TypeError(
                "Wrong argument count, EditableString constructor prototype is:"
                "(parent, name, [format, value])")
        EditableField.__init__(self, parent, name, value)
        if len(args) == 2:
            # Brand-new string: layout is derived from the arguments.
            self._charset = kw.get('charset', None)
            self._format = args[0]
            if self._format in GenericString.PASCAL_FORMATS:
                self._prefix_size = GenericString.PASCAL_FORMATS[self._format]
            else:
                self._prefix_size = 0
            self._suffix_str = GenericString.staticSuffixStr(
                self._format, self._charset, self._parent.endian)
            self._is_altered = True
        else:
            # Editing an existing string: inherit its layout.
            orig = self._parent._getOriginalField(name)
            self._charset = orig.charset
            self._format = orig.format
            self._prefix_size = orig.content_offset
            self._suffix_str = orig.suffix_str

    def _setValue(self, value):
        size = len(value)
        # Pascal strings store the length in a fixed-width prefix.
        if self._format in self.MAX_SIZE and self.MAX_SIZE[self._format] < size:
            raise ValueError("String is too big")
        self._value = value

    def _computeSize(self):
        # Size in bits: length prefix + payload + terminator/suffix.
        return (self._prefix_size + len(self._value) + len(self._suffix_str))*8

    def _write(self, output):
        if self._format in GenericString.SUFFIX_FORMAT:
            output.writeBytes(self._value)
            output.writeBytes(self._suffix_str)
        elif self._format == "fixed":
            output.writeBytes(self._value)
        else:
            assert self._format in GenericString.PASCAL_FORMATS
            size = GenericString.PASCAL_FORMATS[self._format]
            output.writeInteger(len(self._value), False, size, self._parent.endian)
            output.writeBytes(self._value)
168
class EditableCharacter(EditableFixedField):
    """Editable one-byte character field.

    Constructor prototype: (parent, name, [value]); *value*, when given,
    must be a single-character str.
    """
    def __init__(self, parent, name, *args):
        if args:
            # BUG FIX: the documented prototype takes ONE optional value
            # (and only args[0] is used), but the old check demanded
            # exactly 3 extra arguments (copy/paste from EditableInteger),
            # making the documented form unusable.
            if len(args) != 1:
                raise TypeError(
                    "Wrong argument count, EditableCharacter "
                    "constructor prototype is: (parent, name, [value])")
            value = args[0]
            if not isinstance(value, str) or len(value) != 1:
                raise TypeError("EditableCharacter needs a character")
        else:
            value = None
        EditableFixedField.__init__(self, parent, name, value, 8)
        if args:
            self._is_altered = True

    def _setValue(self, value):
        if not isinstance(value, str) or len(value) != 1:
            raise TypeError("EditableCharacter needs a character")
        self._value = value

    def _write(self, output):
        output.writeBytes(self._value)
192
class EditableInteger(EditableFixedField):
    """Editable fixed-size integer field (signed or unsigned).

    Constructor prototype: (parent, name, [signed, size, value]).
    """
    # Inclusive (min, max) bounds per bit size, two's complement.
    VALID_VALUE_SIGNED = {
        # BUG FIX: the 8-bit bounds used to be (-(1 << 8), (1 << 8)-1),
        # i.e. -256..255, which does not fit in a signed byte; the 16 and
        # 32 bit entries already followed the -(1 << (n-1)) pattern.
        8: (-(1 << 7), (1 << 7)-1),
        16: (-(1 << 15), (1 << 15)-1),
        32: (-(1 << 31), (1 << 31)-1),
    }
    VALID_VALUE_UNSIGNED = {
        8: (0, (1 << 8)-1),
        16: (0, (1 << 16)-1),
        32: (0, (1 << 32)-1)
    }

    def __init__(self, parent, name, *args):
        if args:
            if len(args) != 3:
                raise TypeError(
                    "Wrong argument count, EditableInteger constructor prototype is: "
                    "(parent, name, [signed, size, value])")
            size = args[1]
            value = args[2]
            assert isinstance(value, (int, long))
        else:
            size = None
            value = None
        EditableFixedField.__init__(self, parent, name, value, size)
        if args:
            self._signed = args[0]
            self._is_altered = True
        else:
            # Editing an existing field: inherit its signedness.
            self._signed = self._parent._getOriginalField(self._name).signed

    def _setValue(self, value):
        if self._signed:
            valid = self.VALID_VALUE_SIGNED
        else:
            valid = self.VALID_VALUE_UNSIGNED
        minval, maxval = valid[self._size]
        if not(minval <= value <= maxval):
            raise ValueError("Invalid value, must be in range %s..%s"
                % (minval, maxval))
        self._value = value

    def _write(self, output):
        output.writeInteger(
            self.value, self._signed, self._size//8, self._parent.endian)
238
def createEditableField(fieldset, field):
    """Map a parsed hachoir field to its editable counterpart.

    Order matters (isInteger/isString are checked first); unsupported
    field classes fall back to a read-only FakeField.
    """
    klass = field.__class__
    if isInteger(field):
        editable = EditableInteger
    elif isString(field):
        editable = EditableString
    elif klass in (RawBytes, Bytes, PaddingBytes):
        editable = EditableBytes
    elif klass in (RawBits, Bits, Bit, PaddingBits):
        editable = EditableBits
    elif klass is Character:
        editable = EditableCharacter
    else:
        editable = FakeField
    return editable(fieldset, field.name)
253
diff --git a/lib/images.py b/lib/images.py
new file mode 100644
index 0000000..3eb3544
--- /dev/null
+++ b/lib/images.py
@@ -0,0 +1,48 @@
1'''
2 Takes care about pictures formats
3'''
4
5import parser
6
7
class JpegStripper(parser.GenericParser):
    '''
    represents a jpeg file
    remaining :
    http://www.sno.phy.queensu.ca/~phil/exiftool/TagNames/CanonRaw.html
    '''
    def _should_remove(self, field):
        '''
        Return True if the field is compromising.
        '''
        name = field.name
        if name.startswith('comment'):
            return True
        elif name in ('photoshop', 'exif', 'adobe', 'app12'):
            return True
        # BUG FIX: "name in ('icc')" tested substring membership in the
        # *string* 'icc' (parentheses alone do not make a tuple), so field
        # names like 'i' or 'cc' matched; compare for equality instead.
        elif name == 'icc':  # should we remove the icc profile ?
            return True
        else:
            return False
27
28
class PngStripper(parser.GenericParser):
    '''
    represents a png file
    see : http://www.sno.phy.queensu.ca/~phil/exiftool/TagNames/PNG.html
    '''
    def _should_remove(self, field):
        '''
        return True if the field is compromizing
        '''
        name = field.name
        # textual metadata, adobe blobs (raw and compressed), and the
        # timestamp field are all considered harmful
        harmful_prefixes = ('text[', 'utf8_text[', 'compt_text[')
        return name.startswith(harmful_prefixes) or name == 'time'
diff --git a/lib/mat.py b/lib/mat.py
new file mode 100644
index 0000000..53d02d8
--- /dev/null
+++ b/lib/mat.py
@@ -0,0 +1,150 @@
1#!/usr/bin/env python
2
3'''
4 Metadata anonymisation toolkit library
5'''
6
7import os
8import subprocess
9import logging
10import mimetypes
11import xml.sax
12
13import hachoir_core.cmd_line
14import hachoir_parser
15
16import strippers
17
__version__ = '0.2.2'
__author__ = 'jvoisin'

#Silence: log only critical problems and keep hachoir quiet
LOGGING_LEVEL = logging.CRITICAL
hachoir_core.config.quiet = True
fname = ''
# an empty fname makes basicConfig() log to stderr instead of a file

#Verbose (debugging) settings:
#LOGGING_LEVEL = logging.DEBUG
#hachoir_core.config.quiet = False
#logname = 'report.log'

logging.basicConfig(filename=fname, level=LOGGING_LEVEL)
32
33
def get_sharedir():
    '''
    An ugly hack to find where the "FORMATS" file lives.

    Return '' when running from the source tree, a share directory
    (with a trailing slash, since callers concatenate filenames onto
    the result), or None when nothing was found.
    '''
    if os.path.isfile('FORMATS'):
        return ''
    elif os.path.exists('/usr/local/share/mat/'):
        return '/usr/local/share/mat/'
    elif os.path.exists('/usr/share/mat/'):
        # BUG FIX: the trailing slash was missing here, so concatenating
        # a filename produced '/usr/share/matFORMATS'.
        return '/usr/share/mat/'
    return None
44
45
class XMLParser(xml.sax.handler.ContentHandler):
    '''
    SAX handler for the supported-formats xml file: builds self.list,
    one dict per <format> element, mapping tag name -> text content.
    '''
    def __init__(self):
        self.dict = {}
        self.list = []
        self.content, self.key = '', ''
        self.between = False

    def startElement(self, name, attrs):
        '''
        Called when entering into xml balise
        '''
        self.between = True
        self.key = name
        self.content = ''

    def endElement(self, name):
        '''
        Called when exiting a xml balise
        '''
        self.between = False
        if name == 'format':  # a fileformat section is complete
            self.list.append(self.dict.copy())
            self.dict.clear()
            return
        self.dict[self.key] = self.content.replace('\s', ' ')

    def characters(self, data):
        '''
        Concatenate the content between opening and closing balises
        '''
        if self.between:
            self.content += data
83
84
def secure_remove(filename):
    '''
    Securely remove *filename*: try shred(1) first, and fall back to a
    plain os.remove() if shred is unavailable or failed.
    '''
    removed = False
    try:
        # BUG FIX: the return code used to be ignored, so a *failing*
        # shred run was still treated as a successful removal.
        removed = subprocess.call(['shred', '--remove', filename]) == 0
    except OSError:  # shred is not installed
        pass

    if removed is False:
        logging.error('Unable to securely remove %s' % filename)
        try:
            os.remove(filename)
        except OSError:
            logging.error('Unable to remove %s' % filename)
101
102
def create_class_file(name, backup, add2archive):
    '''
    Return a $FILETYPEStripper() instance corresponding to the filetype
    of the given file, or None when the file cannot be processed.
    '''
    if not os.path.isfile(name):
        # check if the file exists
        logging.error('%s is not a valid file' % name)
        return None

    if not os.access(name, os.R_OK):
        #check read permissions
        logging.error('%s is not readable' % name)
        return None

    if not os.access(name, os.W_OK):
        #check write permission
        logging.error('%s is not writable' % name)
        return None

    try:
        filename = hachoir_core.cmd_line.unicodeFilename(name)
    except TypeError:  # get rid of "decoding Unicode is not supported"
        filename = name

    parser = hachoir_parser.createParser(filename)
    if not parser:
        logging.info('Unable to parse %s' % filename)
        return None

    mime = parser.mime_type

    if mime == 'application/zip':  # some formats are zipped stuff
        mime = mimetypes.guess_type(name)[0]
        # BUG FIX: guess_type() returns None for unknown extensions,
        # which used to crash on the startswith() calls below.
        if mime is None:
            logging.info('Unable to guess the mime type of %s' % name)
            return None

    if mime.startswith('application/vnd.oasis.opendocument'):
        mime = 'application/opendocument'  # opendocument fileformat
    elif mime.startswith('application/vnd.openxmlformats-officedocument'):
        mime = 'application/officeopenxml'  # office openxml

    try:
        stripper_class = strippers.STRIPPERS[mime]
    except KeyError:
        logging.info('Don\'t have stripper for %s format' % mime)
        return None

    return stripper_class(filename, parser, mime, backup, add2archive)
diff --git a/lib/misc.py b/lib/misc.py
new file mode 100644
index 0000000..d084861
--- /dev/null
+++ b/lib/misc.py
@@ -0,0 +1,63 @@
1'''
2 Care about misc formats
3'''
4
5import parser
6
7from bencode import bencode
8
9
class TorrentStripper(parser.GenericParser):
    '''
    Represent a torrent file with the help
    of the bencode lib from Petru Paler
    '''
    def __init__(self, filename, parser, mime, backup, add2archive):
        super(TorrentStripper, self).__init__(filename, parser, mime,
            backup, add2archive)
        # bencode keys that carry metadata worth stripping
        self.fields = ['comment', 'creation date', 'created by']

    def _decode(self):
        '''
        Read and bdecode the torrent file.
        '''
        with open(self.filename, 'r') as f:
            return bencode.bdecode(f.read())

    def is_clean(self):
        '''
        Check if the file is clean from harmful metadatas
        '''
        decoded = self._decode()
        for key in self.fields:
            if key in decoded and decoded[key] != '':
                return False
        return True

    def get_meta(self):
        '''
        Return a dict with all the meta of the file
        '''
        metadata = {}
        decoded = self._decode()
        for key in self.fields:
            if key in decoded and decoded[key] != '':
                metadata[key] = decoded[key]
        return metadata

    def remove_all(self):
        '''
        Blank out the compromising fields
        '''
        decoded = self._decode()
        for key in self.fields:
            # BUG FIX: the old code assigned decoded[key] = '' for every
            # key (assignment never raises KeyError, so its try/except was
            # dead), silently *adding* empty metadata entries that were
            # not present in the original torrent.
            if key in decoded:
                decoded[key] = ''
        with open(self.output, 'w') as f:  # encode the decoded torrent
            f.write(bencode.bencode(decoded))  # and write it in self.output
        self.do_backup()
        return True
diff --git a/lib/office.py b/lib/office.py
new file mode 100644
index 0000000..e1d738e
--- /dev/null
+++ b/lib/office.py
@@ -0,0 +1,305 @@
1'''
2 Care about office's formats
3'''
4
5import os
6import logging
7import zipfile
8import fileinput
9import subprocess
10import xml.dom.minidom as minidom
11
12try:
13 import cairo
14 import poppler
15except ImportError:
16 pass
17
18import mat
19import parser
20import archive
21
class OpenDocumentStripper(archive.GenericArchiveStripper):
    '''
    An open document file is a zip, with xml file into.
    The one that interest us is meta.xml
    '''

    def get_meta(self):
        '''
        Return a dict with all the meta of the file by
        trying to read the meta.xml file.
        '''
        zipin = zipfile.ZipFile(self.filename, 'r')
        metadata = {}
        try:
            content = zipin.read('meta.xml')
            dom1 = minidom.parseString(content)
            elements = dom1.getElementsByTagName('office:meta')
            for i in elements[0].childNodes:
                if i.tagName != 'meta:document-statistic':
                    # keep only the local part of the tag name,
                    # e.g. 'meta:creation-date' -> 'creation-date'
                    nodename = ''.join([k for k in i.nodeName.split(':')[1:]])
                    metadata[nodename] = ''.join([j.data for j in i.childNodes])
                else:
                    # thank you w3c for not providing a nice
                    # method to get all attributes from a node
                    pass
            zipin.close()
        except KeyError:  # no meta.xml file found
            logging.debug('%s has no opendocument metadata' % self.filename)
        return metadata

    def _remove_all(self, method):
        '''
        Rebuild the archive into self.output: drop meta.xml, scrub its
        entry from manifest.xml, and clean every other member with its
        own stripper ('normal' or strict *method*).
        FIXME ?
        There is a patch implementing the Zipfile.remove()
        method here : http://bugs.python.org/issue6818
        '''
        zipin = zipfile.ZipFile(self.filename, 'r')
        zipout = zipfile.ZipFile(self.output, 'w', allowZip64=True)

        for item in zipin.namelist():
            name = os.path.join(self.tempdir, item)
            _, ext = os.path.splitext(name)

            if item.endswith('manifest.xml'):
                # contain the list of all files present in the archive
                zipin.extract(item, self.tempdir)
                for line in fileinput.input(name, inplace=1):
                    #remove the line which contains "meta.xml"
                    line = line.strip()
                    if not 'meta.xml' in line:
                        print line
                zipout.write(name, item)

            elif ext in parser.NOMETA or item == 'mimetype':
                #keep NOMETA files, and the "manifest" file
                if item != 'meta.xml': # contains the metadata
                    zipin.extract(item, self.tempdir)
                    zipout.write(name, item)

            else:
                zipin.extract(item, self.tempdir)
                if os.path.isfile(name):
                    try:
                        cfile = mat.create_class_file(name, False,
                            self.add2archive)
                        if method == 'normal':
                            cfile.remove_all()
                        else:
                            cfile.remove_all_strict()
                        logging.debug('Processing %s from %s' % (item,
                            self.filename))
                        zipout.write(name, item)
                    except:
                        # NOTE(review): bare except — any failure in a
                        # sub-stripper is treated as "unsupported format"
                        logging.info('%s\' fileformat is not supported' % item)
                        if self.add2archive:
                            zipout.write(name, item)
        # an empty comment also avoids leaking the original zip comment
        zipout.comment = ''
        logging.info('%s treated' % self.filename)
        zipin.close()
        zipout.close()
        self.do_backup()
        return True

    def is_clean(self):
        '''
        Check if the file is clean from harmful metadatas
        '''
        zipin = zipfile.ZipFile(self.filename, 'r')
        try:
            zipin.getinfo('meta.xml')
        except KeyError:  # no meta.xml in the file
            # no opendocument metadata: defer to the generic zip check
            czf = archive.ZipStripper(self.filename, self.parser,
                'application/zip', self.backup, self.add2archive)
            if czf.is_clean():
                zipin.close()
                return True
        zipin.close()
        return False
120
121
class PdfStripper(parser.GenericParser):
    '''
    Represent a PDF file.

    Metadata is read with poppler; writing relies on pdfrw and/or
    exiftool, whichever is available.
    '''
    def __init__(self, filename, parser, mime, backup, add2archive):
        super(PdfStripper, self).__init__(filename, parser, mime, backup,
            add2archive)
        uri = 'file://' + os.path.abspath(self.filename)
        self.password = None
        self.document = poppler.document_new_from_file(uri, self.password)
        # poppler document properties that may leak information
        self.meta_list = ('title', 'author', 'subject', 'keywords', 'creator',
            'producer', 'metadata')

    def is_clean(self):
        '''
        Check if the file is clean from harmful metadatas
        '''
        for key in self.meta_list:
            value = self.document.get_property(key)
            if value is not None and value != '':
                return False
        return True

    def remove_all(self):
        '''
        Remove the superficial metadata only
        '''
        return self._remove_meta()

    def remove_all_strict(self):
        '''
        Opening the PDF with poppler, then doing a render
        on a cairo pdfsurface for each pages.
        Thanks to Lunar^ for the idea.
        http://cairographics.org/documentation/pycairo/2/
        python-poppler is not documented at all : have fun ;)
        '''
        page = self.document.get_page(0)
        page_width, page_height = page.get_size()
        surface = cairo.PDFSurface(self.output, page_width, page_height)
        context = cairo.Context(surface)  # context draws on the surface
        logging.debug('PDF rendering of %s' % self.filename)
        for pagenum in xrange(self.document.get_n_pages()):
            page = self.document.get_page(pagenum)
            context.translate(0, 0)
            page.render(context)  # render the page on context
            context.show_page()  # draw context on surface
        surface.finish()
        return self._remove_meta()

    def _remove_meta(self):
        '''
        Remove superficial/external metadata
        from a PDF file, using pdfrw and/or exiftool.
        '''
        processed = False
        try:  # try with pdfrw
            import pdfrw
            #For now, poppler cannot write meta, so we must use pdfrw
            logging.debug('Removing %s\'s superficial metadata' % self.filename)
            trailer = pdfrw.PdfReader(self.output)
            # BUG FIX: the author was cleared on the trailer itself
            # ('trailer.Author') instead of on the Info dictionary,
            # leaving the author metadata in the output file.
            trailer.Info.Producer = trailer.Info.Author = trailer.Info.Creator = None
            writer = pdfrw.PdfWriter()
            writer.trailer = trailer
            writer.write(self.output)
            self.do_backup()
            processed = True
        except Exception:  # pdfrw missing, or it failed on this file
            pass

        try:  # try with exiftool
            # BUG FIX: '/dev/null' used to be opened read-only, so the
            # children could not actually write to their redirected stdout.
            subprocess.Popen('exiftool', stdout=open(os.devnull, 'w'))
            import exiftool
            if self.backup:
                # Note: '-All=' must be followed by a known exiftool option.
                process = subprocess.Popen(['exiftool', '-m', '-All=',
                    '-out', self.output, self.filename],
                    stdout=open(os.devnull, 'w'))
                process.wait()
            else:
                # Note: '-All=' must be followed by a known exiftool option.
                process = subprocess.Popen(
                    ['exiftool', '-All=', '-overwrite_original', self.filename],
                    stdout=open(os.devnull, 'w'))
                process.wait()
            processed = True
        except Exception:  # exiftool is not installed
            pass

        if processed is False:
            logging.error('Please install either pdfrw, or exiftool to\
 fully handle PDF files')
        return processed

    def get_meta(self):
        '''
        Return a dict with all the meta of the file
        '''
        metadata = {}
        for key in self.meta_list:
            value = self.document.get_property(key)
            if value is not None and value != '':
                metadata[key] = value
        return metadata
228
229
class OpenXmlStripper(archive.GenericArchiveStripper):
    '''
    Represent an office openxml document, which is like
    an opendocument format, with some tricky stuff added.
    It contains mostly xml, but can have media blobs, crap, ...
    (I don't like this format.)
    '''
    def _remove_all(self, method):
        '''
        Rebuild the archive into self.output: drop the whole docProps/
        folder (which holds the metadata) and clean every other member
        with its own stripper ('normal' or strict *method*).
        FIXME ?
        There is a patch implementing the Zipfile.remove()
        method here : http://bugs.python.org/issue6818
        '''
        zipin = zipfile.ZipFile(self.filename, 'r')
        zipout = zipfile.ZipFile(self.output, 'w',
            allowZip64=True)
        for item in zipin.namelist():
            name = os.path.join(self.tempdir, item)
            _, ext = os.path.splitext(name)
            if item.startswith('docProps/'):  # metadatas
                pass
            elif ext in parser.NOMETA or item == '.rels':
                #keep parser.NOMETA files, and the file named ".rels"
                zipin.extract(item, self.tempdir)
                zipout.write(name, item)
            else:
                zipin.extract(item, self.tempdir)
                if os.path.isfile(name):  # don't care about folders
                    try:
                        cfile = mat.create_class_file(name, False,
                            self.add2archive)
                        if method == 'normal':
                            cfile.remove_all()
                        else:
                            cfile.remove_all_strict()
                        logging.debug('Processing %s from %s' % (item,
                            self.filename))
                        zipout.write(name, item)
                    except:
                        logging.info('%s\' fileformat is not supported' % item)
                        if self.add2archive:
                            zipout.write(name, item)
        # an empty comment also avoids leaking the original zip comment
        zipout.comment = ''
        logging.info('%s treated' % self.filename)
        zipin.close()
        zipout.close()
        self.do_backup()
        return True

    def is_clean(self):
        '''
        Check if the file is clean from harmful metadatas
        '''
        zipin = zipfile.ZipFile(self.filename, 'r')
        try:
            for item in zipin.namelist():
                if item.startswith('docProps/'):
                    # BUG FIX: this early return used to leak the open
                    # zipfile handle
                    return False
        finally:
            zipin.close()
        czf = archive.ZipStripper(self.filename, self.parser,
            'application/zip', self.backup, self.add2archive)
        return czf.is_clean()

    def get_meta(self):
        '''
        Return a dict with all the meta of the file
        '''
        zipin = zipfile.ZipFile(self.filename, 'r')
        metadata = {}
        for item in zipin.namelist():
            if item.startswith('docProps/'):
                metadata[item] = 'harmful content'
        zipin.close()
        return metadata
diff --git a/lib/parser.py b/lib/parser.py
new file mode 100644
index 0000000..6dc5d0b
--- /dev/null
+++ b/lib/parser.py
@@ -0,0 +1,130 @@
1'''
2 Parent class of all parser
3'''
4
5import hachoir_core
6import hachoir_editor
7
8import os
9
10import mat
11
12NOMETA = ('.bmp', '.rdf', '.txt', '.xml', '.rels')
13#bmp : image
14#rdf : text
15#txt : plain text
16#xml : formated text
17#rels : openxml foramted text
18
19
20FIELD = object()
21
class GenericParser(object):
    '''
    Parent class of all parsers.

    Subclasses must implement _should_remove(field); it may return True
    (harmful field), False (harmless), or the FIELD sentinel, which
    means "recurse into this sub-fieldset".
    '''
    def __init__(self, filename, parser, mime, backup, add2archive):
        self.parser = parser
        self.mime = mime
        self.backup = backup
        self.editor = hachoir_editor.createEditor(parser)
        self.realname = filename
        try:
            self.filename = hachoir_core.cmd_line.unicodeFilename(filename)
        except TypeError:  # get rid of "decoding Unicode is not supported"
            self.filename = filename
        basename, ext = os.path.splitext(filename)
        # the cleaned copy is written next to the original
        self.output = basename + '.cleaned' + ext
        self.basename = os.path.basename(filename)  # only filename

    def is_clean(self):
        '''
        Check if the file is clean from harmful metadatas
        '''
        for field in self.editor:
            if self._should_remove(field):
                return self._is_clean(self.editor)
        return True

    def _is_clean(self, fieldset):
        # recursive helper for is_clean()
        for field in fieldset:
            remove = self._should_remove(field)
            if remove is True:
                return False
            if remove is FIELD:
                if not self._is_clean(field):
                    return False
        return True

    def remove_all(self):
        '''
        Remove all the compromising fields
        '''
        state = self._remove_all(self.editor)
        hachoir_core.field.writeIntoFile(self.editor, self.output)
        self.do_backup()
        return state

    def _remove_all(self, fieldset):
        # best-effort: any hachoir error aborts and reports failure
        try:
            for field in fieldset:
                remove = self._should_remove(field)
                if remove is True:
                    self._remove(fieldset, field.name)
                if remove is FIELD:
                    self._remove_all(field)
            return True
        except:
            return False

    def remove_all_strict(self):
        '''
        If the remove_all() is not efficient enough,
        this method is implemented :
        It is efficient, but destructive.
        In a perfect world, with nice fileformat,
        this method would not exist.
        '''
        self.remove_all()

    def _remove(self, fieldset, field):
        '''
        Delete the given field
        '''
        del fieldset[field]

    def get_meta(self):
        '''
        Return a dict with all the meta of the file
        '''
        metadata = {}
        self._get_meta(self.editor, metadata)
        return metadata

    def _get_meta(self, fieldset, metadata):
        for field in fieldset:
            remove = self._should_remove(field)
            if remove is True:
                try:
                    metadata[field.name] = field.value
                except:
                    metadata[field.name] = 'harmful content'
            if remove is FIELD:
                # BUG FIX: the recursive call used to omit the metadata
                # accumulator ("self._get_meta(field)"), raising TypeError
                # on nested fieldsets.
                self._get_meta(field, metadata)

    def _should_remove(self, key):
        '''
        return True if the field is compromizing
        abstract method
        '''
        raise NotImplementedError

    def do_backup(self):
        '''
        Keep the original file when a backup was requested, otherwise
        securely remove it and move the cleaned copy in its place.
        '''
        if self.backup is False:
            mat.secure_remove(self.filename)
            os.rename(self.output, self.filename)
diff --git a/lib/strippers.py b/lib/strippers.py
new file mode 100644
index 0000000..7d27874
--- /dev/null
+++ b/lib/strippers.py
@@ -0,0 +1,48 @@
1'''
2 Manage which fileformat can be processed
3'''
4
5import images
6import audio
7import office
8import archive
9import misc
10import subprocess
11
# Base map of mime type -> stripper class; these entries need only the
# standard library plus the bundled archive/audio/misc/office modules.
STRIPPERS = {
    'application/x-tar': archive.TarStripper,
    'application/x-gzip': archive.GzipStripper,
    'application/x-bzip2': archive.Bzip2Stripper,
    'application/zip': archive.ZipStripper,
    'audio/mpeg': audio.MpegAudioStripper,
    'application/x-bittorrent': misc.TorrentStripper,
    'application/opendocument': office.OpenDocumentStripper,
    'application/officeopenxml': office.OpenXmlStripper,
}

try:  # PDF support
    import poppler
    import cairo
    STRIPPERS['application/x-pdf'] = office.PdfStripper
    STRIPPERS['application/pdf'] = office.PdfStripper
except ImportError:
    print('Unable to import python-poppler and/or python-cairo: no PDF \
support')

try:  # python-mutagen : audio format support
    import mutagen
    STRIPPERS['audio/x-flac'] = audio.FlacStripper
    STRIPPERS['audio/vorbis'] = audio.OggStripper
except ImportError:
    print('Unable to import python-mutagen: limited audio format support')

try:  # check if exiftool is installed on the system
    # NOTE(review): /dev/null is opened read-only here, so exiftool cannot
    # write to its redirected stdout; open('/dev/null', 'w') was probably
    # intended — confirm.
    subprocess.Popen('exiftool', stdout=open('/dev/null'))
    import exiftool
    STRIPPERS['image/jpeg'] = exiftool.JpegStripper
    STRIPPERS['image/png'] = exiftool.PngStripper
except:  # if exiftool is not installed, use hachoir
    print('Unable to find exiftool: limited images support')
    STRIPPERS['image/jpeg'] = images.JpegStripper
    STRIPPERS['image/png'] = images.PngStripper

diff --git a/lib/tarfile/__init__.py b/lib/tarfile/__init__.py
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/lib/tarfile/__init__.py
@@ -0,0 +1 @@
diff --git a/lib/tarfile/tarfile.py b/lib/tarfile/tarfile.py
new file mode 100644
index 0000000..a40f9fc
--- /dev/null
+++ b/lib/tarfile/tarfile.py
@@ -0,0 +1,2593 @@
1# -*- coding: iso-8859-1 -*-
2#-------------------------------------------------------------------
3# tarfile.py
4#-------------------------------------------------------------------
5# Copyright (C) 2002 Lars Gustäbel <lars@gustaebel.de>
6# All rights reserved.
7#
8# Permission is hereby granted, free of charge, to any person
9# obtaining a copy of this software and associated documentation
10# files (the "Software"), to deal in the Software without
11# restriction, including without limitation the rights to use,
12# copy, modify, merge, publish, distribute, sublicense, and/or sell
13# copies of the Software, and to permit persons to whom the
14# Software is furnished to do so, subject to the following
15# conditions:
16#
17# The above copyright notice and this permission notice shall be
18# included in all copies or substantial portions of the Software.
19#
20# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
21# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
22# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
23# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
24# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
25# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
26# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
27# OTHER DEALINGS IN THE SOFTWARE.
28#
29"""Read from and write to tar format archives.
30"""
31
32__version__ = "$Revision$"
33# $Source$
34
35version = "0.9.0"
36__author__ = "Lars Gustäbel (lars@gustaebel.de)"
37__date__ = "$Date$"
38__cvsid__ = "$Id$"
39__credits__ = "Gustavo Niemeyer, Niels Gustäbel, Richard Townsend."
40
41#---------
42# Imports
43#---------
44import sys
45import os
46import shutil
47import stat
48import errno
49import time
50import struct
51import copy
52import re
53import operator
54
55try:
56 import grp, pwd
57except ImportError:
58 grp = pwd = None
59
60# from tarfile import *
# Public names re-exported by "from tarfile import *".
__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError"]

#---------------------------------------------------------
# tar constants
#---------------------------------------------------------
NUL = "\0"                      # the null character
BLOCKSIZE = 512                 # length of processing blocks
RECORDSIZE = BLOCKSIZE * 20     # length of records
GNU_MAGIC = "ustar  \0"         # magic gnu tar string
POSIX_MAGIC = "ustar\x0000"     # magic posix tar string

LENGTH_NAME = 100               # maximum length of a filename
LENGTH_LINK = 100               # maximum length of a linkname
LENGTH_PREFIX = 155             # maximum length of the prefix field

REGTYPE = "0"                   # regular file
AREGTYPE = "\0"                 # regular file
LNKTYPE = "1"                   # link (inside tarfile)
SYMTYPE = "2"                   # symbolic link
CHRTYPE = "3"                   # character special device
BLKTYPE = "4"                   # block special device
DIRTYPE = "5"                   # directory
FIFOTYPE = "6"                  # fifo special device
CONTTYPE = "7"                  # contiguous file

GNUTYPE_LONGNAME = "L"          # GNU tar longname
GNUTYPE_LONGLINK = "K"          # GNU tar longlink
GNUTYPE_SPARSE = "S"            # GNU tar sparse file

XHDTYPE = "x"                   # POSIX.1-2001 extended header
XGLTYPE = "g"                   # POSIX.1-2001 global header
SOLARIS_XHDTYPE = "X"           # Solaris extended header

USTAR_FORMAT = 0                # POSIX.1-1988 (ustar) format
GNU_FORMAT = 1                  # GNU tar format
PAX_FORMAT = 2                  # POSIX.1-2001 (pax) format
DEFAULT_FORMAT = GNU_FORMAT

#---------------------------------------------------------
# tarfile constants
#---------------------------------------------------------
# File types that tarfile supports:
SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE,
                   SYMTYPE, DIRTYPE, FIFOTYPE,
                   CONTTYPE, CHRTYPE, BLKTYPE,
                   GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
                   GNUTYPE_SPARSE)

# File types that will be treated as a regular file.
REGULAR_TYPES = (REGTYPE, AREGTYPE,
                 CONTTYPE, GNUTYPE_SPARSE)

# File types that are part of the GNU tar format.
GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
             GNUTYPE_SPARSE)

# Fields from a pax header that override a TarInfo attribute.
PAX_FIELDS = ("path", "linkpath", "size", "mtime",
              "uid", "gid", "uname", "gname")

# Fields in a pax header that are numbers, all other fields
# are treated as strings.
PAX_NUMBER_FIELDS = {
    "atime": float,
    "ctime": float,
    "mtime": float,
    "uid": int,
    "gid": int,
    "size": int
}

#---------------------------------------------------------
# Bits used in the mode field, values in octal.
#---------------------------------------------------------
S_IFLNK = 0120000        # symbolic link
S_IFREG = 0100000        # regular file
S_IFBLK = 0060000        # block device
S_IFDIR = 0040000        # directory
S_IFCHR = 0020000        # character device
S_IFIFO = 0010000        # fifo

TSUID = 04000            # set UID on execution
TSGID = 02000            # set GID on execution
TSVTX = 01000            # reserved

TUREAD = 0400            # read by owner
TUWRITE = 0200           # write by owner
TUEXEC = 0100            # execute/search by owner
TGREAD = 0040            # read by group
TGWRITE = 0020           # write by group
TGEXEC = 0010            # execute/search by group
TOREAD = 0004            # read by other
TOWRITE = 0002           # write by other
TOEXEC = 0001            # execute/search by other

#---------------------------------------------------------
# initialization
#---------------------------------------------------------
# Encoding used for member names; fall back to the interpreter's
# default encoding when the filesystem encoding cannot be determined.
ENCODING = sys.getfilesystemencoding()
if ENCODING is None:
    ENCODING = sys.getdefaultencoding()
162
163#---------------------------------------------------------
164# Some useful functions
165#---------------------------------------------------------
166
def stn(s, length):
    """Truncate or NUL-pad *s* so the result is exactly *length* bytes."""
    padding = NUL * (length - len(s))
    return s[:length] + padding
171
def nts(s):
    """Return the part of *s* that precedes the first NUL byte.

    If *s* contains no NUL terminator, the whole string is returned.
    """
    # str.partition yields the full string as the head when the
    # separator is absent, which matches the original find() logic.
    return s.partition("\0")[0]
180
def nti(s):
    """Convert a number field to a python number.

    Raises InvalidHeaderError if the field is not valid octal.
    """
    # There are two possible encodings for a number field, see
    # itn() below.
    if s[0] != chr(0200):
        # Octal digits terminated by NUL and/or spaces.
        try:
            n = int(nts(s) or "0", 8)
        except ValueError:
            raise InvalidHeaderError("invalid header")
    else:
        # GNU base-256 encoding: big-endian number in the bytes
        # following the 0200 marker byte.
        n = 0L
        for i in xrange(len(s) - 1):
            n <<= 8
            n += ord(s[i + 1])
    return n
197
def itn(n, digits=8, format=DEFAULT_FORMAT):
    """Convert a python number to a number field.

    Returns a string of exactly *digits* bytes.  Raises ValueError when
    *n* does not fit into the field for the given *format*.
    """
    # POSIX 1003.1-1988 requires numbers to be encoded as a string of
    # octal digits followed by a null-byte, this allows values up to
    # (8**(digits-1))-1. GNU tar allows storing numbers greater than
    # that if necessary. A leading 0200 byte indicates this particular
    # encoding, the following digits-1 bytes are a big-endian
    # representation. This allows values up to (256**(digits-1))-1.
    if 0 <= n < 8 ** (digits - 1):
        s = "%0*o" % (digits - 1, n) + NUL
    else:
        if format != GNU_FORMAT or n >= 256 ** (digits - 1):
            raise ValueError("overflow in number field")

        if n < 0:
            # XXX We mimic GNU tar's behaviour with negative numbers,
            # this could raise OverflowError.
            n = struct.unpack("L", struct.pack("l", n))[0]

        # Emit the least significant byte first, prepending each one so
        # the result ends up big-endian behind the 0200 marker.
        s = ""
        for i in xrange(digits - 1):
            s = chr(n & 0377) + s
            n >>= 8
        s = chr(0200) + s
    return s
224
def uts(s, encoding, errors):
    """Convert a unicode object to a string.
    """
    if errors != "utf-8":
        return s.encode(encoding, errors)
    # An extra error handler similar to the -o invalid=UTF-8 option
    # in POSIX.1-2001: characters that cannot be represented in the
    # target encoding are stored as their UTF-8 byte sequence instead.
    try:
        return s.encode(encoding, "strict")
    except UnicodeEncodeError:
        chunks = []
        for ch in s:
            try:
                chunks.append(ch.encode(encoding, "strict"))
            except UnicodeEncodeError:
                chunks.append(ch.encode("utf8"))
        return "".join(chunks)
244
def calc_chksums(buf):
    """Calculate both checksums of a member's 512 byte header block.

    The chksum field itself (bytes 148..155) is skipped, i.e. treated
    as if it were filled with spaces.  According to the GNU tar
    sources, some tars (Sun and NeXT) sum the header with signed chars,
    which differs whenever a byte has the high bit set, so both an
    unsigned and a signed checksum are returned as a tuple.
    """
    # 256 == 8 * ord(" "): the eight space bytes standing in for the
    # chksum field.
    head = buf[:148]
    tail = buf[156:512]
    unsigned_chksum = 256 + sum(struct.unpack("148B", head)) \
                          + sum(struct.unpack("356B", tail))
    signed_chksum = 256 + sum(struct.unpack("148b", head)) \
                        + sum(struct.unpack("356b", tail))
    return unsigned_chksum, signed_chksum
257
def copyfileobj(src, dst, length=None):
    """Copy *length* bytes from fileobj *src* to fileobj *dst*.

    If *length* is None the whole remaining content is copied.  An
    IOError is raised when *src* is exhausted before *length* bytes
    could be transferred.
    """
    if length == 0:
        return
    if length is None:
        # Unbounded copy: defer to the standard library helper.
        shutil.copyfileobj(src, dst)
        return

    BUFSIZE = 16 * 1024
    full_blocks, tail = divmod(length, BUFSIZE)
    for _ in range(full_blocks):
        buf = src.read(BUFSIZE)
        if len(buf) < BUFSIZE:
            raise IOError("end of file reached")
        dst.write(buf)
    if tail:
        buf = src.read(tail)
        if len(buf) < tail:
            raise IOError("end of file reached")
        dst.write(buf)
    return
282
# Lookup table used by filemode() below.  The first inner tuple maps
# the file-type bits to a type character; the following nine tuples map
# permission bits (including the setuid/setgid/sticky combinations) to
# one character each.  Within a tuple the first matching entry wins.
filemode_table = (
    ((S_IFLNK, "l"),
     (S_IFREG, "-"),
     (S_IFBLK, "b"),
     (S_IFDIR, "d"),
     (S_IFCHR, "c"),
     (S_IFIFO, "p")),

    ((TUREAD, "r"),),
    ((TUWRITE, "w"),),
    ((TUEXEC|TSUID, "s"),
     (TSUID, "S"),
     (TUEXEC, "x")),

    ((TGREAD, "r"),),
    ((TGWRITE, "w"),),
    ((TGEXEC|TSGID, "s"),
     (TSGID, "S"),
     (TGEXEC, "x")),

    ((TOREAD, "r"),),
    ((TOWRITE, "w"),),
    ((TOEXEC|TSVTX, "t"),
     (TSVTX, "T"),
     (TOEXEC, "x"))
)
309
def filemode(mode):
    """Convert a file's mode to a string of the form
       -rwxrwxrwx.
       Used by TarFile.list()
    """
    chars = []
    for table in filemode_table:
        # Each table contributes exactly one character; "-" when no
        # bit pattern matches.
        ch = "-"
        for bit, char in table:
            if mode & bit == bit:
                ch = char
                break
        chars.append(ch)
    return "".join(chars)
324
# Exception hierarchy: TarError is the root; HeaderError and its
# subclasses report problems with individual 512-byte header blocks.
class TarError(Exception):
    """Base exception."""
    pass
class ExtractError(TarError):
    """General exception for extract errors."""
    pass
class ReadError(TarError):
    """Exception for unreadable tar archives."""
    pass
class CompressionError(TarError):
    """Exception for unavailable compression methods."""
    pass
class StreamError(TarError):
    """Exception for unsupported operations on stream-like TarFiles."""
    pass
class HeaderError(TarError):
    """Base exception for header errors."""
    pass
class EmptyHeaderError(HeaderError):
    """Exception for empty headers."""
    pass
class TruncatedHeaderError(HeaderError):
    """Exception for truncated headers."""
    pass
class EOFHeaderError(HeaderError):
    """Exception for end of file headers."""
    pass
class InvalidHeaderError(HeaderError):
    """Exception for invalid headers."""
    pass
class SubsequentHeaderError(HeaderError):
    """Exception for missing and invalid extended headers."""
    pass
358
359#---------------------------
360# internal stream interface
361#---------------------------
class _LowLevelFile:
    """Low-level file object. Supports reading and writing.
    It is used instead of a regular file object for streaming
    access.
    """

    def __init__(self, name, mode):
        # Translate the high-level mode character into os.open() flags.
        mode = {
            "r": os.O_RDONLY,
            "w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
        }[mode]
        if hasattr(os, "O_BINARY"):
            # Windows: suppress newline translation on the descriptor.
            mode |= os.O_BINARY
        self.fd = os.open(name, mode, 0666)

    def close(self):
        os.close(self.fd)

    def read(self, size):
        return os.read(self.fd, size)

    def write(self, s):
        os.write(self.fd, s)
385
class _Stream:
    """Class that serves as an adapter between TarFile and
    a stream-like object. The stream-like object only
    needs to have a read() or write() method and is accessed
    blockwise. Use of gzip or bzip2 compression is possible.
    A stream-like object could be for example: sys.stdin,
    sys.stdout, a socket, a tape device etc.

    _Stream is intended to be used only internally.
    """

    def __init__(self, name, mode, comptype, fileobj, bufsize):
        """Construct a _Stream object.
        """
        self._extfileobj = True
        if fileobj is None:
            fileobj = _LowLevelFile(name, mode)
            self._extfileobj = False

        if comptype == '*':
            # Enable transparent compression detection for the
            # stream interface
            fileobj = _StreamProxy(fileobj)
            comptype = fileobj.getcomptype()

        self.name = name or ""
        self.mode = mode
        self.comptype = comptype
        self.fileobj = fileobj
        self.bufsize = bufsize
        self.buf = ""           # raw (compressed) data buffer
        self.pos = 0L           # logical (uncompressed) stream position
        self.closed = False

        if comptype == "gz":
            try:
                import zlib
            except ImportError:
                raise CompressionError("zlib module is not available")
            self.zlib = zlib
            self.crc = zlib.crc32("") & 0xffffffffL
            if mode == "r":
                self._init_read_gz()
            else:
                self._init_write_gz()

        if comptype == "bz2":
            try:
                import bz2
            except ImportError:
                raise CompressionError("bz2 module is not available")
            if mode == "r":
                self.dbuf = ""  # decompressed read buffer
                self.cmp = bz2.BZ2Decompressor()
            else:
                self.cmp = bz2.BZ2Compressor()

    def __del__(self):
        # "closed" may not exist if __init__ raised early.
        if hasattr(self, "closed") and not self.closed:
            self.close()

    def _init_write_gz(self):
        """Initialize for writing with gzip compression.
        """
        self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED,
                                            -self.zlib.MAX_WBITS,
                                            self.zlib.DEF_MEM_LEVEL,
                                            0)
        timestamp = struct.pack("<L", long(time.time()))
        # Raw gzip header: magic, deflate method, FNAME flag, mtime.
        self.__write("\037\213\010\010%s\002\377" % timestamp)
        if self.name.endswith(".gz"):
            self.name = self.name[:-3]
        self.__write(self.name + NUL)

    def write(self, s):
        """Write string s to the stream.
        """
        if self.comptype == "gz":
            self.crc = self.zlib.crc32(s, self.crc) & 0xffffffffL
        self.pos += len(s)
        if self.comptype != "tar":
            s = self.cmp.compress(s)
        self.__write(s)

    def __write(self, s):
        """Write string s to the stream if a whole new block
        is ready to be written.
        """
        self.buf += s
        while len(self.buf) > self.bufsize:
            self.fileobj.write(self.buf[:self.bufsize])
            self.buf = self.buf[self.bufsize:]

    def close(self):
        """Close the _Stream object. No operation should be
        done on it afterwards.
        """
        if self.closed:
            return

        if self.mode == "w" and self.comptype != "tar":
            self.buf += self.cmp.flush()

        if self.mode == "w" and self.buf:
            self.fileobj.write(self.buf)
            self.buf = ""
            if self.comptype == "gz":
                # The native zlib crc is an unsigned 32-bit integer, but
                # the Python wrapper implicitly casts that to a signed C
                # long. So, on a 32-bit box self.crc may "look negative",
                # while the same crc on a 64-bit box may "look positive".
                # To avoid irksome warnings from the `struct` module, force
                # it to look positive on all boxes.
                self.fileobj.write(struct.pack("<L", self.crc & 0xffffffffL))
                self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFFL))

        if not self._extfileobj:
            self.fileobj.close()

        self.closed = True

    def _init_read_gz(self):
        """Initialize for reading a gzip compressed fileobj.
        """
        self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS)
        self.dbuf = ""

        # taken from gzip.GzipFile with some alterations
        if self.__read(2) != "\037\213":
            raise ReadError("not a gzip file")
        if self.__read(1) != "\010":
            raise CompressionError("unsupported compression method")

        flag = ord(self.__read(1))
        self.__read(6)

        if flag & 4:
            # FEXTRA: skip the extra field.
            xlen = ord(self.__read(1)) + 256 * ord(self.__read(1))
            self.read(xlen)
        if flag & 8:
            # FNAME: skip the zero-terminated file name.
            while True:
                s = self.__read(1)
                if not s or s == NUL:
                    break
        if flag & 16:
            # FCOMMENT: skip the zero-terminated comment.
            while True:
                s = self.__read(1)
                if not s or s == NUL:
                    break
        if flag & 2:
            # FHCRC: skip the header CRC.
            self.__read(2)

    def tell(self):
        """Return the stream's file pointer position.
        """
        return self.pos

    def seek(self, pos=0):
        """Set the stream's file pointer to pos. Negative seeking
        is forbidden.
        """
        if pos - self.pos >= 0:
            # Forward seek is emulated by reading and discarding data.
            blocks, remainder = divmod(pos - self.pos, self.bufsize)
            for i in xrange(blocks):
                self.read(self.bufsize)
            self.read(remainder)
        else:
            raise StreamError("seeking backwards is not allowed")
        return self.pos

    def read(self, size=None):
        """Return the next size number of bytes from the stream.
        If size is not defined, return all bytes of the stream
        up to EOF.
        """
        if size is None:
            t = []
            while True:
                buf = self._read(self.bufsize)
                if not buf:
                    break
                t.append(buf)
            buf = "".join(t)
        else:
            buf = self._read(size)
        self.pos += len(buf)
        return buf

    def _read(self, size):
        """Return size bytes from the stream.
        """
        if self.comptype == "tar":
            return self.__read(size)

        # Decompress blockwise until enough data has accumulated.
        c = len(self.dbuf)
        t = [self.dbuf]
        while c < size:
            buf = self.__read(self.bufsize)
            if not buf:
                break
            try:
                buf = self.cmp.decompress(buf)
            except IOError:
                raise ReadError("invalid compressed data")
            t.append(buf)
            c += len(buf)
        t = "".join(t)
        self.dbuf = t[size:]
        return t[:size]

    def __read(self, size):
        """Return size bytes from stream. If internal buffer is empty,
        read another block from the stream.
        """
        c = len(self.buf)
        t = [self.buf]
        while c < size:
            buf = self.fileobj.read(self.bufsize)
            if not buf:
                break
            t.append(buf)
            c += len(buf)
        t = "".join(t)
        self.buf = t[size:]
        return t[:size]
611# class _Stream
612
class _StreamProxy(object):
    """Small proxy class that enables transparent compression
    detection for the Stream interface (mode 'r|*').
    """

    def __init__(self, fileobj):
        self.fileobj = fileobj
        # Pre-read one block so the magic bytes can be inspected.
        self.buf = self.fileobj.read(BLOCKSIZE)

    def read(self, size):
        # Hand out the buffered block once, then rebind read() so all
        # further calls go straight to the underlying file object.
        self.read = self.fileobj.read
        return self.buf

    def getcomptype(self):
        """Guess the compression type from the buffered magic bytes."""
        for magic, comptype in (("\037\213\010", "gz"), ("BZh91", "bz2")):
            if self.buf.startswith(magic):
                return comptype
        return "tar"

    def close(self):
        self.fileobj.close()
# class StreamProxy
635# class StreamProxy
636
637class _BZ2Proxy(object):
638 """Small proxy class that enables external file object
639 support for "r:bz2" and "w:bz2" modes. This is actually
640 a workaround for a limitation in bz2 module's BZ2File
641 class which (unlike gzip.GzipFile) has no support for
642 a file object argument.
643 """
644
645 blocksize = 16 * 1024
646
647 def __init__(self, fileobj, mode):
648 self.fileobj = fileobj
649 self.mode = mode
650 self.name = getattr(self.fileobj, "name", None)
651 self.init()
652
653 def init(self):
654 import bz2
655 self.pos = 0
656 if self.mode == "r":
657 self.bz2obj = bz2.BZ2Decompressor()
658 self.fileobj.seek(0)
659 self.buf = ""
660 else:
661 self.bz2obj = bz2.BZ2Compressor()
662
663 def read(self, size):
664 b = [self.buf]
665 x = len(self.buf)
666 while x < size:
667 raw = self.fileobj.read(self.blocksize)
668 if not raw:
669 break
670 data = self.bz2obj.decompress(raw)
671 b.append(data)
672 x += len(data)
673 self.buf = "".join(b)
674
675 buf = self.buf[:size]
676 self.buf = self.buf[size:]
677 self.pos += len(buf)
678 return buf
679
680 def seek(self, pos):
681 if pos < self.pos:
682 self.init()
683 self.read(pos - self.pos)
684
685 def tell(self):
686 return self.pos
687
688 def write(self, data):
689 self.pos += len(data)
690 raw = self.bz2obj.compress(data)
691 self.fileobj.write(raw)
692
693 def close(self):
694 if self.mode == "w":
695 raw = self.bz2obj.flush()
696 self.fileobj.write(raw)
697# class _BZ2Proxy
698
699#------------------------
700# Extraction file object
701#------------------------
702class _FileInFile(object):
703 """A thin wrapper around an existing file object that
704 provides a part of its data as an individual file
705 object.
706 """
707
708 def __init__(self, fileobj, offset, size, sparse=None):
709 self.fileobj = fileobj
710 self.offset = offset
711 self.size = size
712 self.sparse = sparse
713 self.position = 0
714
715 def tell(self):
716 """Return the current file position.
717 """
718 return self.position
719
720 def seek(self, position):
721 """Seek to a position in the file.
722 """
723 self.position = position
724
725 def read(self, size=None):
726 """Read data from the file.
727 """
728 if size is None:
729 size = self.size - self.position
730 else:
731 size = min(size, self.size - self.position)
732
733 if self.sparse is None:
734 return self.readnormal(size)
735 else:
736 return self.readsparse(size)
737
738 def readnormal(self, size):
739 """Read operation for regular files.
740 """
741 self.fileobj.seek(self.offset + self.position)
742 self.position += size
743 return self.fileobj.read(size)
744
745 def readsparse(self, size):
746 """Read operation for sparse files.
747 """
748 data = []
749 while size > 0:
750 buf = self.readsparsesection(size)
751 if not buf:
752 break
753 size -= len(buf)
754 data.append(buf)
755 return "".join(data)
756
757 def readsparsesection(self, size):
758 """Read a single section of a sparse file.
759 """
760 section = self.sparse.find(self.position)
761
762 if section is None:
763 return ""
764
765 size = min(size, section.offset + section.size - self.position)
766
767 if isinstance(section, _data):
768 realpos = section.realpos + self.position - section.offset
769 self.fileobj.seek(self.offset + realpos)
770 self.position += size
771 return self.fileobj.read(size)
772 else:
773 self.position += size
774 return NUL * size
775#class _FileInFile
776
777
class ExFileObject(object):
    """File-like object for reading an archive member.
    Is returned by TarFile.extractfile().
    """
    # Number of bytes fetched per low-level read in readline().
    blocksize = 1024

    def __init__(self, tarfile, tarinfo):
        # All reads go through a _FileInFile window over the archive's
        # file object, restricted to this member's data area.
        self.fileobj = _FileInFile(tarfile.fileobj,
                                   tarinfo.offset_data,
                                   tarinfo.size,
                                   getattr(tarinfo, "sparse", None))
        self.name = tarinfo.name
        self.mode = "r"
        self.closed = False
        self.size = tarinfo.size

        self.position = 0   # logical read position
        self.buffer = ""    # data read ahead by readline()

    def read(self, size=None):
        """Read at most size bytes from the file. If size is not
        present or None, read all data until EOF is reached.
        """
        if self.closed:
            raise ValueError("I/O operation on closed file")

        buf = ""
        if self.buffer:
            # Serve buffered look-ahead data first.
            if size is None:
                buf = self.buffer
                self.buffer = ""
            else:
                buf = self.buffer[:size]
                self.buffer = self.buffer[size:]

        if size is None:
            buf += self.fileobj.read()
        else:
            buf += self.fileobj.read(size - len(buf))

        self.position += len(buf)
        return buf

    def readline(self, size=-1):
        """Read one entire line from the file. If size is present
        and non-negative, return a string with at most that
        size, which may be an incomplete line.
        """
        if self.closed:
            raise ValueError("I/O operation on closed file")

        if "\n" in self.buffer:
            pos = self.buffer.find("\n") + 1
        else:
            # Fill the look-ahead buffer blockwise until a newline
            # shows up or EOF is reached.
            buffers = [self.buffer]
            while True:
                buf = self.fileobj.read(self.blocksize)
                buffers.append(buf)
                if not buf or "\n" in buf:
                    self.buffer = "".join(buffers)
                    pos = self.buffer.find("\n") + 1
                    if pos == 0:
                        # no newline found.
                        pos = len(self.buffer)
                    break

        if size != -1:
            pos = min(size, pos)

        buf = self.buffer[:pos]
        self.buffer = self.buffer[pos:]
        self.position += len(buf)
        return buf

    def readlines(self):
        """Return a list with all remaining lines.
        """
        result = []
        while True:
            line = self.readline()
            if not line: break
            result.append(line)
        return result

    def tell(self):
        """Return the current file position.
        """
        if self.closed:
            raise ValueError("I/O operation on closed file")

        return self.position

    def seek(self, pos, whence=os.SEEK_SET):
        """Seek to a position in the file.

        The position is clamped to the member's size; only seeks
        within the member are possible.
        """
        if self.closed:
            raise ValueError("I/O operation on closed file")

        if whence == os.SEEK_SET:
            self.position = min(max(pos, 0), self.size)
        elif whence == os.SEEK_CUR:
            if pos < 0:
                self.position = max(self.position + pos, 0)
            else:
                self.position = min(self.position + pos, self.size)
        elif whence == os.SEEK_END:
            self.position = max(min(self.size + pos, self.size), 0)
        else:
            raise ValueError("Invalid argument")

        # Any buffered look-ahead data is invalid after a seek.
        self.buffer = ""
        self.fileobj.seek(self.position)

    def close(self):
        """Close the file object.
        """
        self.closed = True

    def __iter__(self):
        """Get an iterator over the file's lines.
        """
        while True:
            line = self.readline()
            if not line:
                break
            yield line
904#class ExFileObject
905
906#------------------
907# Exported Classes
908#------------------
909class TarInfo(object):
910 """Informational class which holds the details about an
911 archive member given by a tar header block.
912 TarInfo objects are returned by TarFile.getmember(),
913 TarFile.getmembers() and TarFile.gettarinfo() and are
914 usually created internally.
915 """
916
917 def __init__(self, name=""):
918 """Construct a TarInfo object. name is the optional name
919 of the member.
920 """
921 self.name = name # member name
922 self.mode = 0644 # file permissions
923 self.uid = 0 # user id
924 self.gid = 0 # group id
925 self.size = 0 # file size
926 self.mtime = 0 # modification time
927 self.chksum = 0 # header checksum
928 self.type = REGTYPE # member type
929 self.linkname = "" # link name
930 self.uname = "" # user name
931 self.gname = "" # group name
932 self.devmajor = 0 # device major number
933 self.devminor = 0 # device minor number
934
935 self.offset = 0 # the tar header starts here
936 self.offset_data = 0 # the file's data starts here
937
938 self.pax_headers = {} # pax header information
939
940 # In pax headers the "name" and "linkname" field are called
941 # "path" and "linkpath".
942 def _getpath(self):
943 return self.name
944 def _setpath(self, name):
945 self.name = name
946 path = property(_getpath, _setpath)
947
948 def _getlinkpath(self):
949 return self.linkname
950 def _setlinkpath(self, linkname):
951 self.linkname = linkname
952 linkpath = property(_getlinkpath, _setlinkpath)
953
954 def __repr__(self):
955 return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self))
956
957 def get_info(self, encoding, errors):
958 """Return the TarInfo's attributes as a dictionary.
959 """
960 info = {
961 "name": self.name,
962 "mode": self.mode & 07777,
963 "uid": self.uid,
964 "gid": self.gid,
965 "size": self.size,
966 "mtime": self.mtime,
967 "chksum": self.chksum,
968 "type": self.type,
969 "linkname": self.linkname,
970 "uname": self.uname,
971 "gname": self.gname,
972 "devmajor": self.devmajor,
973 "devminor": self.devminor
974 }
975
976 if info["type"] == DIRTYPE and not info["name"].endswith("/"):
977 info["name"] += "/"
978
979 for key in ("name", "linkname", "uname", "gname"):
980 if type(info[key]) is unicode:
981 info[key] = info[key].encode(encoding, errors)
982
983 return info
984
985 def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="strict"):
986 """Return a tar header as a string of 512 byte blocks.
987 """
988 info = self.get_info(encoding, errors)
989
990 if format == USTAR_FORMAT:
991 return self.create_ustar_header(info)
992 elif format == GNU_FORMAT:
993 return self.create_gnu_header(info)
994 elif format == PAX_FORMAT:
995 return self.create_pax_header(info, encoding, errors)
996 else:
997 raise ValueError("invalid format")
998
999 def create_ustar_header(self, info):
1000 """Return the object as a ustar header block.
1001 """
1002 info["magic"] = POSIX_MAGIC
1003
1004 if len(info["linkname"]) > LENGTH_LINK:
1005 raise ValueError("linkname is too long")
1006
1007 if len(info["name"]) > LENGTH_NAME:
1008 info["prefix"], info["name"] = self._posix_split_name(info["name"])
1009
1010 return self._create_header(info, USTAR_FORMAT)
1011
1012 def create_gnu_header(self, info):
1013 """Return the object as a GNU header block sequence.
1014 """
1015 info["magic"] = GNU_MAGIC
1016
1017 buf = ""
1018 if len(info["linkname"]) > LENGTH_LINK:
1019 buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK)
1020
1021 if len(info["name"]) > LENGTH_NAME:
1022 buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME)
1023
1024 return buf + self._create_header(info, GNU_FORMAT)
1025
1026 def create_pax_header(self, info, encoding, errors):
1027 """Return the object as a ustar header block. If it cannot be
1028 represented this way, prepend a pax extended header sequence
1029 with supplement information.
1030 """
1031 info["magic"] = POSIX_MAGIC
1032 pax_headers = self.pax_headers.copy()
1033
1034 # Test string fields for values that exceed the field length or cannot
1035 # be represented in ASCII encoding.
1036 for name, hname, length in (
1037 ("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK),
1038 ("uname", "uname", 32), ("gname", "gname", 32)):
1039
1040 if hname in pax_headers:
1041 # The pax header has priority.
1042 continue
1043
1044 val = info[name].decode(encoding, errors)
1045
1046 # Try to encode the string as ASCII.
1047 try:
1048 val.encode("ascii")
1049 except UnicodeEncodeError:
1050 pax_headers[hname] = val
1051 continue
1052
1053 if len(info[name]) > length:
1054 pax_headers[hname] = val
1055
1056 # Test number fields for values that exceed the field limit or values
1057 # that like to be stored as float.
1058 for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)):
1059 if name in pax_headers:
1060 # The pax header has priority. Avoid overflow.
1061 info[name] = 0
1062 continue
1063
1064 val = info[name]
1065 if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float):
1066 pax_headers[name] = unicode(val)
1067 info[name] = 0
1068
1069 # Create a pax extended header if necessary.
1070 if pax_headers:
1071 buf = self._create_pax_generic_header(pax_headers)
1072 else:
1073 buf = ""
1074
1075 return buf + self._create_header(info, USTAR_FORMAT)
1076
1077 @classmethod
1078 def create_pax_global_header(cls, pax_headers):
1079 """Return the object as a pax global header block sequence.
1080 """
1081 return cls._create_pax_generic_header(pax_headers, type=XGLTYPE)
1082
1083 def _posix_split_name(self, name):
1084 """Split a name longer than 100 chars into a prefix
1085 and a name part.
1086 """
1087 prefix = name[:LENGTH_PREFIX + 1]
1088 while prefix and prefix[-1] != "/":
1089 prefix = prefix[:-1]
1090
1091 name = name[len(prefix):]
1092 prefix = prefix[:-1]
1093
1094 if not prefix or len(name) > LENGTH_NAME:
1095 raise ValueError("name is too long")
1096 return prefix, name
1097
1098 @staticmethod
1099 def _create_header(info, format):
1100 """Return a header block. info is a dictionary with file
1101 information, format must be one of the *_FORMAT constants.
1102 """
1103 parts = [
1104 stn(info.get("name", ""), 100),
1105 itn(info.get("mode", 0) & 07777, 8, format),
1106 itn(info.get("uid", 0), 8, format),
1107 itn(info.get("gid", 0), 8, format),
1108 itn(info.get("size", 0), 12, format),
1109 itn(info.get("mtime", 0), 12, format),
1110 " ", # checksum field
1111 info.get("type", REGTYPE),
1112 stn(info.get("linkname", ""), 100),
1113 stn(info.get("magic", POSIX_MAGIC), 8),
1114 stn(info.get("uname", ""), 32),
1115 stn(info.get("gname", ""), 32),
1116 itn(info.get("devmajor", 0), 8, format),
1117 itn(info.get("devminor", 0), 8, format),
1118 stn(info.get("prefix", ""), 155)
1119 ]
1120
1121 buf = struct.pack("%ds" % BLOCKSIZE, "".join(parts))
1122 chksum = calc_chksums(buf[-BLOCKSIZE:])[0]
1123 buf = buf[:-364] + "%06o\0" % chksum + buf[-357:]
1124 return buf
1125
1126 @staticmethod
1127 def _create_payload(payload):
1128 """Return the string payload filled with zero bytes
1129 up to the next 512 byte border.
1130 """
1131 blocks, remainder = divmod(len(payload), BLOCKSIZE)
1132 if remainder > 0:
1133 payload += (BLOCKSIZE - remainder) * NUL
1134 return payload
1135
1136 @classmethod
1137 def _create_gnu_long_header(cls, name, type):
1138 """Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence
1139 for name.
1140 """
1141 name += NUL
1142
1143 info = {}
1144 info["name"] = "././@LongLink"
1145 info["type"] = type
1146 info["size"] = len(name)
1147 info["magic"] = GNU_MAGIC
1148
1149 # create extended header + name blocks.
1150 return cls._create_header(info, USTAR_FORMAT) + \
1151 cls._create_payload(name)
1152
    @classmethod
    def _create_pax_generic_header(cls, pax_headers, type=XHDTYPE):
        """Return a POSIX.1-2001 extended or global header sequence
           that contains a list of keyword, value pairs. The values
           must be unicode objects.
        """
        records = []
        for keyword, value in pax_headers.iteritems():
            keyword = keyword.encode("utf8")
            value = value.encode("utf8")
            l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n'
            # A pax record starts with its own total length in decimal,
            # which itself contributes digits to that length. Iterate to
            # the fixed point where the guess equals the final length.
            n = p = 0
            while True:
                n = l + len(str(p))
                if n == p:
                    break
                p = n
            records.append("%d %s=%s\n" % (p, keyword, value))
        records = "".join(records)

        # We use a hardcoded "././@PaxHeader" name like star does
        # instead of the one that POSIX recommends.
        info = {}
        info["name"] = "././@PaxHeader"
        info["type"] = type
        info["size"] = len(records)
        info["magic"] = POSIX_MAGIC

        # Create pax header + record blocks.
        return cls._create_header(info, USTAR_FORMAT) + \
               cls._create_payload(records)

    @classmethod
    def frombuf(cls, buf):
        """Construct a TarInfo object from a 512 byte string buffer.
           Raises a HeaderError subclass for empty, truncated, all-NUL
           (end-of-archive) or checksum-invalid blocks.
        """
        if len(buf) == 0:
            raise EmptyHeaderError("empty header")
        if len(buf) != BLOCKSIZE:
            raise TruncatedHeaderError("truncated header")
        if buf.count(NUL) == BLOCKSIZE:
            raise EOFHeaderError("end of file header")

        # The checksum field occupies bytes 148..155; calc_chksums()
        # yields both the signed and the unsigned sum, so archives from
        # broken archivers are still accepted.
        chksum = nti(buf[148:156])
        if chksum not in calc_chksums(buf):
            raise InvalidHeaderError("bad checksum")

        # Slice offsets below follow the POSIX ustar header layout.
        obj = cls()
        obj.buf = buf
        obj.name = nts(buf[0:100])
        obj.mode = nti(buf[100:108])
        obj.uid = nti(buf[108:116])
        obj.gid = nti(buf[116:124])
        obj.size = nti(buf[124:136])
        obj.mtime = nti(buf[136:148])
        obj.chksum = chksum
        obj.type = buf[156:157]
        obj.linkname = nts(buf[157:257])
        obj.uname = nts(buf[265:297])
        obj.gname = nts(buf[297:329])
        obj.devmajor = nti(buf[329:337])
        obj.devminor = nti(buf[337:345])
        prefix = nts(buf[345:500])

        # Old V7 tar format represents a directory as a regular
        # file with a trailing slash.
        if obj.type == AREGTYPE and obj.name.endswith("/"):
            obj.type = DIRTYPE

        # Remove redundant slashes from directories.
        if obj.isdir():
            obj.name = obj.name.rstrip("/")

        # Reconstruct a ustar longname.
        if prefix and obj.type not in GNU_TYPES:
            obj.name = prefix + "/" + obj.name
        return obj
1230
1231 @classmethod
1232 def fromtarfile(cls, tarfile):
1233 """Return the next TarInfo object from TarFile object
1234 tarfile.
1235 """
1236 buf = tarfile.fileobj.read(BLOCKSIZE)
1237 obj = cls.frombuf(buf)
1238 obj.offset = tarfile.fileobj.tell() - BLOCKSIZE
1239 return obj._proc_member(tarfile)
1240
1241 #--------------------------------------------------------------------------
1242 # The following are methods that are called depending on the type of a
1243 # member. The entry point is _proc_member() which can be overridden in a
1244 # subclass to add custom _proc_*() methods. A _proc_*() method MUST
1245 # implement the following
1246 # operations:
1247 # 1. Set self.offset_data to the position where the data blocks begin,
1248 # if there is data that follows.
1249 # 2. Set tarfile.offset to the position where the next member's header will
1250 # begin.
1251 # 3. Return self or another valid TarInfo object.
1252 def _proc_member(self, tarfile):
1253 """Choose the right processing method depending on
1254 the type and call it.
1255 """
1256 if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK):
1257 return self._proc_gnulong(tarfile)
1258 elif self.type == GNUTYPE_SPARSE:
1259 return self._proc_sparse(tarfile)
1260 elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE):
1261 return self._proc_pax(tarfile)
1262 else:
1263 return self._proc_builtin(tarfile)
1264
    def _proc_builtin(self, tarfile):
        """Process a builtin type or an unknown type which
           will be treated as a regular file. Sets offset_data and
           advances tarfile.offset past this member's data blocks.
        """
        self.offset_data = tarfile.fileobj.tell()
        offset = self.offset_data
        if self.isreg() or self.type not in SUPPORTED_TYPES:
            # Skip the following data blocks.
            offset += self._block(self.size)
        tarfile.offset = offset

        # Patch the TarInfo object with saved global
        # header information.
        self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors)

        return self

    def _proc_gnulong(self, tarfile):
        """Process the blocks that hold a GNU longname
           or longlink member. Returns the *next* member's TarInfo,
           patched with the long name/link read here.
        """
        buf = tarfile.fileobj.read(self._block(self.size))

        # Fetch the next header and process it.
        try:
            next = self.fromtarfile(tarfile)
        except HeaderError:
            raise SubsequentHeaderError("missing or bad subsequent header")

        # Patch the TarInfo object from the next header with
        # the longname information.
        next.offset = self.offset
        if self.type == GNUTYPE_LONGNAME:
            next.name = nts(buf)
        elif self.type == GNUTYPE_LONGLINK:
            next.linkname = nts(buf)

        return next
1303
    def _proc_sparse(self, tarfile):
        """Process a GNU sparse header plus extra headers. Builds a
           ring buffer of _hole/_data segments describing where real
           data lives inside the logical file.
        """
        buf = self.buf
        sp = _ringbuffer()
        # Byte 386 is where the sparse struct array starts in a GNU
        # sparse header; each entry is 12 bytes offset + 12 bytes size.
        pos = 386
        lastpos = 0L
        realpos = 0L
        # There are 4 possible sparse structs in the
        # first header.
        for i in xrange(4):
            try:
                offset = nti(buf[pos:pos + 12])
                numbytes = nti(buf[pos + 12:pos + 24])
            except ValueError:
                break
            if offset > lastpos:
                # Gap between segments becomes a hole.
                sp.append(_hole(lastpos, offset - lastpos))
            sp.append(_data(offset, numbytes, realpos))
            realpos += numbytes
            lastpos = offset + numbytes
            pos += 24

        # Byte 482 flags extension blocks; bytes 483..494 hold the
        # original (un-sparsed) file size.
        isextended = ord(buf[482])
        origsize = nti(buf[483:495])

        # If the isextended flag is given,
        # there are extra headers to process.
        while isextended == 1:
            buf = tarfile.fileobj.read(BLOCKSIZE)
            pos = 0
            # An extension block holds up to 21 sparse structs, with
            # its own isextended flag at byte 504.
            for i in xrange(21):
                try:
                    offset = nti(buf[pos:pos + 12])
                    numbytes = nti(buf[pos + 12:pos + 24])
                except ValueError:
                    break
                if offset > lastpos:
                    sp.append(_hole(lastpos, offset - lastpos))
                sp.append(_data(offset, numbytes, realpos))
                realpos += numbytes
                lastpos = offset + numbytes
                pos += 24
            isextended = ord(buf[504])

        if lastpos < origsize:
            # Trailing hole up to the original size.
            sp.append(_hole(lastpos, origsize - lastpos))

        self.sparse = sp

        self.offset_data = tarfile.fileobj.tell()
        tarfile.offset = self.offset_data + self._block(self.size)
        # Expose the logical (expanded) size, not the on-disk size.
        self.size = origsize

        return self

    def _proc_pax(self, tarfile):
        """Process an extended or global header as described in
           POSIX.1-2001. Returns the next member's TarInfo, patched
           with the pax records read here.
        """
        # Read the header information.
        buf = tarfile.fileobj.read(self._block(self.size))

        # A pax header stores supplemental information for either
        # the following file (extended) or all following files
        # (global).
        if self.type == XGLTYPE:
            pax_headers = tarfile.pax_headers
        else:
            pax_headers = tarfile.pax_headers.copy()

        # Parse pax header information. A record looks like that:
        # "%d %s=%s\n" % (length, keyword, value). length is the size
        # of the complete record including the length field itself and
        # the newline. keyword and value are both UTF-8 encoded strings.
        regex = re.compile(r"(\d+) ([^=]+)=", re.U)
        pos = 0
        while True:
            match = regex.match(buf, pos)
            if not match:
                break

            length, keyword = match.groups()
            length = int(length)
            # Value runs from just after '=' to just before the
            # trailing newline of the record.
            value = buf[match.end(2) + 1:match.start(1) + length - 1]

            keyword = keyword.decode("utf8")
            value = value.decode("utf8")

            pax_headers[keyword] = value
            pos += length

        # Fetch the next header.
        try:
            next = self.fromtarfile(tarfile)
        except HeaderError:
            raise SubsequentHeaderError("missing or bad subsequent header")

        if self.type in (XHDTYPE, SOLARIS_XHDTYPE):
            # Patch the TarInfo object with the extended header info.
            next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors)
            next.offset = self.offset

            if "size" in pax_headers:
                # If the extended header replaces the size field,
                # we need to recalculate the offset where the next
                # header starts.
                offset = next.offset_data
                if next.isreg() or next.type not in SUPPORTED_TYPES:
                    offset += next._block(next.size)
                tarfile.offset = offset

        return next

    def _apply_pax_info(self, pax_headers, encoding, errors):
        """Replace fields with supplemental information from a previous
           pax extended or global header. Unknown keywords are ignored;
           unparsable numeric values fall back to 0.
        """
        for keyword, value in pax_headers.iteritems():
            if keyword not in PAX_FIELDS:
                continue

            if keyword == "path":
                value = value.rstrip("/")

            if keyword in PAX_NUMBER_FIELDS:
                try:
                    value = PAX_NUMBER_FIELDS[keyword](value)
                except ValueError:
                    value = 0
            else:
                value = uts(value, encoding, errors)

            setattr(self, keyword, value)

        # Keep a private copy so later global headers cannot mutate it.
        self.pax_headers = pax_headers.copy()
1440
1441 def _block(self, count):
1442 """Round up a byte count by BLOCKSIZE and return it,
1443 e.g. _block(834) => 1024.
1444 """
1445 blocks, remainder = divmod(count, BLOCKSIZE)
1446 if remainder:
1447 blocks += 1
1448 return blocks * BLOCKSIZE
1449
    # Convenience predicates over self.type.
    def isreg(self):
        # Regular file (any of the REGTYPE variants).
        return self.type in REGULAR_TYPES
    def isfile(self):
        # Alias for isreg().
        return self.isreg()
    def isdir(self):
        return self.type == DIRTYPE
    def issym(self):
        # Symbolic link.
        return self.type == SYMTYPE
    def islnk(self):
        # Hard link.
        return self.type == LNKTYPE
    def ischr(self):
        # Character device node.
        return self.type == CHRTYPE
    def isblk(self):
        # Block device node.
        return self.type == BLKTYPE
    def isfifo(self):
        return self.type == FIFOTYPE
    def issparse(self):
        # GNU sparse file.
        return self.type == GNUTYPE_SPARSE
    def isdev(self):
        # Any device-like member (char, block or fifo).
        return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE)
1470# class TarInfo
1471
1472class TarFile(object):
1473 """The TarFile Class provides an interface to tar archives.
1474 """
1475
    # Class-level defaults; each can be overridden per instance via the
    # corresponding __init__ keyword argument.
    debug = 0 # May be set from 0 (no msgs) to 3 (all msgs)

    dereference = False # If true, add content of linked file to the
                        # tar file, else the link.

    ignore_zeros = False # If true, skips empty or invalid blocks and
                         # continues processing.

    errorlevel = 1 # If 0, fatal errors only appear in debug
                   # messages (if debug >= 0). If > 0, errors
                   # are passed to the caller as exceptions.

    format = DEFAULT_FORMAT # The format to use when creating an archive.

    encoding = ENCODING # Encoding for 8-bit character strings.

    errors = None # Error handler for unicode conversion.

    tarinfo = TarInfo # The default TarInfo class to use.

    fileobject = ExFileObject # The default ExFileObject class to use.
1497
    def __init__(self, name=None, mode="r", fileobj=None, format=None,
            tarinfo=None, dereference=None, ignore_zeros=None, encoding=None,
            errors=None, pax_headers=None, debug=None, errorlevel=None):
        """Open an (uncompressed) tar archive `name'. `mode' is either 'r' to
           read from an existing archive, 'a' to append data to an existing
           file or 'w' to create a new file overwriting an existing one. `mode'
           defaults to 'r'.
           If `fileobj' is given, it is used for reading or writing data. If it
           can be determined, `mode' is overridden by `fileobj's mode.
           `fileobj' is not closed, when TarFile is closed.
        """
        if len(mode) > 1 or mode not in "raw":
            raise ValueError("mode must be 'r', 'a' or 'w'")
        self.mode = mode
        # _mode is the underlying binary file mode matching `mode'.
        self._mode = {"r": "rb", "a": "r+b", "w": "wb"}[mode]

        if not fileobj:
            if self.mode == "a" and not os.path.exists(name):
                # Create nonexistent files in append mode.
                self.mode = "w"
                self._mode = "wb"
            fileobj = bltn_open(name, self._mode)
            # We opened the file ourselves, so close() must close it.
            self._extfileobj = False
        else:
            if name is None and hasattr(fileobj, "name"):
                name = fileobj.name
            if hasattr(fileobj, "mode"):
                self._mode = fileobj.mode
            self._extfileobj = True
        self.name = os.path.abspath(name) if name else None
        self.fileobj = fileobj

        # Init attributes.
        if format is not None:
            self.format = format
        if tarinfo is not None:
            self.tarinfo = tarinfo
        if dereference is not None:
            self.dereference = dereference
        if ignore_zeros is not None:
            self.ignore_zeros = ignore_zeros
        if encoding is not None:
            self.encoding = encoding

        if errors is not None:
            self.errors = errors
        elif mode == "r":
            self.errors = "utf-8"
        else:
            self.errors = "strict"

        # pax_headers are only honoured when writing PAX_FORMAT.
        if pax_headers is not None and self.format == PAX_FORMAT:
            self.pax_headers = pax_headers
        else:
            self.pax_headers = {}

        if debug is not None:
            self.debug = debug
        if errorlevel is not None:
            self.errorlevel = errorlevel

        # Init datastructures.
        self.closed = False
        self.members = [] # list of members as TarInfo objects
        self._loaded = False # flag if all members have been read
        self.offset = self.fileobj.tell()
                    # current position in the archive file
        self.inodes = {} # dictionary caching the inodes of
                         # archive members already added

        try:
            if self.mode == "r":
                self.firstmember = None
                self.firstmember = self.next()

            if self.mode == "a":
                # Move to the end of the archive,
                # before the first empty block.
                while True:
                    self.fileobj.seek(self.offset)
                    try:
                        tarinfo = self.tarinfo.fromtarfile(self)
                        self.members.append(tarinfo)
                    except EOFHeaderError:
                        self.fileobj.seek(self.offset)
                        break
                    except HeaderError, e:
                        raise ReadError(str(e))

            if self.mode in "aw":
                self._loaded = True

                if self.pax_headers:
                    # Write a pax global header up front.
                    buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy())
                    self.fileobj.write(buf)
                    self.offset += len(buf)
        except:
            # On any setup failure, release the file we opened and mark
            # the object closed before re-raising.
            if not self._extfileobj:
                self.fileobj.close()
            self.closed = True
            raise

    def _getposix(self):
        # Deprecated boolean view of `format'.
        return self.format == USTAR_FORMAT
    def _setposix(self, value):
        import warnings
        warnings.warn("use the format attribute instead", DeprecationWarning,
                      2)
        if value:
            self.format = USTAR_FORMAT
        else:
            self.format = GNU_FORMAT
    # Backwards-compatibility shim for the pre-`format' API.
    posix = property(_getposix, _setposix)
1611
1612 #--------------------------------------------------------------------------
1613 # Below are the classmethods which act as alternate constructors to the
1614 # TarFile class. The open() method is the only one that is needed for
1615 # public use; it is the "super"-constructor and is able to select an
1616 # adequate "sub"-constructor for a particular compression using the mapping
1617 # from OPEN_METH.
1618 #
1619 # This concept allows one to subclass TarFile without losing the comfort of
1620 # the super-constructor. A sub-constructor is registered and made available
1621 # by adding it to the mapping in OPEN_METH.
1622
    @classmethod
    def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs):
        """Open a tar archive for reading, writing or appending. Return
           an appropriate TarFile class.

           mode:
           'r' or 'r:*' open for reading with transparent compression
           'r:'         open for reading exclusively uncompressed
           'r:gz'       open for reading with gzip compression
           'r:bz2'      open for reading with bzip2 compression
           'a' or 'a:'  open for appending, creating the file if necessary
           'w' or 'w:'  open for writing without compression
           'w:gz'       open for writing with gzip compression
           'w:bz2'      open for writing with bzip2 compression

           'r|*'        open a stream of tar blocks with transparent compression
           'r|'         open an uncompressed stream of tar blocks for reading
           'r|gz'       open a gzip compressed stream of tar blocks
           'r|bz2'      open a bzip2 compressed stream of tar blocks
           'w|'         open an uncompressed stream for writing
           'w|gz'       open a gzip compressed stream for writing
           'w|bz2'      open a bzip2 compressed stream for writing
        """

        if not name and not fileobj:
            raise ValueError("nothing to open")

        if mode in ("r", "r:*"):
            # Find out which *open() is appropriate for opening the file.
            for comptype in cls.OPEN_METH:
                func = getattr(cls, cls.OPEN_METH[comptype])
                if fileobj is not None:
                    saved_pos = fileobj.tell()
                try:
                    return func(name, "r", fileobj, **kwargs)
                except (ReadError, CompressionError), e:
                    # Wrong compression handler: rewind and try the next.
                    if fileobj is not None:
                        fileobj.seek(saved_pos)
                    continue
            raise ReadError("file could not be opened successfully")

        elif ":" in mode:
            filemode, comptype = mode.split(":", 1)
            filemode = filemode or "r"
            comptype = comptype or "tar"

            # Select the *open() function according to
            # given compression.
            if comptype in cls.OPEN_METH:
                func = getattr(cls, cls.OPEN_METH[comptype])
            else:
                raise CompressionError("unknown compression type %r" % comptype)
            return func(name, filemode, fileobj, **kwargs)

        elif "|" in mode:
            filemode, comptype = mode.split("|", 1)
            filemode = filemode or "r"
            comptype = comptype or "tar"

            if filemode not in "rw":
                raise ValueError("mode must be 'r' or 'w'")

            # Stream mode: wrap the data in a non-seekable _Stream and
            # make the TarFile responsible for closing it.
            t = cls(name, filemode,
                    _Stream(name, filemode, comptype, fileobj, bufsize),
                    **kwargs)
            t._extfileobj = False
            return t

        elif mode in "aw":
            return cls.taropen(name, mode, fileobj, **kwargs)

        raise ValueError("undiscernible mode")
1695
1696 @classmethod
1697 def taropen(cls, name, mode="r", fileobj=None, **kwargs):
1698 """Open uncompressed tar archive name for reading or writing.
1699 """
1700 if len(mode) > 1 or mode not in "raw":
1701 raise ValueError("mode must be 'r', 'a' or 'w'")
1702 return cls(name, mode, fileobj, **kwargs)
1703
1704 @classmethod
1705 def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
1706 """Open gzip compressed tar archive name for reading or writing.
1707 Appending is not allowed.
1708 """
1709 if len(mode) > 1 or mode not in "rw":
1710 raise ValueError("mode must be 'r' or 'w'")
1711
1712 try:
1713 import gzip
1714 gzip.GzipFile
1715 except (ImportError, AttributeError):
1716 raise CompressionError("gzip module is not available")
1717
1718 if fileobj is None:
1719 fileobj = bltn_open(name, mode + "b")
1720
1721 try:
1722 t = cls.taropen(name, mode,
1723 gzip.GzipFile(name, mode, compresslevel, fileobj),
1724 **kwargs)
1725 except IOError:
1726 raise ReadError("not a gzip file")
1727 t._extfileobj = False
1728 return t
1729
1730 @classmethod
1731 def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
1732 """Open bzip2 compressed tar archive name for reading or writing.
1733 Appending is not allowed.
1734 """
1735 if len(mode) > 1 or mode not in "rw":
1736 raise ValueError("mode must be 'r' or 'w'.")
1737
1738 try:
1739 import bz2
1740 except ImportError:
1741 raise CompressionError("bz2 module is not available")
1742
1743 if fileobj is not None:
1744 fileobj = _BZ2Proxy(fileobj, mode)
1745 else:
1746 fileobj = bz2.BZ2File(name, mode, compresslevel=compresslevel)
1747
1748 try:
1749 t = cls.taropen(name, mode, fileobj, **kwargs)
1750 except (IOError, EOFError):
1751 raise ReadError("not a bzip2 file")
1752 t._extfileobj = False
1753 return t
1754
1755 # All *open() methods are registered here.
1756 OPEN_METH = {
1757 "tar": "taropen", # uncompressed tar
1758 "gz": "gzopen", # gzip compressed tar
1759 "bz2": "bz2open" # bzip2 compressed tar
1760 }
1761
1762 #--------------------------------------------------------------------------
1763 # The public methods which TarFile provides:
1764
    def close(self):
        """Close the TarFile. In write-mode, two finishing zero blocks are
           appended to the archive. Safe to call more than once.
        """
        if self.closed:
            return

        if self.mode in "aw":
            # An end-of-archive marker is two consecutive zero blocks.
            self.fileobj.write(NUL * (BLOCKSIZE * 2))
            self.offset += (BLOCKSIZE * 2)
            # fill up the end with zero-blocks
            # (like option -b20 for tar does)
            blocks, remainder = divmod(self.offset, RECORDSIZE)
            if remainder > 0:
                self.fileobj.write(NUL * (RECORDSIZE - remainder))

        if not self._extfileobj:
            # Only close file objects we opened ourselves.
            self.fileobj.close()
        self.closed = True
1784
1785 def getmember(self, name):
1786 """Return a TarInfo object for member `name'. If `name' can not be
1787 found in the archive, KeyError is raised. If a member occurs more
1788 than once in the archive, its last occurrence is assumed to be the
1789 most up-to-date version.
1790 """
1791 tarinfo = self._getmember(name)
1792 if tarinfo is None:
1793 raise KeyError("filename %r not found" % name)
1794 return tarinfo
1795
    def getmembers(self):
        """Return the members of the archive as a list of TarInfo objects. The
           list has the same order as the members in the archive.
        """
        self._check()
        if not self._loaded: # if we want to obtain a list of
            self._load() # all members, we first have to
                         # scan the whole archive.
        return self.members
1805
1806 def getnames(self):
1807 """Return the members of the archive as a list of their names. It has
1808 the same order as the list returned by getmembers().
1809 """
1810 return [tarinfo.name for tarinfo in self.getmembers()]
1811
    def gettarinfo(self, name=None, arcname=None, fileobj=None):
        """Create a TarInfo object for either the file `name' or the file
           object `fileobj' (using os.fstat on its file descriptor). You can
           modify some of the TarInfo's attributes before you add it using
           addfile(). If given, `arcname' specifies an alternative name for the
           file in the archive. Returns None for unsupported file types
           (e.g. sockets).
        """
        self._check("aw")

        # When fileobj is given, replace name by
        # fileobj's real name.
        if fileobj is not None:
            name = fileobj.name

        # Building the name of the member in the archive.
        # Backward slashes are converted to forward slashes,
        # Absolute paths are turned to relative paths.
        if arcname is None:
            arcname = name
        drv, arcname = os.path.splitdrive(arcname)
        arcname = arcname.replace(os.sep, "/")
        arcname = arcname.lstrip("/")

        # Now, fill the TarInfo object with
        # information specific for the file.
        tarinfo = self.tarinfo()
        tarinfo.tarfile = self

        # Use os.stat or os.lstat, depending on platform
        # and if symlinks shall be resolved.
        if fileobj is None:
            if hasattr(os, "lstat") and not self.dereference:
                statres = os.lstat(name)
            else:
                statres = os.stat(name)
        else:
            statres = os.fstat(fileobj.fileno())
        linkname = ""

        stmd = statres.st_mode
        if stat.S_ISREG(stmd):
            inode = (statres.st_ino, statres.st_dev)
            if not self.dereference and statres.st_nlink > 1 and \
                    inode in self.inodes and arcname != self.inodes[inode]:
                # Is it a hardlink to an already
                # archived file?
                type = LNKTYPE
                linkname = self.inodes[inode]
            else:
                # The inode is added only if its valid.
                # For win32 it is always 0.
                type = REGTYPE
                if inode[0]:
                    self.inodes[inode] = arcname
        elif stat.S_ISDIR(stmd):
            type = DIRTYPE
        elif stat.S_ISFIFO(stmd):
            type = FIFOTYPE
        elif stat.S_ISLNK(stmd):
            type = SYMTYPE
            linkname = os.readlink(name)
        elif stat.S_ISCHR(stmd):
            type = CHRTYPE
        elif stat.S_ISBLK(stmd):
            type = BLKTYPE
        else:
            # Unsupported file type (e.g. socket).
            return None

        # Fill the TarInfo object with all
        # information we can get.
        tarinfo.name = arcname
        tarinfo.mode = stmd
        tarinfo.uid = statres.st_uid
        tarinfo.gid = statres.st_gid
        if type == REGTYPE:
            tarinfo.size = statres.st_size
        else:
            # Only regular files carry data blocks.
            tarinfo.size = 0L
        tarinfo.mtime = statres.st_mtime
        tarinfo.type = type
        tarinfo.linkname = linkname
        # Resolve numeric ids to names when pwd/grp are available;
        # unknown ids simply keep the empty default names.
        if pwd:
            try:
                tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
            except KeyError:
                pass
        if grp:
            try:
                tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
            except KeyError:
                pass

        if type in (CHRTYPE, BLKTYPE):
            if hasattr(os, "major") and hasattr(os, "minor"):
                tarinfo.devmajor = os.major(statres.st_rdev)
                tarinfo.devminor = os.minor(statres.st_rdev)
        return tarinfo
1909
    def list(self, verbose=True):
        """Print a table of contents to sys.stdout. If `verbose' is False, only
           the names of the members are printed. If it is True, an `ls -l'-like
           output is produced.
        """
        self._check()

        # Python 2 print statements with trailing commas keep each
        # member's fields on a single output line.
        for tarinfo in self:
            if verbose:
                print filemode(tarinfo.mode),
                print "%s/%s" % (tarinfo.uname or tarinfo.uid,
                                 tarinfo.gname or tarinfo.gid),
                if tarinfo.ischr() or tarinfo.isblk():
                    # Devices show major,minor instead of a size.
                    print "%10s" % ("%d,%d" \
                                    % (tarinfo.devmajor, tarinfo.devminor)),
                else:
                    print "%10d" % tarinfo.size,
                print "%d-%02d-%02d %02d:%02d:%02d" \
                      % time.localtime(tarinfo.mtime)[:6],

            print tarinfo.name + ("/" if tarinfo.isdir() else ""),

            if verbose:
                if tarinfo.issym():
                    print "->", tarinfo.linkname,
                if tarinfo.islnk():
                    print "link to", tarinfo.linkname,
                print
1938
    def add(self, name, arcname=None, recursive=True, exclude=None, filter=None):
        """Add the file `name' to the archive. `name' may be any type of file
           (directory, fifo, symbolic link, etc.). If given, `arcname'
           specifies an alternative name for the file in the archive.
           Directories are added recursively by default. This can be avoided by
           setting `recursive' to False. `exclude' is a function that should
           return True for each filename to be excluded (deprecated; use
           `filter' instead). `filter' is a function that expects a TarInfo
           object argument and returns the changed TarInfo object, if it
           returns None the TarInfo object will be excluded from the archive.
        """
        self._check("aw")

        if arcname is None:
            arcname = name

        # Exclude pathnames.
        if exclude is not None:
            import warnings
            warnings.warn("use the filter argument instead",
                    DeprecationWarning, 2)
            if exclude(name):
                self._dbg(2, "tarfile: Excluded %r" % name)
                return

        # Skip if somebody tries to archive the archive...
        if self.name is not None and os.path.abspath(name) == self.name:
            self._dbg(2, "tarfile: Skipped %r" % name)
            return

        self._dbg(1, name)

        # Create a TarInfo object from the file.
        tarinfo = self.gettarinfo(name, arcname)

        if tarinfo is None:
            self._dbg(1, "tarfile: Unsupported type %r" % name)
            return

        # Change or exclude the TarInfo object.
        if filter is not None:
            tarinfo = filter(tarinfo)
            if tarinfo is None:
                self._dbg(2, "tarfile: Excluded %r" % name)
                return

        # Append the tar header and data to the archive.
        if tarinfo.isreg():
            f = bltn_open(name, "rb")
            self.addfile(tarinfo, f)
            f.close()

        elif tarinfo.isdir():
            self.addfile(tarinfo)
            if recursive:
                # Recurse into the directory, carrying the same options.
                for f in os.listdir(name):
                    self.add(os.path.join(name, f), os.path.join(arcname, f),
                            recursive, exclude, filter)

        else:
            # Links, fifos, devices: header only, no data.
            self.addfile(tarinfo)
2000
    def addfile(self, tarinfo, fileobj=None):
        """Add the TarInfo object `tarinfo' to the archive. If `fileobj' is
           given, tarinfo.size bytes are read from it and added to the archive.
           You can create TarInfo objects using gettarinfo().
           On Windows platforms, `fileobj' should always be opened with mode
           'rb' to avoid irritation about the file size.
        """
        self._check("aw")

        # Work on a copy so later mutations by the caller do not affect
        # the stored member list.
        tarinfo = copy.copy(tarinfo)

        buf = tarinfo.tobuf(self.format, self.encoding, self.errors)
        self.fileobj.write(buf)
        self.offset += len(buf)

        # If there's data to follow, append it.
        if fileobj is not None:
            copyfileobj(fileobj, self.fileobj, tarinfo.size)
            blocks, remainder = divmod(tarinfo.size, BLOCKSIZE)
            if remainder > 0:
                # Pad the final partial block with NULs.
                self.fileobj.write(NUL * (BLOCKSIZE - remainder))
                blocks += 1
            self.offset += blocks * BLOCKSIZE

        self.members.append(tarinfo)
2026
    def extractall(self, path=".", members=None):
        """Extract all members from the archive to the current working
           directory and set owner, modification time and permissions on
           directories afterwards. `path' specifies a different directory
           to extract to. `members' is optional and must be a subset of the
           list returned by getmembers().
        """
        directories = []

        if members is None:
            members = self

        for tarinfo in members:
            if tarinfo.isdir():
                # Extract directories with a safe mode.
                directories.append(tarinfo)
                tarinfo = copy.copy(tarinfo)
                tarinfo.mode = 0700
            self.extract(tarinfo, path)

        # Reverse sort directories so children are fixed up before
        # their parents become read-only.
        directories.sort(key=operator.attrgetter('name'))
        directories.reverse()

        # Set correct owner, mtime and filemode on directories.
        for tarinfo in directories:
            dirpath = os.path.join(path, tarinfo.name)
            try:
                self.chown(tarinfo, dirpath)
                self.utime(tarinfo, dirpath)
                self.chmod(tarinfo, dirpath)
            except ExtractError, e:
                # errorlevel <= 1: downgrade metadata failures to a
                # debug message instead of raising.
                if self.errorlevel > 1:
                    raise
                else:
                    self._dbg(1, "tarfile: %s" % e)
2063
    def extract(self, member, path=""):
        """Extract a member from the archive to the current working directory,
           using its full name. Its file information is extracted as accurately
           as possible. `member' may be a filename or a TarInfo object. You can
           specify a different directory using `path'.
        """
        self._check("r")

        if isinstance(member, basestring):
            tarinfo = self.getmember(member)
        else:
            tarinfo = member

        # Prepare the link target for makelink().
        if tarinfo.islnk():
            tarinfo._link_target = os.path.join(path, tarinfo.linkname)

        try:
            self._extract_member(tarinfo, os.path.join(path, tarinfo.name))
        except EnvironmentError, e:
            # errorlevel 0: OS errors are only logged, not raised.
            if self.errorlevel > 0:
                raise
            else:
                if e.filename is None:
                    self._dbg(1, "tarfile: %s" % e.strerror)
                else:
                    self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename))
        except ExtractError, e:
            # errorlevel <= 1: non-fatal extraction problems are logged.
            if self.errorlevel > 1:
                raise
            else:
                self._dbg(1, "tarfile: %s" % e)
2096
2097 def extractfile(self, member):
2098 """Extract a member from the archive as a file object. `member' may be
2099 a filename or a TarInfo object. If `member' is a regular file, a
2100 file-like object is returned. If `member' is a link, a file-like
2101 object is constructed from the link's target. If `member' is none of
2102 the above, None is returned.
2103 The file-like object is read-only and provides the following
2104 methods: read(), readline(), readlines(), seek() and tell()
2105 """
2106 self._check("r")
2107
2108 if isinstance(member, basestring):
2109 tarinfo = self.getmember(member)
2110 else:
2111 tarinfo = member
2112
2113 if tarinfo.isreg():
2114 return self.fileobject(self, tarinfo)
2115
2116 elif tarinfo.type not in SUPPORTED_TYPES:
2117 # If a member's type is unknown, it is treated as a
2118 # regular file.
2119 return self.fileobject(self, tarinfo)
2120
2121 elif tarinfo.islnk() or tarinfo.issym():
2122 if isinstance(self.fileobj, _Stream):
2123 # A small but ugly workaround for the case that someone tries
2124 # to extract a (sym)link as a file-object from a non-seekable
2125 # stream of tar blocks.
2126 raise StreamError("cannot extract (sym)link as file object")
2127 else:
2128 # A (sym)link's file object is its target's file object.
2129 return self.extractfile(self._find_link_target(tarinfo))
2130 else:
2131 # If there's no data associated with the member (directory, chrdev,
2132 # blkdev, etc.), return None instead of a file object.
2133 return None
2134
    def _extract_member(self, tarinfo, targetpath):
        """Extract the TarInfo object tarinfo to a physical
        file called targetpath.

        Dispatches on the member type to one of the make*() methods
        below, then applies ownership, permissions and timestamps.
        """
        # Fetch the TarInfo object for the given name
        # and build the destination pathname, replacing
        # forward slashes to platform specific separators.
        targetpath = targetpath.rstrip("/")
        targetpath = targetpath.replace("/", os.sep)

        # Create all upper directories.
        upperdirs = os.path.dirname(targetpath)
        if upperdirs and not os.path.exists(upperdirs):
            # Create directories that are not part of the archive with
            # default permissions.
            os.makedirs(upperdirs)

        # Debug output: links are shown with their target.
        if tarinfo.islnk() or tarinfo.issym():
            self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname))
        else:
            self._dbg(1, tarinfo.name)

        if tarinfo.isreg():
            self.makefile(tarinfo, targetpath)
        elif tarinfo.isdir():
            self.makedir(tarinfo, targetpath)
        elif tarinfo.isfifo():
            self.makefifo(tarinfo, targetpath)
        elif tarinfo.ischr() or tarinfo.isblk():
            self.makedev(tarinfo, targetpath)
        elif tarinfo.islnk() or tarinfo.issym():
            self.makelink(tarinfo, targetpath)
        elif tarinfo.type not in SUPPORTED_TYPES:
            # Members of unknown type fall back to regular-file extraction.
            self.makeunknown(tarinfo, targetpath)
        else:
            self.makefile(tarinfo, targetpath)

        self.chown(tarinfo, targetpath)
        # Mode and mtime are only applied to non-symlink entries.
        if not tarinfo.issym():
            self.chmod(tarinfo, targetpath)
            self.utime(tarinfo, targetpath)
2176
2177 #--------------------------------------------------------------------------
2178 # Below are the different file methods. They are called via
2179 # _extract_member() when extract() is called. They can be replaced in a
2180 # subclass to implement other functionality.
2181
2182 def makedir(self, tarinfo, targetpath):
2183 """Make a directory called targetpath.
2184 """
2185 try:
2186 # Use a safe mode for the directory, the real mode is set
2187 # later in _extract_member().
2188 os.mkdir(targetpath, 0700)
2189 except EnvironmentError, e:
2190 if e.errno != errno.EEXIST:
2191 raise
2192
2193 def makefile(self, tarinfo, targetpath):
2194 """Make a file called targetpath.
2195 """
2196 source = self.extractfile(tarinfo)
2197 target = bltn_open(targetpath, "wb")
2198 copyfileobj(source, target)
2199 source.close()
2200 target.close()
2201
2202 def makeunknown(self, tarinfo, targetpath):
2203 """Make a file from a TarInfo object with an unknown type
2204 at targetpath.
2205 """
2206 self.makefile(tarinfo, targetpath)
2207 self._dbg(1, "tarfile: Unknown file type %r, " \
2208 "extracted as regular file." % tarinfo.type)
2209
2210 def makefifo(self, tarinfo, targetpath):
2211 """Make a fifo called targetpath.
2212 """
2213 if hasattr(os, "mkfifo"):
2214 os.mkfifo(targetpath)
2215 else:
2216 raise ExtractError("fifo not supported by system")
2217
2218 def makedev(self, tarinfo, targetpath):
2219 """Make a character or block device called targetpath.
2220 """
2221 if not hasattr(os, "mknod") or not hasattr(os, "makedev"):
2222 raise ExtractError("special devices not supported by system")
2223
2224 mode = tarinfo.mode
2225 if tarinfo.isblk():
2226 mode |= stat.S_IFBLK
2227 else:
2228 mode |= stat.S_IFCHR
2229
2230 os.mknod(targetpath, mode,
2231 os.makedev(tarinfo.devmajor, tarinfo.devminor))
2232
    def makelink(self, tarinfo, targetpath):
        """Make a (symbolic) link called targetpath. If it cannot be created
        (platform limitation), we try to make a copy of the referenced file
        instead of a link.
        """
        if hasattr(os, "symlink") and hasattr(os, "link"):
            # For systems that support symbolic and hard links.
            if tarinfo.issym():
                # Remove any existing entry at targetpath before linking.
                if os.path.lexists(targetpath):
                    os.unlink(targetpath)
                os.symlink(tarinfo.linkname, targetpath)
            else:
                # See extract().
                if os.path.exists(tarinfo._link_target):
                    if os.path.lexists(targetpath):
                        os.unlink(targetpath)
                    os.link(tarinfo._link_target, targetpath)
                else:
                    # The hard link's target is not on disk yet: extract
                    # the archived member it refers to instead.
                    self._extract_member(self._find_link_target(tarinfo), targetpath)
        else:
            # Platform without link support: extract the referenced member
            # as a plain copy of its content.
            try:
                self._extract_member(self._find_link_target(tarinfo), targetpath)
            except KeyError:
                raise ExtractError("unable to resolve link inside archive")
2257
    def chown(self, tarinfo, targetpath):
        """Set owner of targetpath according to tarinfo.

        Only attempted when the pwd module is available and we are
        running as root; otherwise this is silently a no-op.
        """
        if pwd and hasattr(os, "geteuid") and os.geteuid() == 0:
            # We have to be root to do so.
            # Resolve the group: prefer the symbolic name stored in the
            # archive, fall back to the numeric gid, then to our own gid.
            try:
                g = grp.getgrnam(tarinfo.gname)[2]
            except KeyError:
                try:
                    g = grp.getgrgid(tarinfo.gid)[2]
                except KeyError:
                    g = os.getgid()
            # Same lookup chain for the owning user.
            try:
                u = pwd.getpwnam(tarinfo.uname)[2]
            except KeyError:
                try:
                    u = pwd.getpwuid(tarinfo.uid)[2]
                except KeyError:
                    u = os.getuid()
            try:
                if tarinfo.issym() and hasattr(os, "lchown"):
                    # Change the link itself, not the file it points to.
                    os.lchown(targetpath, u, g)
                else:
                    # NOTE(review): the os2emx exclusion comes from upstream
                    # tarfile; presumably chown is unusable there -- verify.
                    if sys.platform != "os2emx":
                        os.chown(targetpath, u, g)
            except EnvironmentError, e:
                raise ExtractError("could not change owner")
2285
2286 def chmod(self, tarinfo, targetpath):
2287 """Set file permissions of targetpath according to tarinfo.
2288 """
2289 if hasattr(os, 'chmod'):
2290 try:
2291 os.chmod(targetpath, tarinfo.mode)
2292 except EnvironmentError, e:
2293 raise ExtractError("could not change mode")
2294
2295 def utime(self, tarinfo, targetpath):
2296 """Set modification time of targetpath according to tarinfo.
2297 """
2298 if not hasattr(os, 'utime'):
2299 return
2300 try:
2301 os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime))
2302 except EnvironmentError, e:
2303 raise ExtractError("could not change modification time")
2304
2305 #--------------------------------------------------------------------------
    def next(self):
        """Return the next member of the archive as a TarInfo object, when
        TarFile is opened for reading. Return None if there is no more
        available.
        """
        self._check("ra")
        # A member may already have been read ahead; hand it out first.
        if self.firstmember is not None:
            m = self.firstmember
            self.firstmember = None
            return m

        # Read the next block.
        self.fileobj.seek(self.offset)
        tarinfo = None
        while True:
            try:
                tarinfo = self.tarinfo.fromtarfile(self)
            except EOFHeaderError, e:
                # All-zero block: with ignore_zeros, skip it and keep
                # scanning; otherwise treat it as end of archive.
                if self.ignore_zeros:
                    self._dbg(2, "0x%X: %s" % (self.offset, e))
                    self.offset += BLOCKSIZE
                    continue
            except InvalidHeaderError, e:
                if self.ignore_zeros:
                    self._dbg(2, "0x%X: %s" % (self.offset, e))
                    self.offset += BLOCKSIZE
                    continue
                elif self.offset == 0:
                    # A bad header at offset 0 means this is no tar file.
                    raise ReadError(str(e))
            except EmptyHeaderError:
                if self.offset == 0:
                    raise ReadError("empty file")
            except TruncatedHeaderError, e:
                if self.offset == 0:
                    raise ReadError(str(e))
            except SubsequentHeaderError, e:
                # A broken header after valid members is always fatal.
                raise ReadError(str(e))
            break

        if tarinfo is not None:
            self.members.append(tarinfo)
        else:
            # No further members: mark the archive as fully scanned.
            self._loaded = True

        return tarinfo
2351
2352 #--------------------------------------------------------------------------
2353 # Little helper methods:
2354
2355 def _getmember(self, name, tarinfo=None, normalize=False):
2356 """Find an archive member by name from bottom to top.
2357 If tarinfo is given, it is used as the starting point.
2358 """
2359 # Ensure that all members have been loaded.
2360 members = self.getmembers()
2361
2362 # Limit the member search list up to tarinfo.
2363 if tarinfo is not None:
2364 members = members[:members.index(tarinfo)]
2365
2366 if normalize:
2367 name = os.path.normpath(name)
2368
2369 for member in reversed(members):
2370 if normalize:
2371 member_name = os.path.normpath(member.name)
2372 else:
2373 member_name = member.name
2374
2375 if name == member_name:
2376 return member
2377
2378 def _load(self):
2379 """Read through the entire archive file and look for readable
2380 members.
2381 """
2382 while True:
2383 tarinfo = self.next()
2384 if tarinfo is None:
2385 break
2386 self._loaded = True
2387
2388 def _check(self, mode=None):
2389 """Check if TarFile is still open, and if the operation's mode
2390 corresponds to TarFile's mode.
2391 """
2392 if self.closed:
2393 raise IOError("%s is closed" % self.__class__.__name__)
2394 if mode is not None and self.mode not in mode:
2395 raise IOError("bad operation for mode %r" % self.mode)
2396
2397 def _find_link_target(self, tarinfo):
2398 """Find the target member of a symlink or hardlink member in the
2399 archive.
2400 """
2401 if tarinfo.issym():
2402 # Always search the entire archive.
2403 linkname = os.path.dirname(tarinfo.name) + "/" + tarinfo.linkname
2404 limit = None
2405 else:
2406 # Search the archive before the link, because a hard link is
2407 # just a reference to an already archived file.
2408 linkname = tarinfo.linkname
2409 limit = tarinfo
2410
2411 member = self._getmember(linkname, tarinfo=limit, normalize=True)
2412 if member is None:
2413 raise KeyError("linkname %r not found" % linkname)
2414 return member
2415
2416 def __iter__(self):
2417 """Provide an iterator object.
2418 """
2419 if self._loaded:
2420 return iter(self.members)
2421 else:
2422 return TarIter(self)
2423
2424 def _dbg(self, level, msg):
2425 """Write debugging output to sys.stderr.
2426 """
2427 if level <= self.debug:
2428 print >> sys.stderr, msg
2429
    def __enter__(self):
        # Context-manager entry: verify the archive is still open.
        self._check()
        return self
2433
    def __exit__(self, type, value, traceback):
        # Context-manager exit: close normally on success; on error only
        # release the underlying file (if we own it) without finalizing.
        if type is None:
            self.close()
        else:
            # An exception occurred. We must not call close() because
            # it would try to write end-of-archive blocks and padding.
            if not self._extfileobj:
                self.fileobj.close()
            self.closed = True
2443# class TarFile
2444
2445class TarIter:
2446 """Iterator Class.
2447
2448 for tarinfo in TarFile(...):
2449 suite...
2450 """
2451
2452 def __init__(self, tarfile):
2453 """Construct a TarIter object.
2454 """
2455 self.tarfile = tarfile
2456 self.index = 0
2457 def __iter__(self):
2458 """Return iterator object.
2459 """
2460 return self
2461 def next(self):
2462 """Return the next item using TarFile's next() method.
2463 When all members have been read, set TarFile as _loaded.
2464 """
2465 # Fix for SF #1100429: Under rare circumstances it can
2466 # happen that getmembers() is called during iteration,
2467 # which will cause TarIter to stop prematurely.
2468 if not self.tarfile._loaded:
2469 tarinfo = self.tarfile.next()
2470 if not tarinfo:
2471 self.tarfile._loaded = True
2472 raise StopIteration
2473 else:
2474 try:
2475 tarinfo = self.tarfile.members[self.index]
2476 except IndexError:
2477 raise StopIteration
2478 self.index += 1
2479 return tarinfo
2480
2481# Helper classes for sparse file support
2482class _section:
2483 """Base class for _data and _hole.
2484 """
2485 def __init__(self, offset, size):
2486 self.offset = offset
2487 self.size = size
2488 def __contains__(self, offset):
2489 return self.offset <= offset < self.offset + self.size
2490
class _data(_section):
    """Represent a data section in a sparse file.
    """
    def __init__(self, offset, size, realpos):
        _section.__init__(self, offset, size)
        # realpos: where this section's bytes actually live in the
        # compacted member data stored inside the archive.
        self.realpos = realpos
2497
class _hole(_section):
    """Represent a hole section in a sparse file.
    """
    # A hole carries no data; the interval from _section is all we need.
    pass
2502
2503class _ringbuffer(list):
2504 """Ringbuffer class which increases performance
2505 over a regular list.
2506 """
2507 def __init__(self):
2508 self.idx = 0
2509 def find(self, offset):
2510 idx = self.idx
2511 while True:
2512 item = self[idx]
2513 if offset in item:
2514 break
2515 idx += 1
2516 if idx == len(self):
2517 idx = 0
2518 if idx == self.idx:
2519 # End of File
2520 return None
2521 self.idx = idx
2522 return item
2523
2524#---------------------------------------------
2525# zipfile compatible TarFile class
2526#---------------------------------------------
2527TAR_PLAIN = 0 # zipfile.ZIP_STORED
2528TAR_GZIPPED = 8 # zipfile.ZIP_DEFLATED
class TarFileCompat:
    """TarFile class compatible with standard module zipfile's
    ZipFile class.

    Deprecated shim: emits a py3k warning on construction and was
    removed in Python 3.0.
    """
    def __init__(self, file, mode="r", compression=TAR_PLAIN):
        from warnings import warnpy3k
        warnpy3k("the TarFileCompat class has been removed in Python 3.0",
                 stacklevel=2)
        if compression == TAR_PLAIN:
            self.tarfile = TarFile.taropen(file, mode)
        elif compression == TAR_GZIPPED:
            self.tarfile = TarFile.gzopen(file, mode)
        else:
            raise ValueError("unknown compression constant")
        if mode[0:1] == "r":
            # Mirror zipfile's ZipInfo attribute names onto the members.
            members = self.tarfile.getmembers()
            for m in members:
                m.filename = m.name
                m.file_size = m.size
                m.date_time = time.gmtime(m.mtime)[:6]
    def namelist(self):
        # Names of the regular-file members only (see infolist()).
        return map(lambda m: m.name, self.infolist())
    def infolist(self):
        # Only regular-file members, to match zipfile behaviour.
        return filter(lambda m: m.type in REGULAR_TYPES,
                      self.tarfile.getmembers())
    def printdir(self):
        self.tarfile.list()
    def testzip(self):
        # Tar archives have no per-file CRC to verify; always "ok".
        return
    def getinfo(self, name):
        return self.tarfile.getmember(name)
    def read(self, name):
        return self.tarfile.extractfile(self.tarfile.getmember(name)).read()
    def write(self, filename, arcname=None, compress_type=None):
        # compress_type is accepted for zipfile API compatibility only.
        self.tarfile.add(filename, arcname)
    def writestr(self, zinfo, bytes):
        try:
            from cStringIO import StringIO
        except ImportError:
            from StringIO import StringIO
        import calendar
        # Build a TarInfo from the zipfile-style info object.
        tinfo = TarInfo(zinfo.filename)
        tinfo.size = len(bytes)
        tinfo.mtime = calendar.timegm(zinfo.date_time)
        self.tarfile.addfile(tinfo, StringIO(bytes))
    def close(self):
        self.tarfile.close()
#class TarFileCompat
2577
2578#--------------------
2579# exported functions
2580#--------------------
def is_tarfile(name):
    """Return True if name points to a tar archive that we
    are able to handle, else return False.
    """
    try:
        # `open' is rebound to TarFile.open at the bottom of this module;
        # any TarError while opening means the file is not readable as tar.
        archive = open(name)
        archive.close()
        return True
    except TarError:
        return False
2591
# Keep a reference to the builtin open(); the module-level name `open'
# is rebound below to TarFile.open (used e.g. by is_tarfile()).
bltn_open = open
open = TarFile.open