summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorjvoisin2015-11-25 19:29:18 +0100
committerjvoisin2015-11-26 15:47:30 +0100
commit2212039d70ce5c4e97c3943bb82d4231c10e1260 (patch)
treeafd2aedcf6bb4c2a9fc82d6f3b2d0f7ad2d0fbb8
parent9ea3c2cbc456bfa9a903eaa0ed6a42a34d12e1c2 (diff)
Make the logging more pep-282 compliant
See https://www.python.org/dev/peps/pep-0282/ for details, but basically this commit leaves the string substitution to the logging function, instead of doing it in place in its parameters with the '%' operator.
-rw-r--r--libmat/archive.py36
-rw-r--r--libmat/mat.py18
-rw-r--r--libmat/office.py10
-rw-r--r--nautilus/nautilus-mat.py6
4 files changed, 32 insertions, 38 deletions
diff --git a/libmat/archive.py b/libmat/archive.py
index 703ee66..ca80161 100644
--- a/libmat/archive.py
+++ b/libmat/archive.py
@@ -87,25 +87,23 @@ class ZipStripper(GenericArchiveStripper):
87 ret_list = [] 87 ret_list = []
88 zipin = zipfile.ZipFile(self.filename, 'r') 88 zipin = zipfile.ZipFile(self.filename, 'r')
89 if zipin.comment != '' and not list_unsupported: 89 if zipin.comment != '' and not list_unsupported:
90 logging.debug('%s has a comment' % self.filename) 90 logging.debug('%s has a comment', self.filename)
91 return False 91 return False
92 for item in zipin.infolist(): 92 for item in zipin.infolist():
93 zipin.extract(item, self.tempdir) 93 zipin.extract(item, self.tempdir)
94 path = os.path.join(self.tempdir, item.filename) 94 path = os.path.join(self.tempdir, item.filename)
95 if not self.__is_zipfile_clean(item) and not list_unsupported: 95 if not self.__is_zipfile_clean(item) and not list_unsupported:
96 logging.debug('%s from %s has compromising zipinfo' % 96 logging.debug('%s from %s has compromising zipinfo', item.filename, self.filename)
97 (item.filename, self.filename))
98 return False 97 return False
99 if os.path.isfile(path): 98 if os.path.isfile(path):
100 cfile = mat.create_class_file(path, False, add2archive=self.add2archive) 99 cfile = mat.create_class_file(path, False, add2archive=self.add2archive)
101 if cfile is not None: 100 if cfile is not None:
102 if not cfile.is_clean(): 101 if not cfile.is_clean():
103 logging.debug('%s from %s has metadata' % (item.filename, self.filename)) 102 logging.debug('%s from %s has metadata', item.filename, self.filename)
104 if not list_unsupported: 103 if not list_unsupported:
105 return False 104 return False
106 else: 105 else:
107 logging.info('%s\'s fileformat is not supported or harmless.' 106 logging.info('%s\'s fileformat is not supported or harmless.', item.filename)
108 % item.filename)
109 basename, ext = os.path.splitext(path) 107 basename, ext = os.path.splitext(path)
110 if os.path.basename(item.filename) not in ('mimetype', '.rels'): 108 if os.path.basename(item.filename) not in ('mimetype', '.rels'):
111 if ext not in parser.NOMETA: 109 if ext not in parser.NOMETA:
@@ -136,8 +134,7 @@ class ZipStripper(GenericArchiveStripper):
136 if cfile_meta != {}: 134 if cfile_meta != {}:
137 metadata[item.filename] = str(cfile_meta) 135 metadata[item.filename] = str(cfile_meta)
138 else: 136 else:
139 logging.info('%s\'s fileformat is not supported or harmless' 137 logging.info('%s\'s fileformat is not supported or harmless', item.filename)
140 % item.filename)
141 zipin.close() 138 zipin.close()
142 return metadata 139 return metadata
143 140
@@ -188,9 +185,9 @@ class ZipStripper(GenericArchiveStripper):
188 os.chmod(path, old_stat | stat.S_IWUSR) 185 os.chmod(path, old_stat | stat.S_IWUSR)
189 cfile.remove_all() 186 cfile.remove_all()
190 os.chmod(path, old_stat) 187 os.chmod(path, old_stat)
191 logging.debug('Processing %s from %s' % (item.filename, self.filename)) 188 logging.debug('Processing %s from %s', item.filename, self.filename)
192 elif item.filename not in whitelist: 189 elif item.filename not in whitelist:
193 logging.info('%s\'s format is not supported or harmless' % item.filename) 190 logging.info("%s's format is not supported or harmless", item.filename)
194 basename, ext = os.path.splitext(path) 191 basename, ext = os.path.splitext(path)
195 if not (self.add2archive or ext in parser.NOMETA): 192 if not (self.add2archive or ext in parser.NOMETA):
196 continue 193 continue
@@ -205,7 +202,7 @@ class ZipStripper(GenericArchiveStripper):
205 zipin.close() 202 zipin.close()
206 zipout.close() 203 zipout.close()
207 204
208 logging.info('%s processed' % self.filename) 205 logging.info('%s processed', self.filename)
209 self.do_backup() 206 self.do_backup()
210 return True 207 return True
211 208
@@ -248,12 +245,11 @@ class TarStripper(GenericArchiveStripper):
248 cfile.remove_all() 245 cfile.remove_all()
249 os.chmod(path, old_stat) 246 os.chmod(path, old_stat)
250 elif self.add2archive or os.path.splitext(item.name)[1] in parser.NOMETA: 247 elif self.add2archive or os.path.splitext(item.name)[1] in parser.NOMETA:
251 logging.debug('%s\' format is either not supported or harmless' % item.name) 248 logging.debug("%s' format is either not supported or harmless", item.name)
252 elif item.name in whitelist: 249 elif item.name in whitelist:
253 logging.debug('%s is not supported, but MAT was told to add it anyway.' 250 logging.debug('%s is not supported, but MAT was told to add it anyway.', item.name)
254 % item.name)
255 else: # Don't add the file to the archive 251 else: # Don't add the file to the archive
256 logging.debug('%s will not be added' % item.name) 252 logging.debug('%s will not be added', item.name)
257 continue 253 continue
258 tarout.add(unicode(path.decode('utf-8')), 254 tarout.add(unicode(path.decode('utf-8')),
259 unicode(item.name.decode('utf-8')), 255 unicode(item.name.decode('utf-8')),
@@ -291,8 +287,7 @@ class TarStripper(GenericArchiveStripper):
291 tarin = tarfile.open(self.filename, 'r' + self.compression) 287 tarin = tarfile.open(self.filename, 'r' + self.compression)
292 for item in tarin.getmembers(): 288 for item in tarin.getmembers():
293 if not self.is_file_clean(item) and not list_unsupported: 289 if not self.is_file_clean(item) and not list_unsupported:
294 logging.debug('%s from %s has compromising tarinfo' % 290 logging.debug('%s from %s has compromising tarinfo', item.name, self.filename)
295 (item.name, self.filename))
296 return False 291 return False
297 tarin.extract(item, self.tempdir) 292 tarin.extract(item, self.tempdir)
298 path = os.path.join(self.tempdir, item.name) 293 path = os.path.join(self.tempdir, item.name)
@@ -300,15 +295,14 @@ class TarStripper(GenericArchiveStripper):
300 cfile = mat.create_class_file(path, False, add2archive=self.add2archive) 295 cfile = mat.create_class_file(path, False, add2archive=self.add2archive)
301 if cfile is not None: 296 if cfile is not None:
302 if not cfile.is_clean(): 297 if not cfile.is_clean():
303 logging.debug('%s from %s has metadata' % 298 logging.debug('%s from %s has metadata', item.name.decode("utf8"), self.filename)
304 (item.name.decode("utf8"), self.filename))
305 if not list_unsupported: 299 if not list_unsupported:
306 return False 300 return False
307 # Nested archives are treated like unsupported files 301 # Nested archives are treated like unsupported files
308 elif isinstance(cfile, GenericArchiveStripper): 302 elif isinstance(cfile, GenericArchiveStripper):
309 ret_list.append(item.name) 303 ret_list.append(item.name)
310 else: 304 else:
311 logging.error('%s\'s format is not supported or harmless' % item.name) 305 logging.error("%s's format is not supported or harmless", item.name)
312 if os.path.splitext(path)[1] not in parser.NOMETA: 306 if os.path.splitext(path)[1] not in parser.NOMETA:
313 if not list_unsupported: 307 if not list_unsupported:
314 return False 308 return False
@@ -334,7 +328,7 @@ class TarStripper(GenericArchiveStripper):
334 if meta: 328 if meta:
335 current_meta['file'] = str(meta) 329 current_meta['file'] = str(meta)
336 else: 330 else:
337 logging.error('%s\'s format is not supported or harmless' % item.name) 331 logging.error("%s's format is not supported or harmless", item.name)
338 332
339 if not self.is_file_clean(item): # if there is meta 333 if not self.is_file_clean(item): # if there is meta
340 current_meta['mtime'] = item.mtime 334 current_meta['mtime'] = item.mtime
diff --git a/libmat/mat.py b/libmat/mat.py
index df607a5..42357d6 100644
--- a/libmat/mat.py
+++ b/libmat/mat.py
@@ -119,7 +119,7 @@ def secure_remove(filename):
119 try: # I want the file removed, even if it's read-only 119 try: # I want the file removed, even if it's read-only
120 os.chmod(filename, 220) 120 os.chmod(filename, 220)
121 except OSError: 121 except OSError:
122 logging.error('Unable to add write rights to %s' % filename) 122 logging.error('Unable to add write rights to %s', filename)
123 raise libmat.exceptions.UnableToWriteFile 123 raise libmat.exceptions.UnableToWriteFile
124 124
125 try: 125 try:
@@ -131,12 +131,12 @@ def secure_remove(filename):
131 else: 131 else:
132 raise OSError 132 raise OSError
133 except OSError: 133 except OSError:
134 logging.error('Unable to securely remove %s' % filename) 134 logging.error('Unable to securely remove %s', filename)
135 135
136 try: 136 try:
137 os.remove(filename) 137 os.remove(filename)
138 except OSError: 138 except OSError:
139 logging.error('Unable to remove %s' % filename) 139 logging.error('Unable to remove %s', filename)
140 raise libmat.exceptions.UnableToRemoveFile 140 raise libmat.exceptions.UnableToRemoveFile
141 141
142 return True 142 return True
@@ -150,13 +150,13 @@ def create_class_file(name, backup, **kwargs):
150 :param bool backup: shell the file be backuped? 150 :param bool backup: shell the file be backuped?
151 """ 151 """
152 if not os.path.isfile(name): # check if the file exists 152 if not os.path.isfile(name): # check if the file exists
153 logging.error('%s is not a valid file' % name) 153 logging.error('%s is not a valid file', name)
154 return None 154 return None
155 elif not os.access(name, os.R_OK): # check read permissions 155 elif not os.access(name, os.R_OK): # check read permissions
156 logging.error('%s is is not readable' % name) 156 logging.error('%s is is not readable', name)
157 return None 157 return None
158 elif not os.path.getsize(name): # check if the file is not empty (hachoir crash on empty files) 158 elif not os.path.getsize(name): # check if the file is not empty (hachoir crash on empty files)
159 logging.error('%s is empty' % name) 159 logging.error('%s is empty', name)
160 return None 160 return None
161 161
162 try: 162 try:
@@ -166,11 +166,11 @@ def create_class_file(name, backup, **kwargs):
166 166
167 parser = hachoir_parser.createParser(filename) 167 parser = hachoir_parser.createParser(filename)
168 if not parser: 168 if not parser:
169 logging.info('Unable to parse %s with hachoir' % filename) 169 logging.info('Unable to parse %s with hachoir', filename)
170 170
171 mime = mimetypes.guess_type(name)[0] 171 mime = mimetypes.guess_type(name)[0]
172 if not mime: 172 if not mime:
173 logging.info('Unable to find mimetype of %s' % filename) 173 logging.info('Unable to find mimetype of %s', filename)
174 return None 174 return None
175 175
176 if mime.startswith('application/vnd.oasis.opendocument'): 176 if mime.startswith('application/vnd.oasis.opendocument'):
@@ -183,7 +183,7 @@ def create_class_file(name, backup, **kwargs):
183 try: 183 try:
184 stripper_class = strippers.STRIPPERS[mime] 184 stripper_class = strippers.STRIPPERS[mime]
185 except KeyError: 185 except KeyError:
186 logging.info('Don\'t have stripper for %s format' % mime) 186 logging.info('Don\'t have stripper for %s format', mime)
187 return None 187 return None
188 188
189 return stripper_class(filename, parser, mime, backup, is_writable, **kwargs) 189 return stripper_class(filename, parser, mime, backup, is_writable, **kwargs)
diff --git a/libmat/office.py b/libmat/office.py
index 00b8f34..72f77c8 100644
--- a/libmat/office.py
+++ b/libmat/office.py
@@ -44,7 +44,7 @@ class OpenDocumentStripper(archive.TerminalZipStripper):
44 # method to get all attributes of a node 44 # method to get all attributes of a node
45 pass 45 pass
46 except KeyError: # no meta.xml file found 46 except KeyError: # no meta.xml file found
47 logging.debug('%s has no opendocument metadata' % self.filename) 47 logging.debug('%s has no opendocument metadata', self.filename)
48 zipin.close() 48 zipin.close()
49 return metadata 49 return metadata
50 50
@@ -153,7 +153,7 @@ class PdfStripper(parser.GenericParser):
153 surface = cairo.PDFSurface(output, 10, 10) 153 surface = cairo.PDFSurface(output, 10, 10)
154 context = cairo.Context(surface) # context draws on the surface 154 context = cairo.Context(surface) # context draws on the surface
155 155
156 logging.debug('PDF rendering of %s' % self.filename) 156 logging.debug('PDF rendering of %s', self.filename)
157 for pagenum in range(document.get_n_pages()): 157 for pagenum in range(document.get_n_pages()):
158 page = document.get_page(pagenum) 158 page = document.get_page(pagenum)
159 page_width, page_height = page.get_size() 159 page_width, page_height = page.get_size()
@@ -168,13 +168,13 @@ class PdfStripper(parser.GenericParser):
168 surface.finish() 168 surface.finish()
169 shutil.move(output, self.output) 169 shutil.move(output, self.output)
170 except: 170 except:
171 logging.error('Something went wrong when cleaning %s.' % self.filename) 171 logging.error('Something went wrong when cleaning %s.', self.filename)
172 return False 172 return False
173 173
174 try: 174 try:
175 import pdfrw # For now, poppler cannot write meta, so we must use pdfrw 175 import pdfrw # For now, poppler cannot write meta, so we must use pdfrw
176 176
177 logging.debug('Removing %s\'s superficial metadata' % self.filename) 177 logging.debug('Removing %s\'s superficial metadata', self.filename)
178 trailer = pdfrw.PdfReader(self.output) 178 trailer = pdfrw.PdfReader(self.output)
179 trailer.Info.Producer = None 179 trailer.Info.Producer = None
180 trailer.Info.Creator = None 180 trailer.Info.Creator = None
@@ -183,7 +183,7 @@ class PdfStripper(parser.GenericParser):
183 writer.write(self.output) 183 writer.write(self.output)
184 self.do_backup() 184 self.do_backup()
185 except: 185 except:
186 logging.error('Unable to remove all metadata from %s, please install pdfrw' % self.output) 186 logging.error('Unable to remove all metadata from %s, please install pdfrw', self.output)
187 return False 187 return False
188 return True 188 return True
189 189
diff --git a/nautilus/nautilus-mat.py b/nautilus/nautilus-mat.py
index 11a47f3..cdce947 100644
--- a/nautilus/nautilus-mat.py
+++ b/nautilus/nautilus-mat.py
@@ -36,17 +36,17 @@ class MatExtension(GObject.GObject, Nautilus.MenuProvider):
36 # We're only going to put ourselves on supported mimetypes' context menus 36 # We're only going to put ourselves on supported mimetypes' context menus
37 if not (file.get_mime_type() 37 if not (file.get_mime_type()
38 in [i["mimetype"] for i in libmat.mat.list_supported_formats()]): 38 in [i["mimetype"] for i in libmat.mat.list_supported_formats()]):
39 logging.debug("%s is not supported by MAT" % file.get_mime_type()) 39 logging.debug("%s is not supported by MAT", file.get_mime_type())
40 return 40 return
41 41
42 # MAT can only handle local file: 42 # MAT can only handle local file:
43 if file.get_uri_scheme() != 'file': 43 if file.get_uri_scheme() != 'file':
44 logging.debug("%s files not supported by MAT" % file.get_uri_scheme()) 44 logging.debug("%s files not supported by MAT", file.get_uri_scheme())
45 return 45 return
46 46
47 # MAT can not clean non-writable files 47 # MAT can not clean non-writable files
48 if not file.can_write(): 48 if not file.can_write():
49 logging.debug("%s is not writable by MAT" % file.get_uri_scheme()) 49 logging.debug("%s is not writable by MAT", file.get_uri_scheme())
50 return 50 return
51 51
52 item = Nautilus.MenuItem(name="Nautilus::clean_metadata", 52 item = Nautilus.MenuItem(name="Nautilus::clean_metadata",