Diffstat (limited to 'rapid/backupfile.py')
-rw-r--r--  rapid/backupfile.py | 179
1 file changed, 99 insertions(+), 80 deletions(-)
diff --git a/rapid/backupfile.py b/rapid/backupfile.py
index 7c91a19..0247346 100644
--- a/rapid/backupfile.py
+++ b/rapid/backupfile.py
@@ -1,7 +1,7 @@
#!/usr/bin/python
# -*- coding: latin1 -*-
-### Copyright (C) 2011 - 2012 Damon Lynch <damonlynch@gmail.com>
+### Copyright (C) 2011 - 2014 Damon Lynch <damonlynch@gmail.com>
### This program is free software; you can redistribute it and/or modify
### it under the terms of the GNU General Public License as published by
@@ -21,9 +21,11 @@
import multiprocessing
import tempfile
import os
+import errno
+import hashlib
-import gio
import shutil
+import io
import logging
logger = multiprocessing.get_logger()
@@ -38,6 +40,7 @@ VIDEO_BACKUP = 2
PHOTO_VIDEO_BACKUP = 3
from gettext import gettext as _
+from copyfiles import copy_file_metadata
class BackupFiles(multiprocessing.Process):
@@ -48,13 +51,11 @@ class BackupFiles(multiprocessing.Process):
self.results_pipe = results_pipe
self.terminate_queue = terminate_queue
self.batch_size_bytes = batch_size_MB * 1048576 # * 1024 * 1024
+ self.io_buffer = 1048576
self.path = path
self.mount_name = name
self.run_event = run_event
- # As of Ubuntu 12.10 / Fedora 18, the file move/rename command is running agonisingly slowly
- # A hackish workaround is to replace it with the standard python function
- self.use_gnome_file_operations = False
def check_termination_request(self):
"""
@@ -69,63 +70,39 @@ class BackupFiles(multiprocessing.Process):
def update_progress(self, amount_downloaded, total):
- # first check if process is being terminated
self.amount_downloaded = amount_downloaded
- if not self.terminate_queue.empty():
- # it is - cancel the current copy
- self.cancel_copy.cancel()
- else:
- if not self.total_reached:
- chunk_downloaded = amount_downloaded - self.bytes_downloaded
- if (chunk_downloaded > self.batch_size_bytes) or (amount_downloaded == total):
- self.bytes_downloaded = amount_downloaded
-
- if amount_downloaded == total:
- # this function is called a couple of times when total is reached
- self.total_reached = True
-
- self.results_pipe.send((rpdmp.CONN_PARTIAL, (rpdmp.MSG_BYTES, (self.scan_pid, self.pid, self.total_downloaded + amount_downloaded, chunk_downloaded))))
- if amount_downloaded == total:
- self.bytes_downloaded = 0
-
- def progress_callback(self, amount_downloaded, total):
- self.update_progress(amount_downloaded, total)
-
- def progress_callback_no_update(self, amount_downloaded, total):
- """called when copying very small files"""
- pass
+ chunk_downloaded = amount_downloaded - self.bytes_downloaded
+ if (chunk_downloaded > self.batch_size_bytes) or (amount_downloaded == total):
+ self.bytes_downloaded = amount_downloaded
+ self.results_pipe.send((rpdmp.CONN_PARTIAL, (rpdmp.MSG_BYTES, (self.scan_pid, self.pid, self.total_downloaded + amount_downloaded, chunk_downloaded))))
+ if amount_downloaded == total:
+ self.bytes_downloaded = 0
def backup_additional_file(self, dest_dir, full_file_name):
"""Backs up small files like XMP or THM files"""
- source = gio.File(full_file_name)
dest_name = os.path.join(dest_dir, os.path.split(full_file_name)[1])
- if self.use_gnome_file_operations:
+ try:
logger.debug("Backing up additional file %s...", dest_name)
- dest=gio.File(dest_name)
- try:
- source.copy(dest, self.progress_callback_no_update, cancellable=None)
- logger.debug("...backing up additional file %s succeeded", dest_name)
- except gio.Error, inst:
- logger.error("Failed to backup file %s: %s", full_file_name, inst)
- else:
- try:
- logger.debug("Using python to back up additional file %s...", dest_name)
- shutil.copy(full_file_name, dest_name)
- logger.debug("...backing up additional file %s succeeded", dest_name)
- except:
- logger.error("Backup of %s failed", full_file_name)
+ shutil.copyfile(full_file_name, dest_name)
+ logger.debug("...backing up additional file %s succeeded", dest_name)
+ except:
+ logger.error("Backup of %s failed", full_file_name)
+
+ try:
+ copy_file_metadata(full_file_name, dest_name, logger)
+ except:
+ logger.error("Unknown error updating filesystem metadata when copying %s", full_file_name)
def run(self):
- self.cancel_copy = gio.Cancellable()
self.bytes_downloaded = 0
self.total_downloaded = 0
while True:
self.amount_downloaded = 0
- move_succeeded, rpd_file, path_suffix, backup_duplicate_overwrite, download_count = self.results_pipe.recv()
+ move_succeeded, do_backup, rpd_file, path_suffix, backup_duplicate_overwrite, verify_file, download_count = self.results_pipe.recv()
if rpd_file is None:
# this is a termination signal
return None
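
The rewritten update_progress() above batches pipe traffic: it only sends an rpdmp.MSG_BYTES message once the bytes copied since the last report exceed batch_size_bytes, or when the whole file has been copied. A minimal standalone sketch of that batching pattern, with a hypothetical report() callable standing in for the results pipe (names are illustrative, not part of the patch):

# Sketch only: report() is a stand-in for self.results_pipe.send().
class BatchedProgress(object):
    def __init__(self, batch_size_bytes, report):
        self.batch_size_bytes = batch_size_bytes
        self.report = report        # callable taking (amount_copied, chunk)
        self.bytes_reported = 0

    def update(self, amount_copied, total):
        chunk = amount_copied - self.bytes_reported
        # Only send a message once enough new bytes have accumulated,
        # or when the file has finished copying.
        if chunk > self.batch_size_bytes or amount_copied == total:
            self.bytes_reported = amount_copied
            self.report(amount_copied, chunk)
            if amount_copied == total:
                self.bytes_reported = 0   # reset for the next file
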
@@ -138,11 +115,9 @@ class BackupFiles(multiprocessing.Process):
backup_succeeded = False
self.scan_pid = rpd_file.scan_pid
- if move_succeeded:
+ if move_succeeded and do_backup:
self.total_reached = False
- source = gio.File(path=rpd_file.download_full_file_name)
-
if path_suffix is None:
dest_base_dir = self.path
else:
@@ -154,63 +129,107 @@ class BackupFiles(multiprocessing.Process):
dest_dir,
rpd_file.download_name)
- subfolder = gio.File(path=dest_dir)
- if not subfolder.query_exists(cancellable=None):
+ if not os.path.isdir(dest_dir):
# create the subfolders on the backup path
try:
logger.debug("Creating subfolder %s on backup device %s...", dest_dir, self.mount_name)
- subfolder.make_directory_with_parents(cancellable=gio.Cancellable())
+ os.makedirs(dest_dir)
logger.debug("...backup subfolder created")
- except gio.Error, inst:
+ except IOError as inst:
# There is a tiny chance directory may have been created by
# another process between the time it takes to query and
# the time it takes to create a new directory.
# Ignore such errors.
- if inst.code <> gio.ERROR_EXISTS:
+ if inst.errno <> errno.EEXIST:
logger.error("Failed to create backup subfolder: %s", dest_dir)
- logger.error(inst)
+ msg = "%s %s", inst.errno, inst.strerror
+ logger.error(msg)
rpd_file.add_problem(None, pn.BACKUP_DIRECTORY_CREATION, self.mount_name)
- rpd_file.add_extra_detail('%s%s' % (pn.BACKUP_DIRECTORY_CREATION, self.mount_name), inst)
+ rpd_file.add_extra_detail('%s%s' % (pn.BACKUP_DIRECTORY_CREATION, self.mount_name), msg)
rpd_file.error_title = _('Backing up error')
rpd_file.error_msg = \
_("Destination directory could not be created: %(directory)s\n") % \
- {'directory': subfolder, } + \
+ {'directory': dest_dir, } + \
_("Source: %(source)s\nDestination: %(destination)s") % \
{'source': rpd_file.download_full_file_name,
'destination': backup_full_file_name} + "\n" + \
- _("Error: %(inst)s") % {'inst': inst}
+ _("Error: %(inst)s") % {'inst': msg}
- dest = gio.File(path=backup_full_file_name)
- if backup_duplicate_overwrite:
- flags = gio.FILE_COPY_OVERWRITE
- else:
- flags = gio.FILE_COPY_NONE
- if self.use_gnome_file_operations:
+ backup_already_exists = os.path.exists(backup_full_file_name)
+ if backup_already_exists:
+ if backup_duplicate_overwrite:
+ rpd_file.add_problem(None, pn.BACKUP_EXISTS_OVERWRITTEN, self.mount_name)
+ msg = _("Backup %(file_type)s overwritten") % {'file_type': rpd_file.title}
+ else:
+ rpd_file.add_problem(None, pn.BACKUP_EXISTS, self.mount_name)
+ msg = _("%(file_type)s not backed up") % {'file_type': rpd_file.title_capitalized}
+
+ rpd_file.error_title = _("Backup of %(file_type)s already exists") % {'file_type': rpd_file.title}
+ rpd_file.error_msg = \
+ _("Source: %(source)s\nDestination: %(destination)s") % \
+ {'source': rpd_file.download_full_file_name, 'destination': backup_full_file_name} + "\n" + msg
+
+ if backup_already_exists and not backup_duplicate_overwrite:
+ logger.warning(msg)
+ else:
try:
logger.debug("Backing up file %s on device %s...", download_count, self.mount_name)
- source.copy(dest, self.progress_callback, flags,
- cancellable=self.cancel_copy)
+
+ dest = io.open(backup_full_file_name, 'wb', self.io_buffer)
+ src = io.open(rpd_file.download_full_file_name, 'rb', self.io_buffer)
+ total = rpd_file.size
+ amount_downloaded = 0
+ while True:
+ # first check if process is being terminated
+ if self.check_termination_request():
+ logger.debug("Closing partially written temporary file")
+ dest.close()
+ src.close()
+ return None
+ else:
+ chunk = src.read(self.io_buffer)
+ if chunk:
+ dest.write(chunk)
+ amount_downloaded += len(chunk)
+ self.update_progress(amount_downloaded, total)
+ else:
+ break
+ dest.close()
+ src.close()
backup_succeeded = True
+ if verify_file:
+ md5 = hashlib.md5(open(backup_full_file_name).read()).hexdigest()
+ if md5 <> rpd_file.md5:
+ backup_succeeded = False
+ logger.critical("%s file verification FAILED", rpd_file.name)
+ logger.critical("The %s did not back up correctly!", rpd_file.title)
+ rpd_file.add_problem(None, pn.BACKUP_VERIFICATION_FAILED, self.mount_name)
+ rpd_file.error_title = rpd_file.problem.get_title()
+ rpd_file.error_msg = _("%(problem)s\nFile: %(file)s") % \
+ {'problem': rpd_file.problem.get_problems(),
+ 'file': rpd_file.download_full_file_name}
+
logger.debug("...backing up file %s on device %s succeeded", download_count, self.mount_name)
- except gio.Error, inst:
- fileNotBackedUpMessageDisplayed = True
+ if backup_already_exists:
+ logger.warning(msg)
+ except (IOError, OSError) as inst:
+ logger.error("Backup of %s failed", backup_full_file_name)
+ msg = "%s %s", inst.errno, inst.strerror
rpd_file.add_problem(None, pn.BACKUP_ERROR, self.mount_name)
- rpd_file.add_extra_detail('%s%s' % (pn.BACKUP_ERROR, self.mount_name), inst)
+ rpd_file.add_extra_detail('%s%s' % (pn.BACKUP_ERROR, self.mount_name), msg)
rpd_file.error_title = _('Backing up error')
rpd_file.error_msg = \
_("Source: %(source)s\nDestination: %(destination)s") % \
{'source': rpd_file.download_full_file_name, 'destination': backup_full_file_name} + "\n" + \
- _("Error: %(inst)s") % {'inst': inst}
+ _("Error: %(inst)s") % {'inst': msg}
logger.error("%s:\n%s", rpd_file.error_title, rpd_file.error_msg)
- else:
- try:
- logger.debug("Using python to back up file %s on device %s...", download_count, self.mount_name)
- shutil.copy(rpd_file.download_full_file_name, backup_full_file_name)
- backup_succeeded = True
- logger.debug("...backing up file %s on device %s succeeded", download_count, self.mount_name)
- except:
- logger.error("Backup of %s failed", backup_full_file_name)
+
+ if backup_succeeded:
+ try:
+ copy_file_metadata(rpd_file.download_full_file_name, backup_full_file_name, logger)
+ except:
+ logger.error("Unknown error updating filesystem metadata when copying %s", rpd_file.download_full_file_name)
if not backup_succeeded:
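
The hunk above swaps the gio copy for a plain chunked read/write loop with an optional MD5 check of the finished backup. A rough standalone sketch of the same pattern (the chunked_copy() name and on_progress callback are illustrative, not part of the patch):

import hashlib
import io

def chunked_copy(src_path, dest_path, buffer_size=1048576, on_progress=None):
    """Copy src_path to dest_path in buffer_size chunks and return the
    MD5 hex digest of the bytes written, for optional verification."""
    md5 = hashlib.md5()
    copied = 0
    with io.open(src_path, 'rb', buffer_size) as src, \
         io.open(dest_path, 'wb', buffer_size) as dest:
        while True:
            chunk = src.read(buffer_size)
            if not chunk:
                break
            dest.write(chunk)
            md5.update(chunk)
            copied += len(chunk)
            if on_progress is not None:
                on_progress(copied)
    return md5.hexdigest()

Unlike the code in the patch, which re-reads the backup file to compute its MD5, this sketch hashes the stream as it is written, so verification needs no second pass over the destination.
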
@@ -232,11 +251,11 @@ class BackupFiles(multiprocessing.Process):
self.total_downloaded += rpd_file.size
bytes_not_downloaded = rpd_file.size - self.amount_downloaded
- if bytes_not_downloaded:
+ if bytes_not_downloaded and do_backup:
self.results_pipe.send((rpdmp.CONN_PARTIAL, (rpdmp.MSG_BYTES, (self.scan_pid, self.pid, self.total_downloaded, bytes_not_downloaded))))
self.results_pipe.send((rpdmp.CONN_PARTIAL, (rpdmp.MSG_FILE,
- (backup_succeeded, rpd_file))))
+ (backup_succeeded, do_backup, rpd_file))))