summaryrefslogtreecommitdiff
path: root/rapid/backupfile.py
blob: 39efb53eb008dd1be1b179274cac066bfa11927b (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
#!/usr/bin/python
# -*- coding: latin1 -*-

### Copyright (C) 2011 Damon Lynch <damonlynch@gmail.com>

### This program is free software; you can redistribute it and/or modify
### it under the terms of the GNU General Public License as published by
### the Free Software Foundation; either version 2 of the License, or
### (at your option) any later version.

### This program is distributed in the hope that it will be useful,
### but WITHOUT ANY WARRANTY; without even the implied warranty of
### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
### GNU General Public License for more details.

### You should have received a copy of the GNU General Public License
### along with this program; if not, write to the Free Software
### Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

import multiprocessing
import tempfile
import os

import gio

import logging
logger = multiprocessing.get_logger()

import rpdmultiprocessing as rpdmp
import rpdfile
import problemnotification as pn
import config

PHOTO_BACKUP = 1
VIDEO_BACKUP = 2
PHOTO_VIDEO_BACKUP = 3

from gettext import gettext as _


class BackupFiles(multiprocessing.Process):
    """
    Worker process that copies downloaded files onto a backup device.

    Receives (move_succeeded, rpd_file, path_suffix,
    backup_duplicate_overwrite) tuples over results_pipe, copies each
    file (plus any associated THM / XMP files) beneath self.path, and
    reports byte-level progress and per-file results back over the same
    pipe. A None rpd_file is the termination sentinel.
    """

    def __init__(self, path, name,
                 batch_size_MB, results_pipe, terminate_queue, 
                 run_event):
        """
        path: base directory on the backup device
        name: human readable name of the backup mount, used in errors
        batch_size_MB: minimum number of megabytes to copy before a
            progress message is emitted
        results_pipe: duplex pipe; work arrives on it and progress /
            results are sent back on it
        terminate_queue: any item placed on it requests immediate
            termination
        run_event: cleared by the caller to pause this process
        """
        multiprocessing.Process.__init__(self)
        self.results_pipe = results_pipe
        self.terminate_queue = terminate_queue
        self.batch_size_bytes = batch_size_MB * 1048576 # * 1024 * 1024
        self.path = path
        self.mount_name = name
        self.run_event = run_event
        
    def check_termination_request(self):
        """
        Check to see this process has not been requested to immediately terminate
        """
        if not self.terminate_queue.empty():
            # drain the sentinel; its value is irrelevant
            self.terminate_queue.get()
            # terminate immediately
            logger.info("Terminating file backup")
            return True
        return False
        
        
    def update_progress(self, amount_downloaded, total):
        """
        gio progress callback: report bytes copied in batches of at
        least self.batch_size_bytes, or immediately on completion.
        """
        # first check if process is being terminated
        self.amount_downloaded = amount_downloaded
        if not self.terminate_queue.empty():
            # it is - cancel the current copy
            self.cancel_copy.cancel()
        else:
            if not self.total_reached:
                chunk_downloaded = amount_downloaded - self.bytes_downloaded
                if (chunk_downloaded > self.batch_size_bytes) or (amount_downloaded == total):
                    self.bytes_downloaded = amount_downloaded
                    
                    if amount_downloaded == total:
                        # this function is called a couple of times when total is reached
                        self.total_reached = True
                        
                    self.results_pipe.send((rpdmp.CONN_PARTIAL, (rpdmp.MSG_BYTES, (self.scan_pid, self.pid, self.total_downloaded + amount_downloaded, chunk_downloaded))))
                    if amount_downloaded == total:
                        self.bytes_downloaded = 0
    
    def progress_callback(self, amount_downloaded, total):
        """Forward gio copy progress to update_progress()."""
        self.update_progress(amount_downloaded, total)
        
    def progress_callback_no_update(self, amount_downloaded, total):
        """called when copying very small files"""
        pass
        
    def backup_additional_file(self, dest_dir, full_file_name):
        """Backs up small files like XMP or THM files"""
        source = gio.File(full_file_name)
        dest_name = os.path.join(dest_dir, os.path.split(full_file_name)[1])
        logger.debug("Backing up %s", dest_name)
        dest=gio.File(dest_name)
        try:
            source.copy(dest, self.progress_callback_no_update, cancellable=None)
        except gio.Error as inst:
            # best effort only: failure to copy a sidecar file is logged
            # but does not fail the backup of the main file
            logger.error("Failed to backup file %s", full_file_name)        
        
    def run(self):
        """
        Main loop: receive files from the pipe and back them up until a
        None rpd_file (termination sentinel) or a termination request.
        """
        self.cancel_copy = gio.Cancellable()
        self.bytes_downloaded = 0
        self.total_downloaded = 0
        
        while True:
            
            self.amount_downloaded = 0
            move_succeeded, rpd_file, path_suffix, backup_duplicate_overwrite = self.results_pipe.recv()
            if rpd_file is None:
                # this is a termination signal
                return None
            # pause if instructed by the caller
            self.run_event.wait()
                
            if self.check_termination_request():
                return None
                
            backup_succeeded = False
            self.scan_pid = rpd_file.scan_pid
            
            if move_succeeded:
                self.total_reached = False
                    
                source = gio.File(path=rpd_file.download_full_file_name)
                
                if path_suffix is None:
                    dest_base_dir = self.path
                else:
                    dest_base_dir = os.path.join(self.path, path_suffix)
                    
                
                dest_dir = os.path.join(dest_base_dir, rpd_file.download_subfolder)
                backup_full_file_name = os.path.join(
                                    dest_dir, 
                                    rpd_file.download_name)            
                
                subfolder = gio.File(path=dest_dir)
                if not subfolder.query_exists(cancellable=None):
                    # create the subfolders on the backup path
                    try:
                        subfolder.make_directory_with_parents(cancellable=gio.Cancellable())
                    except gio.Error as inst:
                        # There is a tiny chance directory may have been created by
                        # another process between the time it takes to query and
                        # the time it takes to create a new directory. 
                        # Ignore such errors.
                        if inst.code != gio.ERROR_EXISTS:
                            logger.error("Failed to create backup subfolder: %s", dest_dir)
                            logger.error(inst)
                            rpd_file.add_problem(None, pn.BACKUP_DIRECTORY_CREATION, self.mount_name)
                            rpd_file.add_extra_detail('%s%s' % (pn.BACKUP_DIRECTORY_CREATION, self.mount_name), inst)
                            rpd_file.error_title = _('Backing up error')
                            rpd_file.error_msg = \
                                 _("Destination directory could not be created: %(directory)s\n") % \
                                  {'directory': subfolder,  } + \
                                 _("Source: %(source)s\nDestination: %(destination)s") % \
                                  {'source': rpd_file.download_full_file_name, 
                                   'destination': backup_full_file_name} + "\n" + \
                                 _("Error: %(inst)s") % {'inst': inst}

                dest = gio.File(path=backup_full_file_name)
                if backup_duplicate_overwrite:
                    flags = gio.FILE_COPY_OVERWRITE
                else:
                    flags = gio.FILE_COPY_NONE
                    
                try:
                    source.copy(dest, self.progress_callback, flags, 
                                        cancellable=self.cancel_copy)
                    backup_succeeded = True
                except gio.Error as inst:
                    rpd_file.add_problem(None, pn.BACKUP_ERROR, self.mount_name)
                    rpd_file.add_extra_detail('%s%s' % (pn.BACKUP_ERROR, self.mount_name), inst)
                    rpd_file.error_title = _('Backing up error')
                    rpd_file.error_msg = \
                            _("Source: %(source)s\nDestination: %(destination)s") % \
                             {'source': rpd_file.download_full_file_name, 'destination': backup_full_file_name} + "\n" + \
                            _("Error: %(inst)s") % {'inst': inst}
                    logger.error("%s:\n%s", rpd_file.error_title, rpd_file.error_msg)

                if not backup_succeeded:
                    if rpd_file.status ==  config.STATUS_DOWNLOAD_FAILED:
                        rpd_file.status = config.STATUS_DOWNLOAD_AND_BACKUP_FAILED
                    else:
                        rpd_file.status = config.STATUS_BACKUP_PROBLEM
                else:
                    # backup any THM or XMP files
                    if rpd_file.download_thm_full_name:
                        self.backup_additional_file(dest_dir,
                                        rpd_file.download_thm_full_name)
                    if rpd_file.download_xmp_full_name:
                        self.backup_additional_file(dest_dir,
                                        rpd_file.download_xmp_full_name)
            
            self.total_downloaded += rpd_file.size
            # account for any bytes the progress callback never reported
            # (e.g. the copy failed part way through)
            bytes_not_downloaded = rpd_file.size - self.amount_downloaded
            if bytes_not_downloaded:
                self.results_pipe.send((rpdmp.CONN_PARTIAL, (rpdmp.MSG_BYTES, (self.scan_pid, self.pid, self.total_downloaded, bytes_not_downloaded))))
                
            self.results_pipe.send((rpdmp.CONN_PARTIAL, (rpdmp.MSG_FILE,
                                   (backup_succeeded, rpd_file))))