filename current(check->_dirname);
if (!current.is_normal()) {
//#ifdef DEBUG_DIRECTORY_TREE
- LOG(astring("skipping abnormal dir: ") + current);
+ LOG(astring("skipping abnormal dir: \"") + current + "\"");
//#endif
continue;
}
while (directory_tree::current_dir(*ted, curr)) {
if (!curr.is_normal()) {
//#ifdef DEBUG_DIRECTORY_TREE
- LOG(astring("skipping abnormal dir: ") + curr);
+ LOG(astring("skipping abnormal dir: \"") + curr + "\"");
//#endif
directory_tree::next(*ted);
continue; // scary non-simple file type.
#endif
if (!curr_file.is_normal()) {
//#ifdef DEBUG_DIRECTORY_TREE
- LOG(astring("skipping abnormal file: ") + curr);
+ LOG(astring("skipping abnormal file: \"") + curr + "\"");
//#endif
continue;
}
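The three hunks above apply the same one-line fix: the logged name is wrapped in quotes so that an empty or whitespace-padded name is still visible in the log. A minimal sketch of the pattern, where quote_for_log is a hypothetical helper rather than anything in the patch:

  // renders a name with delimiters so stray characters stand out, e.g.
  // a name ending in a space logs as "foo " rather than foo.
  astring quote_for_log(const astring &name)
  { return astring("\"") + name + "\""; }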
namespace filesystem {
-//#define DEBUG_HEAVY_FILE_OPS
+#define DEBUG_HEAVY_FILE_OPS
// uncomment for noisier debugging.
#undef LOG
continue;
}
- if ((last_action._byte_start + last_action._length >= current.length())
- && current.length()) {
+ if (last_action._byte_start + last_action._length >= current.length()) {
+// && current.length()) {
// this file is done now. go to the next one.
if (!advance(to_transfer, last_action)) break;
continue;
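Dropping the "&& current.length()" clause is what lets a zero-length file complete: for an empty file the surviving test is 0 + 0 >= 0, which is true, but the old second clause vetoed exactly that case, so the transfer could never advance past the file. A sketch of the corrected predicate under that reading (the standalone function is illustrative, not from the codebase):

  // true once the last action has consumed the whole file, including
  // the degenerate case of a zero-length file.
  bool transfer_complete(int byte_start, int length, int file_size)
  { return byte_start + length >= file_size; }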
const int TRANSFER_TIMEOUT = 10 * MINUTE_ms;
// if it hasn't been touched in this long, it's out of there.
-//#define DEBUG_FILE_TRANSFER_TENTACLE
+#define DEBUG_FILE_TRANSFER_TENTACLE
// uncomment for noisier version.
#undef LOG
(const astring &source_mapping, const astring &source_root,
int refresh_interval)
{
-#ifdef DEBUG_FILE_TRANSFER_TENTACLE
FUNCDEF("add_correspondence");
-#endif
AUTO_LOCK;
remove_correspondence(source_mapping); // clean the old one out first.
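This file repeats the same unguarding of FUNCDEF in several routines below: the function-name definition is made unconditional instead of living inside the DEBUG_FILE_TRANSFER_TENTACLE guard, so any LOG statement outside an #ifdef always has a name to refer to. A guess at the macro's shape, judging only from its pairing with LOG (the real definition in this codebase may differ):

  // assumed form: binds the current function's name for LOG to use.
  #define FUNCDEF(name) static const char *func = (name);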
(const octopus_entity &ent, const astring &src_root,
const astring &dest_root, const string_array &includes)
{
-// FUNCDEF("register_file_transfer");
+ FUNCDEF("register_file_transfer");
AUTO_LOCK;
// make sure that this isn't an existing transfer. if so, we just update
// the status.
void file_transfer_tentacle::periodic_actions()
{
-#ifdef DEBUG_FILE_TRANSFER_TENTACLE
FUNCDEF("periodic_actions");
-#endif
AUTO_LOCK;
// first, we'll clean out old transfers.
if (bufret == heavy_file_operations::FINISHED) {
//here we go. finish by setting command to conclude.
LOG("got the final marker saying heavy file ops done!");
+ the_rec->_done = true;
resp->_command = file_transfer_infoton::CONCLUDE_TRANSFER_MARKER;
+ bufret = OKAY; // now it's no longer an exceptional outcome.
} else if (bufret != OKAY) {
// complain, but still send.
LOG(astring("buffer files returned an error on item=")
+ req._dest_root);
}
- if ( (bufret == OKAY) && !resp->_packed_data.length() ) {
+// if ( (bufret == OKAY) && !resp->_packed_data.length() ) {
+// LOG(astring("failed to pack any data for file: ") + req._src_root);
+// }
+
+ if (!the_rec->_done && (bufret == OKAY) && !resp->_packed_data.length() ) {
// seems like the transfer is done.
+ LOG("marking empty transfer as done; why not caught above at FINISHED check?");
the_rec->_done = true;
resp->_command = file_transfer_infoton::CONCLUDE_TRANSFER_MARKER;
}
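The first added line records completion on the transfer record itself, and resetting bufret to OKAY keeps the "complain, but still send" branch from firing on what is now a normal conclusion. With the old length-based check commented out, the surviving fallback only triggers for a transfer that produced no data yet was never flagged FINISHED, which the new log line calls out as suspicious. A condensed sketch of the resulting control flow, with OKAY standing in for the real outcome constant:

  if (bufret == heavy_file_operations::FINISHED) {
    the_rec->_done = true;    // the source has nothing more to send.
    resp->_command = file_transfer_infoton::CONCLUDE_TRANSFER_MARKER;
    bufret = OKAY;            // downgrade to a normal outcome.
  } else if (bufret != OKAY) {
    // a genuine error: log it, but still send the response.
  } else if (!the_rec->_done && !resp->_packed_data.length()) {
    the_rec->_done = true;    // fallback: empty buffer means finished.
    resp->_command = file_transfer_infoton::CONCLUDE_TRANSFER_MARKER;
  }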
outcome file_transfer_tentacle::refresh_now(const astring &source_mapping)
{
-#ifdef DEBUG_FILE_TRANSFER_TENTACLE
FUNCDEF("refresh_now");
-#endif
AUTO_LOCK;
for (int i = 0; i < _correspondences->elements(); i++) {
file_transfer_record *curr = _correspondences->borrow(i);
namespace octopi {
-//#define DEBUG_RECURSIVE_FILE_COPY
+#define DEBUG_RECURSIVE_FILE_COPY
// uncomment for noisier debugging.
#define FAKE_HOSTNAME "internal_fake_host"
#undef BASE_LOG
#define BASE_LOG(s) EMERGENCY_LOG(program_wide_logger::get(), s)
+#define RETURN_ERROR_RFC(msg, err) { \
+ LOG(msg); \
+ return err; \
+}
+
const int MAX_CHUNK_RFC_COPY_HIER = 1 * MEGABYTE;
// maximum size for each transfer chunk.
const char *recursive_file_copy::outcome_name(const outcome &to_name)
{ return common::outcome_name(to_name); }
-#define RETURN_ERROR_RFC(msg, err) { \
- LOG(msg); \
- return err; \
-}
-
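Relocating RETURN_ERROR_RFC up with the file's other preprocessor setup changes nothing behaviorally; a macro is plain textual substitution and only needs to precede its first use. For reference, a call like the one later in this patch expands to:

  RETURN_ERROR_RFC("failed to unpack header", GARBAGE);
  // becomes:
  { LOG("failed to unpack header"); return GARBAGE; }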
outcome recursive_file_copy::copy_hierarchy(int transfer_mode,
const astring &source_dir, const astring &target_dir,
const string_array &includes, const astring &source_start)
break;
}
- if (!reply->_packed_data.length()) {
- RETURN_ERROR_RFC("file transfer had no packed data", GARBAGE);
- }
+// if (!reply->_packed_data.length()) {
+// RETURN_ERROR_RFC("file transfer had no packed data", GARBAGE);
+// }
byte_array copy = reply->_packed_data;
while (copy.length()) {
-#ifdef DEBUG_RECURSIVE_FILE_COPY
- LOG(a_sprintf("starging size in array: %d", copy.length()));
-#endif
file_time empty;
file_transfer_header head(empty);
if (!head.unpack(copy))
RETURN_ERROR_RFC("failed to unpack header", GARBAGE);
-#ifdef DEBUG_RECURSIVE_FILE_COPY
- LOG(a_sprintf("removed head size in array: %d", copy.length()));
-#endif
if (copy.length() < head._length)
RETURN_ERROR_RFC("not enough length in array", GARBAGE);
-//hmmm: are we doing nothing here besides validating that we GOT something in the header?
- copy.zap(0, head._length - 1);
-#ifdef DEBUG_RECURSIVE_FILE_COPY
- LOG(a_sprintf("size in array now: %d", copy.length()));
-#endif
+ if (head._length > 0)
+ copy.zap(0, head._length - 1);
//hmmm: this needs better formatting, and should not repeat the same file name even
// if it's in multiple chunks.
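Each chunk in the packed data is a file_transfer_header followed by _length payload bytes, and the loop above peels off one chunk per iteration. The new guard matters because zap is used with an inclusive range, so a zero-length payload (a directory entry or an empty file, say) would otherwise request zap(0, -1). One iteration, annotated (empty and head are the loop's own locals):

  file_transfer_header head(empty);
  if (!head.unpack(copy))             // unpack() consumes the header bytes.
    RETURN_ERROR_RFC("failed to unpack header", GARBAGE);
  if (copy.length() < head._length)   // the payload must be fully present.
    RETURN_ERROR_RFC("not enough length in array", GARBAGE);
  if (head._length > 0)
    copy.zap(0, head._length - 1);    // drop the inclusive payload range.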
function check_if_failed()
{
if [ $? -ne 0 ]; then
- echo "Step failed: $*"
+ echo "Step FAILed: $*"
return 1
fi
}
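check_if_failed reads $?, which holds the exit status of only the most recent command, so it must be called on the very next line; any intervening command (even an echo) would overwrite the status. A usage sketch, with some_command as a placeholder:

  some_command
  check_if_failed "running some_command"

The synch_to_backup function added below follows exactly this convention, invoking check_if_failed immediately after synch_files.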
fi
}
+function synch_to_backup()
+{
+ local source="$1"; shift
+ local dest="$1"; shift
+ if [ -z "$source" -o -z "$dest" ]; then
+ echo "The synch_to_backup function requires a source and a target folder to synch."
+ exit 1
+ fi
+ echo "Synchronizing $source into $dest."
+#hmmm: temporary measure until top-level dir bug fixed in synch_files app.
+ if [ ! -d "$dest" ]; then
+ mkdir -p "$dest"
+ if [ $? -ne 0 ]; then
+ echo "FAILed to make target directory: $dest"
+ return 1
+ fi
+ fi
+ synch_files "$source" "$dest"
+ check_if_failed "synching $source to $dest"
+}
+
# just undo it first, to try to be sure we know we are mounted properly later.
umount /z/backup &>/dev/null
##############
-synch_files /etc /z/backup/etc/
-check_if_failed "synching etc to backup"
+synch_to_backup /etc /z/backup/etc/
##############
-synch_files /home/albums /z/backup/home/albums
-check_if_failed "synching home/albums to backup"
-
-synch_files /home/deepcore /z/backup/home/deepcore
-check_if_failed "synching home/deepcore to backup"
-
-synch_files /home/drupal /z/backup/home/drupal
-check_if_failed "synching home/drupal to backup"
-
-synch_files /home/fred /z/backup/home/fred
-check_if_failed "synching home/fred to backup"
-
-synch_files /home/git /z/backup/home/git
-check_if_failed "synching home/git to backup"
-
-synch_files /home/sharedspam /z/backup/home/sharedspam
-check_if_failed "synching home/sharedspam to backup"
-
-synch_files /home/sim /z/backup/home/sim
-check_if_failed "synching home/sim to backup"
-
-synch_files /home/svn /z/backup/home/svn
-check_if_failed "synching home/svn to backup"
-
-synch_files /home/trac /z/backup/home/trac
-check_if_failed "synching home/trac to backup"
+for subdir in albums deepcore drupal fred git sharedspam sim svn trac ; do
+ synch_to_backup /home/$subdir /z/backup/home/$subdir
+done
##############
-synch_files /var/lib/mailman /z/backup/var/lib/mailman
-check_if_failed "synching var/lib/mailman to backup"
+synch_to_backup /var/lib/mailman /z/backup/var/lib/mailman
##############