got things a bit closer to right, but there is still a nasty problem on
authorChris Koeritz <fred@gruntose.com>
Tue, 18 Sep 2012 03:23:20 +0000 (23:23 -0400)
committerChris Koeritz <fred@gruntose.com>
Tue, 18 Sep 2012 03:23:20 +0000 (23:23 -0400)
some hierarchies, like /home/fred which refuses to be understood by
synch files and somehow causes it to throw blank destination errors.

nucleus/library/filesystem/directory_tree.cpp
nucleus/library/filesystem/heavy_file_ops.cpp
octopi/library/tentacles/file_transfer_tentacle.cpp
octopi/library/tentacles/recursive_file_copy.cpp
scripts/database/serene_backer_upper.sh

index dc78832fb64d13d977724ede7afabed1543c53ac..b6742e936e2a25434ed29f09a31a27b9ebe2ca47 100644 (file)
@@ -438,7 +438,7 @@ filename_tree *directory_tree::seek(const astring &dir_name_in,
       filename current(check->_dirname);
       if (!current.is_normal()) {
 //#ifdef DEBUG_DIRECTORY_TREE
-        LOG(astring("skipping abnormal dir:  ") + current);
+        LOG(astring("skipping abnormal dir: \"") + current + "\"");
 //#endif
         continue;
       }
@@ -500,7 +500,7 @@ bool directory_tree::calculate(filename_tree *start, bool just_size)
   while (directory_tree::current_dir(*ted, curr)) {
     if (!curr.is_normal()) {
 //#ifdef DEBUG_DIRECTORY_TREE
-      LOG(astring("skipping abnormal dir:  ") + curr);
+      LOG(astring("skipping abnormal dir: \"") + curr + "\"");
 //#endif
       directory_tree::next(*ted);
       continue;  // scary non-simple file type.
@@ -518,7 +518,7 @@ bool directory_tree::calculate(filename_tree *start, bool just_size)
 #endif
       if (!curr_file.is_normal()) {
 //#ifdef DEBUG_DIRECTORY_TREE
-        LOG(astring("skipping abnormal file:  ") + curr);
+        LOG(astring("skipping abnormal file: \"") + curr + "\"");
 //#endif
         continue;
       }
index e8f48ebd5d20e585546ffe5a024250e27abd7499..0d2f91bc3463383ffcc1d0285597886af0abac03 100644 (file)
@@ -27,7 +27,7 @@ using namespace structures;
 
 namespace filesystem {
 
-//#define DEBUG_HEAVY_FILE_OPS
+#define DEBUG_HEAVY_FILE_OPS
   // uncomment for noisier debugging.
 
 #undef LOG
@@ -279,8 +279,8 @@ outcome heavy_file_operations::buffer_files(const astring &source_root,
       continue;
     }
 
-    if ((last_action._byte_start + last_action._length >= current.length())
-        && current.length()) {
+    if (last_action._byte_start + last_action._length >= current.length()) {
+//        && current.length()) {
       // this file is done now.  go to the next one.
       if (!advance(to_transfer, last_action)) break;
       continue;
index bb9711e82e3c2886f3e65c20186fde53bae5ed42..717d276120d5c3c396fd3c0e00917b2e398e8f92 100644 (file)
@@ -46,7 +46,7 @@ const int FTT_CLEANING_INTERVAL = 30 * SECOND_ms;
 const int TRANSFER_TIMEOUT = 10 * MINUTE_ms;
   // if it hasn't been touched in this long, it's out of there.
 
-//#define DEBUG_FILE_TRANSFER_TENTACLE
+#define DEBUG_FILE_TRANSFER_TENTACLE
   // uncomment for noisier version.
 
 #undef LOG
@@ -253,9 +253,7 @@ outcome file_transfer_tentacle::add_correspondence
     (const astring &source_mapping, const astring &source_root,
      int refresh_interval)
 {
-#ifdef DEBUG_FILE_TRANSFER_TENTACLE
   FUNCDEF("add_correspondence");
-#endif
   AUTO_LOCK;
 
   remove_correspondence(source_mapping);  // clean the old one out first.
@@ -343,7 +341,7 @@ outcome file_transfer_tentacle::register_file_transfer
     (const octopus_entity &ent, const astring &src_root,
     const astring &dest_root, const string_array &includes)
 {
-//  FUNCDEF("register_file_transfer");
+  FUNCDEF("register_file_transfer");
   AUTO_LOCK;
   // make sure that this isn't an existing transfer.  if so, we just update
   // the status.
@@ -408,9 +406,7 @@ bool file_transfer_tentacle::remove_path(const astring &key,
 
 void file_transfer_tentacle::periodic_actions()
 {
-#ifdef DEBUG_FILE_TRANSFER_TENTACLE
   FUNCDEF("periodic_actions");
-#endif
   AUTO_LOCK;
 
   // first, we'll clean out old transfers.
@@ -626,7 +622,9 @@ outcome file_transfer_tentacle::handle_storage_request
   if (bufret == heavy_file_operations::FINISHED) {
 //here we go.  finish by setting command to conclude.
 LOG("got the final marker saying heavy file ops done!");
+    the_rec->_done = true;
     resp->_command = file_transfer_infoton::CONCLUDE_TRANSFER_MARKER;
+    bufret = OKAY;  // now it's no longer an exceptional outcome.
   } else if (bufret != OKAY) {
     // complain, but still send.
     LOG(astring("buffer files returned an error on item=")
@@ -634,8 +632,13 @@ LOG("got the final marker saying heavy file ops done!");
         + req._dest_root);
   }
 
-  if ( (bufret == OKAY) && !resp->_packed_data.length() ) {
+//  if ( (bufret == OKAY) && !resp->_packed_data.length() ) {
+//    LOG(astring("failed to pack any data for file: ") + req._src_root);
+//  }
+
+  if (!the_rec->_done && (bufret == OKAY) && !resp->_packed_data.length() ) {
     // seems like the transfer is done.
+    LOG("marking empty transfer as done; why not caught above at FINISHED check?");
     the_rec->_done = true;
     resp->_command = file_transfer_infoton::CONCLUDE_TRANSFER_MARKER;
   }
@@ -800,9 +803,7 @@ outcome file_transfer_tentacle::consume(infoton &to_chow,
 
 outcome file_transfer_tentacle::refresh_now(const astring &source_mapping)
 {
-#ifdef DEBUG_FILE_TRANSFER_TENTACLE
   FUNCDEF("refresh_now");
-#endif
   AUTO_LOCK;
   for (int i = 0; i < _correspondences->elements(); i++) {
     file_transfer_record *curr = _correspondences->borrow(i);
index e2461ca8a8a9254bbdc8a893d6df8dae80a8b9de..073ddb36851aad94335b2c01c4b4c084c10c8057 100644 (file)
@@ -39,7 +39,7 @@ using namespace textual;
 
 namespace octopi {
 
-//#define DEBUG_RECURSIVE_FILE_COPY
+#define DEBUG_RECURSIVE_FILE_COPY
   // uncomment for noisier debugging.
 
 #define FAKE_HOSTNAME "internal_fake_host"
@@ -49,6 +49,11 @@ namespace octopi {
 #undef BASE_LOG
 #define BASE_LOG(s) EMERGENCY_LOG(program_wide_logger::get(), s)
 
+#define RETURN_ERROR_RFC(msg, err) { \
+  LOG(msg); \
+  return err; \
+}
+
 const int MAX_CHUNK_RFC_COPY_HIER = 1 * MEGABYTE;
   // maximum size for each transfer chunk.
 
@@ -62,11 +67,6 @@ recursive_file_copy::~recursive_file_copy() {}
 const char *recursive_file_copy::outcome_name(const outcome &to_name)
 { return common::outcome_name(to_name); }
 
-#define RETURN_ERROR_RFC(msg, err) { \
-  LOG(msg); \
-  return err; \
-}
-
 outcome recursive_file_copy::copy_hierarchy(int transfer_mode,
   const astring &source_dir, const astring &target_dir,
   const string_array &includes, const astring &source_start)
@@ -171,29 +171,20 @@ outcome recursive_file_copy::copy_hierarchy(int transfer_mode,
       break;
     }
 
-    if (!reply->_packed_data.length()) {
-      RETURN_ERROR_RFC("file transfer had no packed data", GARBAGE);
-    }
+//    if (!reply->_packed_data.length()) {
+//      RETURN_ERROR_RFC("file transfer had no packed data", GARBAGE);
+//    }
 
     byte_array copy = reply->_packed_data;
     while (copy.length()) {
-#ifdef DEBUG_RECURSIVE_FILE_COPY
-      LOG(a_sprintf("starging size in array: %d", copy.length()));
-#endif
       file_time empty;
       file_transfer_header head(empty);
       if (!head.unpack(copy)) 
         RETURN_ERROR_RFC("failed to unpack header", GARBAGE);
-#ifdef DEBUG_RECURSIVE_FILE_COPY
-      LOG(a_sprintf("removed head size in array: %d", copy.length()));
-#endif
       if (copy.length() < head._length)
         RETURN_ERROR_RFC("not enough length in array", GARBAGE);
-//hmmm: are we doing nothing here besides validating that we GOT something in the header?
-      copy.zap(0, head._length - 1);
-#ifdef DEBUG_RECURSIVE_FILE_COPY
-      LOG(a_sprintf("size in array now: %d", copy.length()));
-#endif
+      if (head._length > 0)
+        copy.zap(0, head._length - 1);
 
 //hmmm: this needs better formatting, and should not repeat the same file name even
 //      if it's in multiple chunks.
index 58b49fd741ebb3668f5edf7dabebee208b7d38ec..8a2cbcb0303eac351ee116ff41ac5412f6a792f5 100644 (file)
@@ -3,7 +3,7 @@
 function check_if_failed()
 {
   if [ $? -ne 0 ]; then
-    echo "Step failed: $*"
+    echo "Step FAILed: $*"
     return 1
   fi
 }
@@ -16,6 +16,27 @@ function exit_if_failed()
   fi
 }
 
+function synch_to_backup()
+{
+  local source="$1"; shift
+  local dest="$1"; shift
+  if [ -z "$source" -o -z "$dest" ]; then
+    echo synch_to_backup function requires a source and a target folder to synch.
+    exit 1
+  fi
+  echo "Synchronizing $source into $dest."
+#hmmm: temporary measure until top-level dir bug fixed in synch_files app.
+  if [ ! -d "$dest" ]; then
+    mkdir -p "$dest"
+    if [ $? -ne 0 ]; then
+      echo "FAILed to make target directory: $dest"
+      return 1
+    fi
+  fi
+  synch_files "$source" "$dest"
+  check_if_failed "synching $source to $dest"
+}
+
 # just undo it first, to try to be sure we know we are mounted properly later.
 umount /z/backup &>/dev/null
 
@@ -30,42 +51,17 @@ exit_if_failed "mounting backup folder"
 
 ##############
 
-synch_files /etc /z/backup/etc/
-check_if_failed "synching etc to backup"
+synch_to_backup /etc /z/backup/etc/
 
 ##############
 
-synch_files /home/albums /z/backup/home/albums
-check_if_failed "synching home/albums to backup"
-
-synch_files /home/deepcore /z/backup/home/deepcore
-check_if_failed "synching home/deepcore to backup"
-
-synch_files /home/drupal /z/backup/home/drupal
-check_if_failed "synching home/drupal to backup"
-
-synch_files /home/fred /z/backup/home/fred
-check_if_failed "synching home/fred to backup"
-
-synch_files /home/git /z/backup/home/git
-check_if_failed "synching home/git to backup"
-
-synch_files /home/sharedspam /z/backup/home/sharedspam
-check_if_failed "synching home/sharedspam to backup"
-
-synch_files /home/sim /z/backup/home/sim
-check_if_failed "synching home/sim to backup"
-
-synch_files /home/svn /z/backup/home/svn
-check_if_failed "synching home/svn to backup"
-
-synch_files /home/trac /z/backup/home/trac
-check_if_failed "synching home/trac to backup"
+for subdir in albums deepcore drupal fred git sharedspam sim svn trac ; do 
+  synch_to_backup /home/$subdir /z/backup/home/$subdir
+done
 
 ##############
 
-synch_files /var/lib/mailman /z/backup/var/lib/mailman
-check_if_failed "synching var/lib/mailman to backup"
+synch_to_backup /var/lib/mailman /z/backup/var/lib/mailman
 
 ##############