awesome assets from gffs code
author    Chris Koeritz <fred@gruntose.com>
          Wed, 4 Jan 2017 17:47:32 +0000 (12:47 -0500)
committer Chris Koeritz <fred@gruntose.com>
          Wed, 4 Jan 2017 17:47:32 +0000 (12:47 -0500)
shared under the Apache License, version 2.0. modified to fit within the feisty meow naming system.

22 files changed:
kona/.classpath
kona/lib/SizeOf.jar [new file with mode: 0644]
kona/lib/commons-compress-1.8.1.jar [new file with mode: 0644]
kona/lib/commons-io-2.4.jar [new file with mode: 0644]
kona/lib/commons-lang3-3.5.jar [new file with mode: 0644]
kona/src/org/gffs/application/MemoryFootprint.java [new file with mode: 0644]
kona/src/org/gffs/application/ProgramTools.java [new file with mode: 0644]
kona/src/org/gffs/cache/CacheList.java [new file with mode: 0644]
kona/src/org/gffs/cache/LRUList.java [new file with mode: 0644]
kona/src/org/gffs/cache/RoleBasedCacheNode.java [new file with mode: 0644]
kona/src/org/gffs/cache/TimedOutLRUCache.java [new file with mode: 0644]
kona/src/org/gffs/cache/TimeoutList.java [new file with mode: 0644]
kona/src/org/gffs/cache/improvements_readme.txt [new file with mode: 0644]
kona/src/org/gffs/compression/PackTar.java [new file with mode: 0644]
kona/src/org/gffs/compression/UnpackTar.java [new file with mode: 0644]
kona/src/org/gffs/filesystem/FileSystemHelper.java [new file with mode: 0644]
kona/src/org/gffs/io/IOUtils.java [new file with mode: 0644]
kona/src/org/gffs/network/DeadHostChecker.java [new file with mode: 0644]
kona/src/org/gffs/network/URLDownloader.java [new file with mode: 0644]
kona/src/org/gffs/readme.txt [new file with mode: 0644]
kona/src/org/gffs/version/Version.java [new file with mode: 0644]
kona/src/org/gffs/version/VersionManager.java [new file with mode: 0644]

diff --git a/kona/.classpath b/kona/.classpath
index 18bf7466743f666fc2eb13c48231df8c1dbde2e8..310fd8d41486b0da87332b3807d3f14b7017c3dc 100644 (file)
@@ -10,5 +10,9 @@
        <classpathentry kind="lib" path="lib/commons-logging-tests.jar"/>
        <classpathentry kind="lib" path="lib/log4j-1.2.16.jar"/>
        <classpathentry kind="lib" path="lib/junit-4.5.jar"/>
+       <classpathentry kind="lib" path="lib/commons-lang3-3.5.jar"/>
+       <classpathentry kind="lib" path="lib/SizeOf.jar"/>
+       <classpathentry kind="lib" path="lib/commons-compress-1.8.1.jar"/>
+       <classpathentry kind="lib" path="lib/commons-io-2.4.jar"/>
        <classpathentry kind="output" path="bin-eclipse"/>
 </classpath>
diff --git a/kona/lib/SizeOf.jar b/kona/lib/SizeOf.jar
new file mode 100644 (file)
index 0000000..34641d2
Binary files /dev/null and b/kona/lib/SizeOf.jar differ
diff --git a/kona/lib/commons-compress-1.8.1.jar b/kona/lib/commons-compress-1.8.1.jar
new file mode 100644 (file)
index 0000000..66b0a56
Binary files /dev/null and b/kona/lib/commons-compress-1.8.1.jar differ
diff --git a/kona/lib/commons-io-2.4.jar b/kona/lib/commons-io-2.4.jar
new file mode 100644 (file)
index 0000000..90035a4
Binary files /dev/null and b/kona/lib/commons-io-2.4.jar differ
diff --git a/kona/lib/commons-lang3-3.5.jar b/kona/lib/commons-lang3-3.5.jar
new file mode 100644 (file)
index 0000000..6328c8d
Binary files /dev/null and b/kona/lib/commons-lang3-3.5.jar differ
diff --git a/kona/src/org/gffs/application/MemoryFootprint.java b/kona/src/org/gffs/application/MemoryFootprint.java
new file mode 100644 (file)
index 0000000..6b2ac28
--- /dev/null
@@ -0,0 +1,59 @@
+package org.gffs.application;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import net.sourceforge.sizeof.SizeOf;
+
+/**
+ * can retrieve the size of java objects to assist tuning of caches and memory usage. this requires the SizeOf jar and a -javaagent setting
+ * pointing at that jar. the project page is: https://sourceforge.net/projects/sizeof/?source=typ_redirect
+ */
+public class MemoryFootprint
+{
+       static private Log _logger = LogFactory.getLog(MemoryFootprint.class);
+
+       // static SizeOf _sizeEstimater = new SizeOf();
+       static {
+               // don't count statics in the memory size.
+               SizeOf.skipStaticField(true);
+               // only complain about large objects if they're bigger than the limit below.
+               SizeOf.setMinSizeToLog(5 * 1024 * 1024);
+       }
+
+       /**
+        * can report the size of the object 'o' if instrumentation has been set up. if instrumentation is absent, all object sizes will be
+        * reported as zero.
+        */
+       public static long getFootprint(Object o)
+       {
+               if (!_logger.isDebugEnabled()) {
+                       _logger.error("abusive memory footprint called when not in debug mode.  a logging statement is wrong.");
+                       return 0;
+               }
+               try {
+                       return SizeOf.sizeOf(o);
+               } catch (Exception e) {
+                       _logger.debug("error retrieving SizeOf object; is SizeOf.jar in javaagent?");
+                       return 0;
+               }
+       }
+
+       /**
+        * reports the size of the object 'o' plus the size of all other objects reachable from it.
+        */
+       public static long getDeepFootprint(Object o)
+       {
+               if (!_logger.isDebugEnabled()) {
+                       _logger.error("abusive memory footprint called when not in debug mode.  a logging statement is wrong.");
+                       return 0;
+               }
+
+               try {
+                       return SizeOf.deepSizeOf(o);
+               } catch (Exception e) {
+                       _logger.debug("error retrieving SizeOf object; is SizeOf.jar in javaagent?");
+                       return 0;
+               }
+       }
+}
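
a minimal usage sketch for MemoryFootprint: it assumes the JVM was launched with -javaagent:kona/lib/SizeOf.jar and that debug logging is enabled for the class (getFootprint() refuses to measure otherwise). the demo class and map below are illustrative, not part of the gffs code.

    import java.util.HashMap;
    import org.gffs.application.MemoryFootprint;

    public class FootprintDemo
    {
        public static void main(String[] args)
        {
            // any object works once the agent is installed; a map stands in for a cache here.
            HashMap<String, String> cache = new HashMap<String, String>();
            cache.put("key", "a value worth measuring");

            // shallow size of the map object itself.
            System.out.println("shallow bytes: " + MemoryFootprint.getFootprint(cache));
            // size of the map plus everything reachable from it.
            System.out.println("deep bytes: " + MemoryFootprint.getDeepFootprint(cache));
        }
    }
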
diff --git a/kona/src/org/gffs/application/ProgramTools.java b/kona/src/org/gffs/application/ProgramTools.java
new file mode 100644 (file)
index 0000000..f13640d
--- /dev/null
@@ -0,0 +1,149 @@
+package org.gffs.application;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.URISyntaxException;
+import java.net.URL;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.gffs.filesystem.FileSystemHelper;
+
+/**
+ * Some utility functions for getting information about the running application.
+ * 
+ * @author Chris Koeritz
+ */
+public class ProgramTools
+{
+       public static Log _logger = LogFactory.getLog(ProgramTools.class);
+       
+       /**
+        * produces a string listing the requested number of stack frames from the current stack. the frame for this function itself is
+        * skipped; counting starts from the immediate caller's perspective (including the caller).
+        */
+       public static String showLastFewOnStack(int howManyFrames)
+       {
+               StackTraceElement[] elements = Thread.currentThread().getStackTrace();
+               StringBuilder toReturn = new StringBuilder();
+               /*
+                * don't start at the very first frame; we want to skip backwards to the direct caller of this function.
+                */
+               int startFrame = 3;
+               int endFrame = Math.min(howManyFrames + 3, elements.length - 1);
+               for (int i = startFrame; i < endFrame; i++) {
+                       if (toReturn.length() != 0) {
+                               toReturn.append("\n<= ");
+                       }
+                       toReturn.append(getStackFrame(i));
+               }
+               return toReturn.toString();
+       }
+
+       /**
+        * returns the Nth frame backwards starting from this function. 0 is this method, 1 is the invoker, 2 is the invoker's invoker, etc.
+        */
+       public static String getStackFrame(int which)
+       {
+               StackTraceElement[] elements = Thread.currentThread().getStackTrace();
+               /* a little self-protection to avoid accessing missing parts of the array. */
+               if (which >= elements.length)
+                       which = elements.length - 1;
+               return elements[which].toString();
+       }
+
+       /**
+        * returns the location where the code is running, as best as can be determined. finds the running location based on our jar files, or if
+        * that's not available, on the assumption of app path being within an appropriate installation (even if not at the top). this method
+        * cannot use standard genesis properties to look up the path, because this function needs to operate before anything else is loaded (for
+        * OSGi usage).
+        */
+       static public String getInstallationDirectory()
+       {
+               String appPath = null;
+               // see if we can intuit our location from living in a jar.
+               URL url = ProgramTools.class.getProtectionDomain().getCodeSource().getLocation();
+               try {
+                       // get the app path but switch back slashes to forward ones.
+                       appPath = new File(url.toURI().getSchemeSpecificPart()).toString().replace('\\', '/');
+               } catch (URISyntaxException e) {
+                       String msg = "failed to convert code source url to app path: " + url;
+                       _logger.error(msg);
+                       throw new RuntimeException(msg);
+               }
+               if (_logger.isTraceEnabled())
+                       _logger.trace("got source path as: " + appPath);
+               if (appPath.endsWith(".jar")) {
+                       // we need to chop off the jar file part of the name.
+                       int lastSlash = appPath.lastIndexOf("/");
+                       // if (lastSlash < 0)
+                       // lastSlash = appPath.lastIndexOf("\\");
+                       if (lastSlash < 0) {
+                               String msg = "could not find a slash character in the path: " + appPath;
+                               _logger.error(msg);
+                               throw new RuntimeException(msg);
+                       }
+                       appPath = appPath.substring(0, lastSlash);
+                       if (_logger.isTraceEnabled())
+                               _logger.trace("truncated path since inside jar: " + appPath);
+               }
+               appPath = appPath.concat("/..");
+
+               if (_logger.isTraceEnabled())
+                       _logger.trace("jar-intuited startup bundle path: " + appPath);
+
+               File startupDir = new File(appPath);
+               if (!startupDir.exists() || !startupDir.isDirectory()) {
+                       throw new RuntimeException(
+                               "the location where we believe the installation is running from does not actually exist as a directory.");
+               }
+
+               //hmmm: below may not be very general since it does osgi?  but it will work if people use a bundles dir.
+               
+               /*
+                * make sure we can find our own bundles directory, which is a crucial thing for osgi. if we can't find it, then we really don't know
+                * where home is.
+                */
+               File testingBundlesDir = new File(startupDir, "bundles");
+               File testingExtDir = new File(startupDir, "ext");
+               String lastStartupDirState = "not-equal"; // a string we should never see as a full path.
+
+               while (!testingBundlesDir.exists() || !testingExtDir.exists()) {
+                       if (_logger.isTraceEnabled())
+                               _logger.trace("failed to find bundles directory at '" + startupDir.getAbsolutePath() + "', popping up a level.");
+
+                       if (lastStartupDirState.equals(FileSystemHelper.sanitizeFilename(startupDir.getAbsolutePath()))) {
+                               throw new RuntimeException(
+                                       "caught the startup directory not changing, which means we have hit the root and failed to find our bundles and ext directories.");
+                       }
+                       // reset for next time.
+                       lastStartupDirState = FileSystemHelper.sanitizeFilename(startupDir.getAbsolutePath());
+
+                       // pop up a level, since we didn't find our bundles directory.
+                       startupDir = new File(startupDir, "..");
+                       testingBundlesDir = new File(startupDir, "bundles");
+                       testingExtDir = new File(startupDir, "ext");
+
+                       if (startupDir.getParent() == null) {
+                               throw new RuntimeException("failed to find the bundles and ext directories after hitting top of file system paths.");
+                       }
+               }
+
+               // we successfully found the bundles directory, even if we had to jump through a few hoops.
+               if (_logger.isTraceEnabled()) {
+                       _logger.trace("successfully found bundles directory under path: " + appPath);
+               }
+
+               // now resolve the path to an absolute location without relative components.
+               try {
+                       appPath = FileSystemHelper.sanitizeFilename(startupDir.getCanonicalPath());
+               } catch (IOException e) {
+                       _logger.error("could not open osgi directory: " + appPath);
+               }
+               if (_logger.isTraceEnabled())
+                       _logger.debug("startup path after resolution with File: " + appPath);
+               return appPath;
+       }
+}
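
a sketch of how these helpers might be called at startup; the directory layout shown is an assumption inferred from the bundles/ and ext/ probing above, not a documented contract.

    // assumed install tree (hypothetical):
    //   install-root/
    //     bundles/      <- marker directory the search probes for
    //     ext/          <- marker directory the search probes for
    //     lib/app.jar   <- jar this code runs from; the search pops upward from here
    String installDir = ProgramTools.getInstallationDirectory();
    System.out.println("installation root: " + installDir);

    // print a short snapshot of the three most recent callers.
    System.out.println(ProgramTools.showLastFewOnStack(3));
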
diff --git a/kona/src/org/gffs/cache/CacheList.java b/kona/src/org/gffs/cache/CacheList.java
new file mode 100644 (file)
index 0000000..dc2d213
--- /dev/null
@@ -0,0 +1,77 @@
+package org.gffs.cache;
+
+/*
+ * Copyright 2006 University of Virginia
+ * 
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may
+ * obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+abstract class CacheList<KeyType, DataType>
+{
+       protected int _myRole;
+
+       protected RoleBasedCacheNode<KeyType, DataType> _head;
+       protected RoleBasedCacheNode<KeyType, DataType> _tail;
+
+       protected CacheList(int role)
+       {
+               _myRole = role;
+
+               _head = _tail = null;
+       }
+
+       public abstract void insert(RoleBasedCacheNode<KeyType, DataType> node);
+
+       public RoleBasedCacheNode<KeyType, DataType> removeFirst()
+       {
+               if (_head == null)
+                       return null;
+
+               RoleBasedCacheNode<KeyType, DataType> ret = _head;
+
+               _head = _head.getNext(_myRole);
+               if (_head != null)
+                       _head.setPrevious(_myRole, null);
+               else
+                       _tail = null;
+
+               ret.clearLinks(_myRole);
+               return ret;
+       }
+
+       public RoleBasedCacheNode<KeyType, DataType> peekFirst()
+       {
+               if (_head == null)
+                       return null;
+
+               return _head;
+       }
+
+       public void remove(RoleBasedCacheNode<KeyType, DataType> node)
+       {
+               if (node.getPrevious(_myRole) == null) // At the head of the list
+                       _head = node.getNext(_myRole);
+               else
+                       node.getPrevious(_myRole).setNext(_myRole, node.getNext(_myRole));
+
+               if (node.getNext(_myRole) == null) // At the tail of the list
+                       _tail = node.getPrevious(_myRole);
+               else
+                       node.getNext(_myRole).setPrevious(_myRole, node.getPrevious(_myRole));
+
+               node.clearLinks(_myRole);
+       }
+
+       public void clear()
+       {
+               _head = null;
+               _tail = null;
+       }
+}
\ No newline at end of file
diff --git a/kona/src/org/gffs/cache/LRUList.java b/kona/src/org/gffs/cache/LRUList.java
new file mode 100644 (file)
index 0000000..137e2a3
--- /dev/null
@@ -0,0 +1,43 @@
+package org.gffs.cache;
+
+/*
+ * Copyright 2006 University of Virginia
+ * 
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may
+ * obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+
+class LRUList<KeyType, DataType> extends CacheList<KeyType, DataType>
+{
+       public LRUList()
+       {
+               super(RoleBasedCacheNode.ROLE_LRU);
+       }
+
+       @Override
+       public void insert(RoleBasedCacheNode<KeyType, DataType> node)
+       {
+               // LRU inserts ALWAYS go at the tail
+               if (_tail == null) {
+                       _head = _tail = node;
+                       return;
+               }
+
+               _tail.setNext(_myRole, node);
+               node.setPrevious(_myRole, _tail);
+               _tail = node;
+       }
+
+       public void noteUse(RoleBasedCacheNode<KeyType, DataType> node)
+       {
+               remove(node);
+               insert(node);
+       }
+}
\ No newline at end of file
diff --git a/kona/src/org/gffs/cache/RoleBasedCacheNode.java b/kona/src/org/gffs/cache/RoleBasedCacheNode.java
new file mode 100644 (file)
index 0000000..5168d14
--- /dev/null
@@ -0,0 +1,100 @@
+package org.gffs.cache;
+
+/*
+ * Copyright 2006 University of Virginia
+ * 
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may
+ * obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+import java.util.Date;
+
+class RoleBasedCacheNode<KeyType, DataType>
+{
+       static public final int ROLE_LRU = 0;
+       static public final int ROLE_TIMEOUT = 1;
+       static private final int _NUM_ROLES = 2;
+
+       private Object[] _next;
+       private Object[] _previous;
+
+       private KeyType _key;
+       private DataType _data;
+
+       private Date _invalidationDate;
+
+       public RoleBasedCacheNode(KeyType key, DataType data, Date invalidationDate)
+       {
+               _next = new Object[_NUM_ROLES];
+               _previous = new Object[_NUM_ROLES];
+
+               for (int lcv = 0; lcv < _NUM_ROLES; lcv++) {
+                       _next[lcv] = null;
+                       _previous[lcv] = null;
+               }
+
+               _key = key;
+               _data = data;
+               _invalidationDate = invalidationDate;
+       }
+
+       public KeyType getKey()
+       {
+               return _key;
+       }
+
+       public DataType getData()
+       {
+               return _data;
+       }
+
+       public Date getInvalidationDate()
+       {
+               return _invalidationDate;
+       }
+
+       public void setInvalidationDate(long millisecondsFromNow)
+       {
+               _invalidationDate = new Date(System.currentTimeMillis() + millisecondsFromNow);
+       }
+
+       @SuppressWarnings("unchecked")
+       public RoleBasedCacheNode<KeyType, DataType> getPrevious(int role)
+       {
+               return RoleBasedCacheNode.class.cast(_previous[role]);
+       }
+
+       @SuppressWarnings("unchecked")
+       public RoleBasedCacheNode<KeyType, DataType> getNext(int role)
+       {
+               return RoleBasedCacheNode.class.cast(_next[role]);
+       }
+
+       public void setPrevious(int role, RoleBasedCacheNode<KeyType, DataType> previous)
+       {
+               _previous[role] = previous;
+       }
+
+       public void setNext(int role, RoleBasedCacheNode<KeyType, DataType> next)
+       {
+               _next[role] = next;
+       }
+
+       public void clearLinks(int role)
+       {
+               _previous[role] = null;
+               _next[role] = null;
+       }
+
+       public void clearLinks()
+       {
+               for (int lcv = 0; lcv < _NUM_ROLES; lcv++)
+                       clearLinks(lcv);
+       }
+}
\ No newline at end of file
diff --git a/kona/src/org/gffs/cache/TimedOutLRUCache.java b/kona/src/org/gffs/cache/TimedOutLRUCache.java
new file mode 100644 (file)
index 0000000..43dbc44
--- /dev/null
@@ -0,0 +1,335 @@
+package org.gffs.cache;
+
+/*
+ * Copyright 2006 University of Virginia
+ * 
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may
+ * obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+/**
+ * This cache attempts to efficiently handle cached items that may time out after a certain period of time. It does this by maintaining 3
+ * separate data structures. The first is a HashMap which allows for quick access to the cached data based on the key. The second is a
+ * linked list which maintains the LRU property of the items. The third is a list ordered by timeout so that items that have timed out can be
+ * identified quickly (i.e., a straight traversal of this list). All data structures share the exact same data node (not equivalent, but
+ * identical instances of the node class). This means that once a node is identified through any of the means indicated (key, LRU property,
+ * timeout property), the node for all three data structures has been identified and does not need to be looked up in the others.
+ * 
+ * @author mmm2a
+ * 
+ * @param <KeyType>
+ * @param <DataType>
+ */
+public class TimedOutLRUCache<KeyType, DataType>
+{
+       static Log _logger = LogFactory.getLog(TimedOutLRUCache.class);
+
+       private HashMap<KeyType, RoleBasedCacheNode<KeyType, DataType>> _map;
+       private LRUList<KeyType, DataType> _lruList;
+       private TimeoutList<KeyType, DataType> _timeoutList;
+
+       private int _maxElements;
+       private long _defaultTimeoutMS;
+       private Thread _activeTimeoutThread = null;
+       private boolean _logCacheEjection = false;
+       public String _cacheName = null; // the name for this cache.
+
+       public TimedOutLRUCache(int maxElements, long defaultTimeoutMS, String cacheName)
+       {
+               if (maxElements < 1)
+                       throw new IllegalArgumentException("\"maxElements\" must be greater than 0.");
+               _cacheName = cacheName;
+               if (_cacheName == null)
+                       throw new IllegalArgumentException("must provide a non-null cache name");
+
+               _maxElements = maxElements;
+               _defaultTimeoutMS = defaultTimeoutMS;
+               _map = new HashMap<KeyType, RoleBasedCacheNode<KeyType, DataType>>(_maxElements);
+               _lruList = new LRUList<KeyType, DataType>();
+               _timeoutList = new TimeoutList<KeyType, DataType>();
+       }
+
+       /**
+        * returns the number of elements held in the cache currently.
+        */
+       public int size()
+       {
+               return _map.size();
+       }
+
+       /**
+        * allows this cache to log when items are removed due to overloading or timing out.
+        */
+       public void setCacheEjectionLogging(boolean logRemovals)
+       {
+               _logCacheEjection = logRemovals;
+       }
+
+       public void activelyTimeoutElements(boolean activelyTimeout)
+       {
+               synchronized (_map) {
+                       if (_activeTimeoutThread == null) {
+                               if (activelyTimeout)
+                                       startActiveTimeout();
+                       } else {
+                               if (!activelyTimeout)
+                                       stopActiveTimeout();
+                       }
+               }
+       }
+
+       public String debugPrefix()
+       {
+               return _cacheName + ": ";
+       }
+
+       public void put(KeyType key, DataType data, long timeoutMS)
+       {
+               RoleBasedCacheNode<KeyType, DataType> newNode =
+                       new RoleBasedCacheNode<KeyType, DataType>(key, data, new Date(System.currentTimeMillis() + timeoutMS));
+
+               synchronized (_map) {
+                       RoleBasedCacheNode<KeyType, DataType> oldNode = _map.remove(key);
+                       if (oldNode != null) {
+                               _lruList.remove(oldNode);
+                               _timeoutList.remove(oldNode);
+                       }
+
+                       if (_map.size() >= _maxElements)
+                               clearStale();
+
+                       while (_map.size() >= _maxElements) {
+                               RoleBasedCacheNode<KeyType, DataType> node = _lruList.removeFirst();
+                               if (_logCacheEjection && _logger.isDebugEnabled())
+                                       _logger.debug(debugPrefix() + "overloaded cache: removing cached item with key: " + node.getKey());
+                               _timeoutList.remove(node);
+                               _map.remove(node.getKey());
+                       }
+
+                       _map.put(key, newNode);
+                       _lruList.insert(newNode);
+                       _timeoutList.insert(newNode);
+
+                       _map.notify();
+               }
+       }
+
+       public void put(KeyType key, DataType data)
+       {
+               put(key, data, _defaultTimeoutMS);
+       }
+
+       public int getMaximumElements()
+       {
+               return _maxElements;
+       }
+
+       /**
+        * tickles the object so that it won't expire for another default expiration period. true is returned if the object was there and got
+        * updated.
+        */
+       public boolean refresh(KeyType key)
+       {
+               synchronized (_map) {
+                       RoleBasedCacheNode<KeyType, DataType> node = _map.get(key);
+                       if (node == null)
+                               return false;
+                       _lruList.remove(node);
+                       node.setInvalidationDate(_defaultTimeoutMS);
+                       // move the node to the end of the LRU list, since we just accessed it.
+                       _lruList.insert(node);
+                       // also fix its position in the timeout list.
+                       _timeoutList.remove(node);
+                       _timeoutList.insert(node);
+                       return true;
+               }
+       }
+
+       // hmmm: highly experimental memory analysis code here!
+       // private final long CHECK_INTERVAL = 1000 * 60; // one minute interval between deep checks currently.
+       private final long CHECK_INTERVAL = 1000 * 10; // hmmm: way too fast an interval; being used for debugging.
+
+       private Date _nextDeepSizeCheck = new Date((new Date().getTime()) + CHECK_INTERVAL);
+
+       public DataType get(KeyType key)
+       {
+               Date now = new Date();
+
+               if (now.after(_nextDeepSizeCheck)) {
+
+                       /*
+                        * hmmm: size check code below, not right yet.
+                        * 
+                        * would be nice to break that output into k, m, g, etc.
+                        * 
+                        * trying the deep footprint on 'this' is giving unrealistic very small sizes.
+                        * 
+                        * also the deep size check is dying with a stack overflow during the large rns directory test, so we cannot use it yet.
+                        */
+                       // if (_logger.isDebugEnabled()) {
+                       // long sizeUsed = MemoryFootprint.getDeepFootprint(_map) + MemoryFootprint.getDeepFootprint(_lruList)
+                       // + MemoryFootprint.getDeepFootprint(_timeoutList);
+                       // _logger.debug(SizeOf.humanReadable(sizeUsed) + " consumed by "+ _cacheName);
+                       // }
+
+                       _nextDeepSizeCheck = new Date((new Date().getTime()) + CHECK_INTERVAL);
+               }
+
+               synchronized (_map) {
+                       RoleBasedCacheNode<KeyType, DataType> node = _map.get(key);
+                       if (node == null)
+                               return null;
+                       _lruList.remove(node);
+                       if (node.getInvalidationDate().before(now)) {
+                               // this entry has become stale.
+                               if (_logCacheEjection && _logger.isDebugEnabled())
+                                       _logger.debug(debugPrefix() + "timed-out entry in get: removing cached item with key: " + node.getKey());
+                               _map.remove(key);
+                               _timeoutList.remove(node);
+                               return null;
+                       }
+                       // move the node to the end of the LRU list, since we just accessed it.
+                       _lruList.insert(node);
+                       return node.getData();
+               }
+       }
+
+       public List<DataType> getAll()
+       {
+               ArrayList<DataType> toReturn = new ArrayList<DataType>();
+               synchronized (_map) {
+                       for (KeyType key : _map.keySet()) {
+                               toReturn.add(_map.get(key).getData());
+                       }
+               }
+               return toReturn;
+       }
+
+       public List<DataType> getAllReferenced(List<KeyType> references)
+       {
+               ArrayList<DataType> toReturn = new ArrayList<DataType>();
+               synchronized (_map) {
+                       for (KeyType key : references) {
+                               //// for (KeyType key : _map.keySet()) {
+                               if (_map.containsKey(key)) {
+                                       toReturn.add(_map.get(key).getData());
+                               } else {
+                                       _logger.error(debugPrefix() + "failed to locate referenced object in cache: " + key);
+                               }
+                       }
+               }
+               return toReturn;
+       }
+
+       public void clearStale()
+       {
+               Date now = new Date();
+
+               synchronized (_map) {
+                       while (true) {
+                               RoleBasedCacheNode<KeyType, DataType> node = _timeoutList.peekFirst();
+                               if (node == null)
+                                       break;
+
+                               if (node.getInvalidationDate().compareTo(now) <= 0) {
+                                       if (_logCacheEjection && _logger.isDebugEnabled())
+                                               _logger.debug(debugPrefix() + "removing timed-out node: " + node.getKey());
+                                       _map.remove(node.getKey());
+                                       _timeoutList.removeFirst();
+                                       _lruList.remove(node);
+                               } else {
+                                       break;
+                               }
+                       }
+               }
+       }
+
+       public void remove(KeyType key)
+       {
+               synchronized (_map) {
+                       RoleBasedCacheNode<KeyType, DataType> node = _map.remove(key);
+                       if (node != null) {
+                               _lruList.remove(node);
+                               _timeoutList.remove(node);
+                       }
+               }
+       }
+
+       public Set<KeyType> keySet()
+       {
+               synchronized (_map) {
+                       return new HashSet<KeyType>(_map.keySet());
+               }
+       }
+
+       public void clear()
+       {
+               synchronized (_map) {
+                       _map.clear();
+                       _lruList.clear();
+                       _timeoutList.clear();
+               }
+       }
+
+       public void startActiveTimeout()
+       {
+               _activeTimeoutThread = new Thread(new ActiveTimeoutWorker(), "Active Cache Timeout Thread");
+               _activeTimeoutThread.setDaemon(true);
+               _activeTimeoutThread.start();
+       }
+
+       public void stopActiveTimeout()
+       {
+               Thread tmp = _activeTimeoutThread;
+               _activeTimeoutThread = null;
+               synchronized (_map) {
+                       _map.notify();
+               }
+
+               try {
+                       tmp.join();
+               } catch (InterruptedException cause) {
+               }
+       }
+
+       private class ActiveTimeoutWorker implements Runnable
+       {
+               public void run()
+               {
+                       synchronized (_map) {
+                               while (_activeTimeoutThread != null) {
+                                       try {
+                                               clearStale();
+                                               RoleBasedCacheNode<KeyType, DataType> firstNode = _timeoutList.peekFirst();
+                                               if (firstNode == null) {
+                                                       _map.wait();
+                                               } else {
+                                                       Date nextStale = firstNode.getInvalidationDate();
+                                                       long timeout = nextStale.getTime() - System.currentTimeMillis();
+                                                       if (timeout > 0) {
+                                                               _map.wait(timeout);
+                                                       }
+                                               }
+                                       } catch (InterruptedException ie) {
+                                       }
+                               }
+                       }
+               }
+       }
+}
\ No newline at end of file
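
a minimal usage sketch for TimedOutLRUCache; the element count, timeouts, and cache name are illustrative only.

    // up to 100 entries, each defaulting to a 30 second lifetime.
    TimedOutLRUCache<String, byte[]> cache =
        new TimedOutLRUCache<String, byte[]>(100, 30 * 1000, "demo cache");
    // start the daemon thread that evicts timed-out entries even when idle.
    cache.activelyTimeoutElements(true);

    cache.put("alpha", new byte[128]);           // uses the 30 second default.
    cache.put("beta", new byte[128], 5 * 1000);  // explicit 5 second timeout.

    byte[] hit = cache.get("alpha");  // non-null while fresh; also bumps the LRU position.
    cache.refresh("beta");            // restarts beta's clock at the default timeout.
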
diff --git a/kona/src/org/gffs/cache/TimeoutList.java b/kona/src/org/gffs/cache/TimeoutList.java
new file mode 100644 (file)
index 0000000..c59069b
--- /dev/null
@@ -0,0 +1,56 @@
+package org.gffs.cache;
+
+/*
+ * Copyright 2006 University of Virginia
+ * 
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may
+ * obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+class TimeoutList<KeyType, DataType> extends CacheList<KeyType, DataType>
+{
+       public TimeoutList()
+       {
+               super(RoleBasedCacheNode.ROLE_TIMEOUT);
+       }
+
+       @Override
+       public void insert(RoleBasedCacheNode<KeyType, DataType> node)
+       {
+               // We'll start at the end because most likely we're adding to the end
+               if (_tail == null) {
+                       // The list is empty
+                       _head = _tail = node;
+                       return;
+               }
+
+               RoleBasedCacheNode<KeyType, DataType> tmp;
+               for (tmp = _tail; tmp != null; tmp = tmp.getPrevious(_myRole)) {
+                       if (tmp.getInvalidationDate().compareTo(node.getInvalidationDate()) <= 0) {
+                               // current node invalidates before me, so I should go after him
+                               node.setPrevious(_myRole, tmp);
+                               node.setNext(_myRole, tmp.getNext(_myRole));
+                               tmp.setNext(_myRole, node);
+                               if (node.getNext(_myRole) == null) {
+                                       // Adding to the tail
+                                       _tail = node;
+                               } else {
+                                       node.getNext(_myRole).setPrevious(_myRole, node);
+                               }
+
+                               return;
+                       }
+               }
+
+               // We add to the head of the list
+               node.setNext(_myRole, _head);
+               _head.setPrevious(_myRole, node);
+               _head = node;
+       }
+}
\ No newline at end of file
diff --git a/kona/src/org/gffs/cache/improvements_readme.txt b/kona/src/org/gffs/cache/improvements_readme.txt
new file mode 100644 (file)
index 0000000..227eb78
--- /dev/null
@@ -0,0 +1,18 @@
+
+
+it would be nice to fix the role-based cache node and how these classes
+are intertwingled.
+it seems better to have separate interfaces for the different concerns, like
+timeout and ordering, and then to combine those interfaces to build the real
+object as a composite, rather than stuffing every characteristic a cache node
+might have into one object, the role-based cache node.
+
+also the structures like cachelist are not separable from the stored type right now.
+
+these guys DO store any kind of type that you can implement with the parts,
+and they do it well, but the internal organization could be cleaned a bit and i think
+it would reduce the code size and also the complexity of the objects.
+
+
+
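
a rough sketch of the interface split imagined in that readme; all names here are hypothetical, not part of the gffs code.

    import java.util.Date;

    // each concern becomes its own small interface...
    interface Keyed<K>
    {
        K getKey();
    }

    interface Expiring
    {
        Date getInvalidationDate();
    }

    // ...and the real node is assembled as a composite of just the concerns a
    // given cache needs, instead of one node class carrying every role.
    class ComposedCacheNode<K, D> implements Keyed<K>, Expiring
    {
        private final K _key;
        private final D _data;
        private final Date _invalidationDate;

        ComposedCacheNode(K key, D data, Date invalidationDate)
        {
            _key = key;
            _data = data;
            _invalidationDate = invalidationDate;
        }

        public K getKey() { return _key; }
        public D getData() { return _data; }
        public Date getInvalidationDate() { return _invalidationDate; }
    }
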
diff --git a/kona/src/org/gffs/compression/PackTar.java b/kona/src/org/gffs/compression/PackTar.java
new file mode 100644 (file)
index 0000000..c7a4cdd
--- /dev/null
@@ -0,0 +1,150 @@
+package org.gffs.compression;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.Iterator;
+
+import org.apache.commons.compress.archivers.ArchiveEntry;
+import org.apache.commons.compress.archivers.ArchiveOutputStream;
+import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream;
+import org.apache.commons.compress.archivers.zip.ZipArchiveOutputStream;
+import org.apache.commons.compress.compressors.gzip.GzipCompressorOutputStream;
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.io.IOUtils;
+import org.apache.commons.io.filefilter.IOFileFilter;
+import org.apache.commons.io.filefilter.TrueFileFilter;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+public class PackTar
+{
+       static private Log _logger = LogFactory.getLog(PackTar.class);
+
+       /**
+        * returns the "longName" with the "prefix" stripped off the front. if the name doesn't contain the prefix, then an error is thrown.
+        */
+       public static String stripOutPrefix(String prefix, String longName) throws IOException
+       {
+               int indy = longName.indexOf(prefix, 0);
+               if (indy < 0)
+                       throw new IOException("failure to find prefix in string, prefix=" + prefix + " and string=" + longName);
+               return longName.substring(indy + prefix.length());
+       }
+
+       /**
+        * adds the file pointed at by "source" to the archive in "tarOut" as a compressed file. if "source" is a directory, then the directory is
+        * recursed and added. the names in the archive will be stripped of the "prefix" string before being added to the archive; for example, if
+        * the source path "/home/fred/wumpus" and the prefix string is "/home/fred/", then the files in the archive will be named "wumpus/A",
+        * "wumpus/B", etc.
+        */
+       public static void compressArchive(ArchiveOutputStream tarOut, String prefix, File source) throws IOException
+       {
+               _logger.debug("entered into compress archive on source " + source + " prefix " + prefix + " and tarout " + tarOut);
+
+               if (!source.exists()) {
+                       // can't pack a source that doesn't exist.
+                       String msg = "Directory " + source.getAbsolutePath() + " doesn't exist yet. Cannot pack.";
+                       _logger.error(msg);
+                       throw new IOException(msg);
+               }
+
+               // traverse the whole tree of the directory (or just write this directly if it's a file).
+               if (source.isFile()) {
+                       String choppedName = stripOutPrefix(prefix, source.getPath());
+                       if (_logger.isDebugEnabled())
+                               _logger.debug("adding a file to the archive (chopped): " + choppedName);
+                       ArchiveEntry f = tarOut.createArchiveEntry(source, choppedName);
+                       tarOut.putArchiveEntry(f);
+                       // close the input stream promptly once its contents are copied into the archive.
+                       try (FileInputStream input = new FileInputStream(source)) {
+                               IOUtils.copy(input, tarOut);
+                       }
+                       tarOut.closeArchiveEntry();
+               } else if (source.isDirectory()) {
+                       // traverse the directory tree at just this height and add everything recursively.
+
+                       if (_logger.isDebugEnabled())
+                               _logger.debug("iterating over a directory to add its contents to the archive: " + source);
+
+                       Iterator<File> spidey = FileUtils.iterateFiles(source, new IOFileFilter()
+                       {
+                               @Override
+                               public boolean accept(File arg0)
+                               {
+                                       return true;
+                               }
+
+                               @Override
+                               public boolean accept(File arg0, String arg1)
+                               {
+                                       return true;
+                               }
+                       }, TrueFileFilter.INSTANCE);
+
+                       File item = null;
+                       while (spidey.hasNext()) {
+                               item = spidey.next();
+
+                               if (_logger.isTraceEnabled())
+                                       _logger.debug("recursing on item: " + item);
+
+                               compressArchive(tarOut, prefix, item);
+                       }
+               } else {
+                       String msg = "source is not a file or directory although it exists.  unknown how to process.";
+                       _logger.error(msg);
+                       throw new IOException(msg);
+               }
+
+       }
+
+       /**
+        * returns the same string as presented, but ensures that the last character will be a directory separator (/).
+        */
+       public static String findAppropriatePrefix(String toChop)
+       {
+               if (toChop.endsWith("/"))
+                       return toChop; // already ready.
+               else
+                       return toChop + "/"; // add a slash on the end.
+       }
+
+       public synchronized static void compressTarGZ(File tarFile, File dest) throws IOException
+       {
+               TarArchiveOutputStream tarOut = new TarArchiveOutputStream(new GzipCompressorOutputStream(new FileOutputStream(tarFile)));
+               compressArchive(tarOut, findAppropriatePrefix(dest.getPath()), dest);
+               tarOut.close();
+       }
+
+       public synchronized static void compressTar(File tarFile, File dest) throws IOException
+       {
+               TarArchiveOutputStream tarOut = new TarArchiveOutputStream(new FileOutputStream(tarFile));
+               compressArchive(tarOut, findAppropriatePrefix(dest.getPath()), dest);
+               tarOut.close();
+       }
+
+       public synchronized static void compressZip(File zipFile, File dest) throws IOException
+       {
+               ZipArchiveOutputStream tarOut = new ZipArchiveOutputStream(new FileOutputStream(zipFile));
+               compressArchive(tarOut, findAppropriatePrefix(dest.getPath()), dest);
+               tarOut.close();
+       }
+
+       static public void main(String[] args) throws Throwable
+       {
+               // future: could use our cool code above to handle any archive type they pass.
+               if (args.length != 2) {
+                       System.err.println("USAGE: PackTar {tar.gz file} {source location}");
+                       System.exit(1);
+               }
+
+               try {
+                       PackTar.compressTarGZ(new File(args[0]), new File(args[1]));
+               } catch (Throwable t) {
+                       _logger.error("failed to compress tar file " + args[0] + " from " + args[1]);
+                       System.exit(1);
+               }
+
+               System.out.println("successfully compressed archive file " + args[0] + " from " + args[1]);
+       }
+
+}
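
a round-trip sketch pairing PackTar with the UnpackTar class added below; the paths are hypothetical.

    import java.io.File;
    import org.gffs.compression.PackTar;
    import org.gffs.compression.UnpackTar;

    public class RoundTripDemo
    {
        public static void main(String[] args) throws Exception
        {
            File archive = new File("/tmp/wumpus.tar.gz");      // hypothetical paths.
            File sourceDir = new File("/home/fred/wumpus");
            File restoreDir = new File("/tmp/restored-wumpus"); // must not exist yet.

            // pack: entry names are stored relative to the source directory itself
            // (e.g. "A" or "sub/B"), since its own path is used as the prefix.
            PackTar.compressTarGZ(archive, sourceDir);

            // unpack into a fresh directory, without widening group permissions.
            UnpackTar.uncompressTarGZ(archive, restoreDir, false);
        }
    }
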
diff --git a/kona/src/org/gffs/compression/UnpackTar.java b/kona/src/org/gffs/compression/UnpackTar.java
new file mode 100644 (file)
index 0000000..596b0e3
--- /dev/null
@@ -0,0 +1,168 @@
+package org.gffs.compression;
+
+import java.io.BufferedOutputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.nio.file.attribute.PosixFilePermission;
+import java.util.HashSet;
+import org.apache.commons.compress.archivers.ArchiveEntry;
+import org.apache.commons.compress.archivers.ArchiveInputStream;
+import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
+import org.apache.commons.compress.archivers.zip.ZipArchiveInputStream;
+import org.apache.commons.compress.compressors.gzip.GzipCompressorInputStream;
+import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
+import org.apache.commons.compress.archivers.zip.ZipArchiveEntry;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+public class UnpackTar
+{
+       static private Log _logger = LogFactory.getLog(UnpackTar.class);
+
+       /**
+        * takes a tar.gz file as the "tarFile" parameter, then decompresses and unpacks the file into the "dest" location.
+        */
+
+       public enum archiveType {
+               TAR,
+               ZIP,
+               TGZ
+       };
+
+       public static void uncompressArchive(ArchiveInputStream tarIn, File dest, archiveType archType, boolean grantUserPermsToGroup)
+               throws IOException
+       {
+
+               if (dest.exists()) {
+                       // Don't unpack into an existing directory
+                       throw new IOException("Directory " + dest.getAbsolutePath() + " already exists. Unpacking exiting");
+               }
+               dest.mkdir();
+
+               ArchiveEntry tarEntry = tarIn.getNextEntry();
+               while (tarEntry != null) {
+                       // New code by ASG 2016-02-21. Added extracting the user permission bits and ORing them into the group permissions.
+                       int mode = 0;
+                       int defaultMode = 0750; // assume somewhat standard executable permissions if we cannot get the mode.
+                       switch (archType) {
+                               case TAR:
+                               case TGZ:
+                                       mode = ((TarArchiveEntry) tarEntry).getMode();
+                                       break;
+                               case ZIP:
+                                       mode = ((ZipArchiveEntry) tarEntry).getUnixMode();
+                                       break;
+                       }
+                       if (mode == 0) {
+                               mode = defaultMode;
+                       }
+                       if (_logger.isTraceEnabled())
+                               _logger.debug("The mode on '" + tarEntry.getName() + "' is " + Integer.toOctalString(mode));
+                       if (grantUserPermsToGroup) {
+                               int temp = mode & 0700;
+                               temp = temp / 8; // Shift it right 3 bit positions
+                               mode = mode | temp;
+                               if (_logger.isTraceEnabled())
+                                       _logger.debug("Now mode on '" + tarEntry.getName() + "' is " + Integer.toOctalString(mode));
+                       }
+                       // End of extracting and ORing the permission bits.
+
+                       // create a file with the same name as the tarEntry
+                       File destPath = new File(dest, tarEntry.getName());
+                       if (_logger.isTraceEnabled())
+                               _logger.debug("working on: " + destPath.getCanonicalPath());
+                       if (tarEntry.isDirectory()) {
+                               destPath.mkdirs();
+                       } else {
+                               destPath.createNewFile();
+
+                               // byte [] btoRead = new byte[(int)tarEntry.getSize()];
+                               byte[] btoRead = new byte[8192];
+                               BufferedOutputStream bout = new BufferedOutputStream(new FileOutputStream(destPath));
+                               int len = 0;
+                               boolean wroteAnything = false;
+                               while ((len = tarIn.read(btoRead)) != -1) {
+                                       if (_logger.isTraceEnabled())
+                                               _logger.debug("read " + len + " bytes");
+                                       wroteAnything = true;
+                                       bout.write(btoRead, 0, len);
+                               }
+                               if (!wroteAnything) {
+                                       _logger.error("zero bytes read from: " + destPath.getCanonicalPath());
+                               }
+
+                               bout.close();
+                       }
+                       // using PosixFilePermission to set file permissions that we extracted earlier.
+                       HashSet<PosixFilePermission> perms = new HashSet<PosixFilePermission>();
+                       // add owners permission
+                       if ((mode & 0400) != 0)
+                               perms.add(PosixFilePermission.OWNER_READ);
+                       if ((mode & 0200) != 0)
+                               perms.add(PosixFilePermission.OWNER_WRITE);
+                       if ((mode & 0100) != 0)
+                               perms.add(PosixFilePermission.OWNER_EXECUTE);
+                       // add group permissions
+                       if ((mode & 0040) != 0)
+                               perms.add(PosixFilePermission.GROUP_READ);
+                       if ((mode & 0020) != 0)
+                               perms.add(PosixFilePermission.GROUP_WRITE);
+                       if ((mode & 0010) != 0)
+                               perms.add(PosixFilePermission.GROUP_EXECUTE);
+                       // add others permissions
+                       if ((mode & 0004) != 0)
+                               perms.add(PosixFilePermission.OTHERS_READ);
+                       if ((mode & 0002) != 0)
+                               perms.add(PosixFilePermission.OTHERS_WRITE);
+                       if ((mode & 0001) != 0)
+                               perms.add(PosixFilePermission.OTHERS_EXECUTE);
+
+                       Files.setPosixFilePermissions(Paths.get(destPath.getCanonicalPath()), perms);
+                       tarEntry = tarIn.getNextEntry();
+               }
+               tarIn.close();
+       }
+
+       public synchronized static void uncompressTarGZ(File tarFile, File dest, boolean grantUserPermsToGroup) throws IOException
+       {
+               TarArchiveInputStream tarIn = new TarArchiveInputStream(new GzipCompressorInputStream(new FileInputStream(tarFile)));
+
+               uncompressArchive(tarIn, dest, archiveType.TGZ, grantUserPermsToGroup);
+       }
+
+       public synchronized static void uncompressTar(File tarFile, File dest, boolean grantUserPermsToGroup) throws IOException
+       {
+               TarArchiveInputStream tarIn = new TarArchiveInputStream(new FileInputStream(tarFile));
+
+               uncompressArchive(tarIn, dest, archiveType.TAR, grantUserPermsToGroup);
+       }
+
+       public synchronized static void uncompressZip(File zipFile, File dest, boolean grantUserPermsToGroup) throws IOException
+       {
+               ZipArchiveInputStream tarIn = new ZipArchiveInputStream(new FileInputStream(zipFile));
+
+               uncompressArchive(tarIn, dest, archiveType.ZIP, grantUserPermsToGroup);
+       }
+
+       static public void main(String[] args) throws Throwable
+       {
+               if (args.length != 2) {
+                       System.err.println("USAGE: UnpackTar {tar.gz file} {output location}");
+                       System.exit(1);
+               }
+
+               try {
+                       UnpackTar.uncompressTarGZ(new File(args[0]), new File(args[1]), false);
+               } catch (Throwable t) {
+                       _logger.error("failed to uncompress tar file " + args[0] + " into " + args[1]);
+                       System.exit(1);
+               }
+
+               System.out.println("successfully uncompressed tar file " + args[0] + " into " + args[1]);
+       }
+
+}
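
the grantUserPermsToGroup arithmetic above, worked through for one concrete mode value:

    int mode = 0750;        // rwxr-x--- as read from the archive entry.
    int temp = mode & 0700; // isolate the user bits: 0700.
    temp = temp / 8;        // dividing by 8 shifts right 3 bit positions: 0070.
    mode = mode | temp;     // OR into the original: 0770, i.e. rwxrwx---.
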
diff --git a/kona/src/org/gffs/filesystem/FileSystemHelper.java b/kona/src/org/gffs/filesystem/FileSystemHelper.java
new file mode 100644 (file)
index 0000000..fd6fccc
--- /dev/null
@@ -0,0 +1,89 @@
+package org.gffs.filesystem;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+public class FileSystemHelper
+{
+       private static Log _logger = LogFactory.getLog(FileSystemHelper.class);
+
+       /**
+        * returns true if the path contains a symbolic link anywhere within it.
+        */
+       public static boolean pathContainsLink(String path) throws FileNotFoundException
+       {
+               // replace any backslashes with forward slashes.
+               path = path.replace('\\', '/');
+
+               // make sure path is absolute.
+               if (!path.startsWith("/")) {
+                       String msg = "path passed in was not absolute: '" + path + "'";
+                       _logger.error(msg);
+                       throw new FileNotFoundException(msg);
+               }
+
+               // collapse any runs of slashes into single ones.
+               path = path.replaceAll("/+", "/");
+               String[] components = path.split("/");
+
+               String currentPath = ""; // never expected to be a link.
+               for (String component : components) {
+                       currentPath = currentPath + "/" + component;
+                       if (isFileSymLink(new File(currentPath))) {
+                               return true;
+                       }
+               }
+               return false;
+
+               /*
+                * future: this could be more useful if it returned the position of the link as a component in path, but then we also need to accept a
+                * starting point for the link searching so they can find all of them.
+                */
+       }
+
+       /**
+        * returns true if the path specified is actually a symbolic link.
+        */
+       public static boolean isFileSymLink(File path)
+       {
+               Path nioPath = path.toPath();
+               return Files.isSymbolicLink(nioPath);
+       }
+
+       /**
+        * makes the string a more friendly filesystem path for java. this includes turning backslashes into forward slashes.
+        */
+       public static String sanitizeFilename(String toClean)
+       {
+               if (toClean == null)
+                       return toClean; // can't fix nothing.
+               // _logger.debug("before='" + toClean + "'");
+
+               String toReturn = toClean.replace('\\', '/');
+
+               // future: any other cleanings we should do on the path?
+
+               // _logger.debug("after='" + toReturn + "'");
+               return toReturn;
+       }
+
+       static public void main(String[] args)
+       {
+               String uglyPath = "C:\\Program Files\\GenesisII\\";
+               String fixedPath = sanitizeFilename(uglyPath);
+               String expectedPath = "C:/Program Files/GenesisII/";
+               if (!fixedPath.equals(expectedPath)) {
+                       System.err.println("FAILURE IN PARSING!  result is not right: '" + fixedPath + "' when it should be '" + expectedPath);
+                       System.exit(1);
+               } else {
+                       System.err.println("parsing occurred as expected.");
+               }
+               System.exit(0);
+       }
+
+}
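
A quick sketch of the helpers above in use; the path is made up, and note that pathContainsLink() insists on an absolute path and throws FileNotFoundException otherwise:

    import org.gffs.filesystem.FileSystemHelper;

    public class LinkCheckExample {
        public static void main(String[] args) throws Exception {
            // sanitize first so windows-style separators don't confuse the splitting.
            String path = FileSystemHelper.sanitizeFilename("/home/fred/stash/data");
            if (FileSystemHelper.pathContainsLink(path))
                System.out.println("some component of " + path + " is a symlink.");
            else
                System.out.println(path + " contains no symlinks.");
        }
    }
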
diff --git a/kona/src/org/gffs/io/IOUtils.java b/kona/src/org/gffs/io/IOUtils.java
new file mode 100644 (file)
index 0000000..b71bac1
--- /dev/null
@@ -0,0 +1,109 @@
+package org.gffs.io;
+
+import java.io.Closeable;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.io.OutputStream;
+
+import org.apache.log4j.Logger;
+
+public class IOUtils
+{
+       static private Logger _logger = Logger.getLogger(IOUtils.class);
+
+       static final private int BUFFER_SIZE = 1024 * 8;
+
+       static public void copy(InputStream in, OutputStream out) throws IOException
+       {
+               byte[] data = new byte[BUFFER_SIZE];
+               int read;
+
+               while ((read = in.read(data)) != -1)
+                       out.write(data, 0, read);
+       }
+
+       static public void close(Closeable closeable)
+       {
+               if (closeable != null) {
+                       try {
+                               closeable.close();
+                       } catch (Throwable cause) {
+                               _logger.error("Error trying to close closeable item.", cause);
+                       }
+               }
+       }
+
+       static public void recursiveDelete(File target)
+       {
+               if (!target.exists())
+                       return;
+
+               if (target.isDirectory()) {
+                       // listFiles() can return null if the directory cannot be read.
+                       File[] entries = target.listFiles();
+                       if (entries != null)
+                               for (File newTarget : entries)
+                                       recursiveDelete(newTarget);
+               }
+
+               target.delete();
+       }
+
+       static public void copy(File source, File target) throws IOException
+       {
+               InputStream in = null;
+               OutputStream out = null;
+
+               try {
+                       in = new FileInputStream(source);
+                       out = new FileOutputStream(target);
+                       copy(in, out);
+               } finally {
+                       close(in);
+                       close(out);
+               }
+       }
+
+       static public void serialize(String filePath, Object obj) throws IOException
+       {
+               serialize(new File(filePath), obj);
+       }
+
+       static public void serialize(File target, Object obj) throws IOException
+       {
+               FileOutputStream fos = null;
+
+               try {
+                       fos = new FileOutputStream(target);
+                       ObjectOutputStream oos = new ObjectOutputStream(fos);
+                       oos.writeObject(obj);
+                       oos.close();
+               } finally {
+                       close(fos);
+               }
+       }
+
+       static public <Type> Type deserialize(Class<Type> cl, String sourcePath) throws FileNotFoundException, IOException
+       {
+               return deserialize(cl, new File(sourcePath));
+       }
+
+       static public <Type> Type deserialize(Class<Type> cl, File source) throws FileNotFoundException, IOException
+       {
+               FileInputStream fin = null;
+
+               try {
+                       fin = new FileInputStream(source);
+                       ObjectInputStream ois = new ObjectInputStream(fin);
+                       return cl.cast(ois.readObject());
+               } catch (ClassNotFoundException e) {
+                       throw new IOException("Unable to deserialize from file.", e);
+               } finally {
+                       close(fin);
+               }
+       }
+}
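
A round-trip sketch for the serialization helpers; the file path is made up, and the payload must implement java.io.Serializable:

    import java.util.ArrayList;
    import org.gffs.io.IOUtils;

    public class SerializeExample {
        public static void main(String[] args) throws Exception {
            ArrayList<String> original = new ArrayList<String>();
            original.add("alpha");
            original.add("beta");
            // write the list out, then read it back with a type-checked cast.
            IOUtils.serialize("/tmp/list.dat", original);
            ArrayList<?> copy = IOUtils.deserialize(ArrayList.class, "/tmp/list.dat");
            System.out.println("round trip ok? " + original.equals(copy));
        }
    }
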
diff --git a/kona/src/org/gffs/network/DeadHostChecker.java b/kona/src/org/gffs/network/DeadHostChecker.java
new file mode 100644 (file)
index 0000000..4b4a833
--- /dev/null
@@ -0,0 +1,197 @@
+package org.gffs.network;
+
+import java.util.HashMap;
+
+import org.apache.commons.lang3.builder.HashCodeBuilder;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+//import edu.virginia.vcgr.genii.client.ClientProperties;
+
+/**
+ * Manages a list of hosts that have recently proven to be down. A host is not failed immediately, since one failure would lead too quickly
+ * to the assumption that the host is down; after N failed tries, though, the host is considered out. It is then retested periodically so we
+ * know when it comes back online.
+ */
+public class DeadHostChecker
+{
+       static private Log _logger = LogFactory.getLog(DeadHostChecker.class);
+       
+       // stand-in for the client properties in the original codebase; a better substitute is still needed.
+       public static class ClientProperties {
+               int timeout = 2 * 60 * 1000; // 2 minute timeout by default, in ms.
+
+               ClientProperties() {
+               }
+       }
+
+       static public ClientProperties props = new ClientProperties();
+
+       // this value specifies how many attempts can fail before the host is considered down.
+       static private final int HOW_MANY_DOWNS_ALLOWED = 1;
+
+       /*
+        * this is the longest amount of time between checking of dead hosts that we'll ever pause. exponential backoff will occur up until this
+        * delay time, and then stay at this delay time afterwards.
+        */
+       static private final int MAXIMUM_ALLOWABLE_CHECKING_DELAY = 60 * 1000 * 5; // current is 5 minutes max for exponential backoff on retries.
+
+       public static class HostKey
+       {
+               public String hostname;
+               public int port;
+
+               HostKey(String hostname, int port)
+               {
+                       this.hostname = hostname;
+                       this.port = port;
+               }
+
+               @Override
+               public int hashCode()
+               {
+                       // seeded with two randomly chosen prime numbers.
+                       // if deriving: appendSuper(super.hashCode()).
+                       return new HashCodeBuilder(37, 839).append(hostname).append(port).toHashCode();
+               }
+
+               @Override
+               public boolean equals(Object o)
+               {
+                       if (!(o instanceof HostKey))
+                               return false; // wrong object.
+                       HostKey realo = (HostKey) o;
+                       return realo.hostname.equals(hostname) && (realo.port == port);
+               }
+
+               @Override
+               public String toString()
+               {
+                       return hostname + ":" + port;
+               }
+       }
+
+       static final HashMap<HostKey, RetryInfo> deadHosts = new HashMap<HostKey, RetryInfo>();
+
+       public static class RetryInfo
+       {
+               public long nextTime;
+               public int delay;
+               public int downCount = 0;
+
+               public RetryInfo()
+               {
+                       // We just failed, so base a delay on the overall timeout to delay our next attempt.
+                       delay = initialDelay();
+                       nextTime = System.currentTimeMillis() + delay;
+               }
+
+               int initialDelay()
+               {
+                       return props.timeout / 2;
+               }
+
+               boolean isThisHostDead()
+               {
+                       if (downCount < HOW_MANY_DOWNS_ALLOWED) {
+                               return false;
+                       }
+                       if (System.currentTimeMillis() > nextTime) {
+                               // this host is being allowed a retry.
+                               nextTime = System.currentTimeMillis() + delay;
+                               return false;
+                       }
+                       return true;
+               }
+
+               void recordDown()
+               {
+                       downCount++;
+               }
+       }
+
+       /**
+        * checks the host in our records and returns true if it is considered alive and false if it is considered dead.
+        */
+       public static boolean evaluateHostAlive(String host, int port)
+       {
+               HostKey key = new HostKey(host, port);
+
+               // Added July 14, 2015 by ASG to deal with dead hosts and not bother trying to talk to them. The timeouts kill us.
+               synchronized (deadHosts) {
+                       if (deadHosts.containsKey(key)) {
+                               RetryInfo inf = deadHosts.get(key);
+                               if (inf == null) {
+                                       _logger.warn("logic error: dead host list claimed to contain " + key + " but returned a null record for it.");
+                                       return true;
+                               }
+                               return !inf.isThisHostDead();
+                       } else {
+                               // up as far as we know; no record exists.
+                               if (_logger.isTraceEnabled())
+                                       _logger.trace("host " + key + " is fine as far as we know.");
+                               return true;
+                       }
+               }
+       }
+
+       public static void addHostToDeadPool(String host, int port)
+       {
+               HostKey key = new HostKey(host, port);
+
+               synchronized (deadHosts) {
+                       RetryInfo inf = deadHosts.get(key);
+                       if (inf == null) {
+                               // Not there, set it up and add it.
+                               inf = new RetryInfo();
+                               deadHosts.put(key, inf);
+                       }
+
+                       boolean alreadyDead = false;
+                       if (inf.isThisHostDead()) {
+                               // this one is already down so expand the timeout.
+                               if (_logger.isDebugEnabled())
+                                       _logger.debug("host " + key + " is considered dead already; increasing delay.");
+                               inf.delay *= 2;
+                               inf.nextTime = System.currentTimeMillis() + inf.delay;
+
+                               if (inf.delay > MAXIMUM_ALLOWABLE_CHECKING_DELAY) {
+                                       inf.delay = MAXIMUM_ALLOWABLE_CHECKING_DELAY;
+                               }
+                               // flag this so we don't say something again below.
+                               alreadyDead = true;
+                       }
+
+                       // we definitely saw this host as down at least once, so record that now.
+                       inf.recordDown();
+
+                       if (!inf.isThisHostDead()) {
+                               // still up, although we needed to record that failure.
+                               if (_logger.isDebugEnabled())
+                                       _logger.debug("host " + key + " is not dead yet but suffered a connection problem.");
+                       } else {
+                               // this is dead now. say something about it if we didn't already.
+                               if (!alreadyDead && _logger.isDebugEnabled())
+                                       _logger.warn("host " + key + " is newly considered dead due to communication problems.");
+                       }
+               }
+       }
+
+       public static void removeHostFromDeadPool(String host, int port)
+       {
+               HostKey key = new HostKey(host, port);
+
+               // Well, the host was reported alive again, so remove if it is in deadHosts.
+               synchronized (deadHosts) {
+                       if (deadHosts.containsKey(key)) {
+                               if (_logger.isDebugEnabled()) {
+                                       // if it's not present, we don't say anything.
+                                       _logger.debug("host " + key + " is being removed from dead host pool.");
+                               }
+                               // drop it from the list.
+                               deadHosts.remove(key);
+                       }
+               }
+       }
+
+}
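
The intended call pattern is to consult evaluateHostAlive() before an expensive connection attempt, report failures with addHostToDeadPool(), and report successes with removeHostFromDeadPool(). A sketch under those assumptions; the host, port, and talkToHost() stand-in are hypothetical:

    import org.gffs.network.DeadHostChecker;

    public class RetryExample {
        public static void main(String[] args) {
            String host = "service.example.org"; // placeholder host and port.
            int port = 18443;
            if (!DeadHostChecker.evaluateHostAlive(host, port)) {
                System.out.println("skipping " + host + "; it is marked dead for now.");
                return;
            }
            try {
                talkToHost(host, port);
                DeadHostChecker.removeHostFromDeadPool(host, port);
            } catch (Exception e) {
                DeadHostChecker.addHostToDeadPool(host, port);
            }
        }

        private static void talkToHost(String host, int port) throws Exception {
            // hypothetical; a real client would issue its request here.
        }
    }
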
diff --git a/kona/src/org/gffs/network/URLDownloader.java b/kona/src/org/gffs/network/URLDownloader.java
new file mode 100644 (file)
index 0000000..f7c590f
--- /dev/null
@@ -0,0 +1,41 @@
+package org.gffs.network;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.URL;
+import java.net.URLConnection;
+
+import org.gffs.io.IOUtils;
+
+public abstract class URLDownloader
+{
+       static final private int CONNECTION_TIMEOUT = 1000 * 8;
+       static final private int READ_TIMEOUT = 1000 * 8;
+
+       static public InputStream connect(URL url) throws IOException
+       {
+               URLConnection connection = url.openConnection();
+               connection.setConnectTimeout(CONNECTION_TIMEOUT);
+               connection.setReadTimeout(READ_TIMEOUT);
+               connection.connect();
+               return connection.getInputStream();
+       }
+
+       static public void download(URL source, File target) throws IOException
+       {
+               InputStream in = null;
+               OutputStream out = null;
+
+               try {
+                       in = connect(source);
+                       out = new FileOutputStream(target);
+                       IOUtils.copy(in, out);
+               } finally {
+                       IOUtils.close(in);
+                       IOUtils.close(out);
+               }
+       }
+}
\ No newline at end of file
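
A download sketch; the URL and target file are hypothetical, and both the connect and read phases time out after the eight seconds configured above:

    import java.io.File;
    import java.net.URL;
    import org.gffs.network.URLDownloader;

    public class DownloadExample {
        public static void main(String[] args) throws Exception {
            URLDownloader.download(new URL("https://example.org/archive.tar.gz"),
                new File("/tmp/archive.tar.gz"));
        }
    }
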
diff --git a/kona/src/org/gffs/readme.txt b/kona/src/org/gffs/readme.txt
new file mode 100644 (file)
index 0000000..c67792f
--- /dev/null
@@ -0,0 +1,9 @@
+
+
+gffs.org is a hypothetical organization invented so that the GFFS assets
+from UVa can be used without retaining the cumbersome and huge paths
+involved.
+those assets are released under the Apache ASL v2 license, and so they're
+provided here under the same license.
+
+
diff --git a/kona/src/org/gffs/version/Version.java b/kona/src/org/gffs/version/Version.java
new file mode 100644 (file)
index 0000000..3137099
--- /dev/null
@@ -0,0 +1,100 @@
+package org.gffs.version;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.util.Properties;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+public class Version implements Comparable<Version>
+{
+       static final private Pattern VERSION_PATTERN = Pattern.compile("^(\\d+)\\.(\\d+)\\.(\\d+)\\s+Build\\s+(\\d+)$");
+
+       private int _majorVersion;
+       private int _minorVersion;
+       private int _subMinorVersion;
+       private int _buildNumber;
+
+       public Version(int major, int minor, int subminor, int buildNumber)
+       {
+               _majorVersion = major;
+               _minorVersion = minor;
+               _subMinorVersion = subminor;
+               _buildNumber = buildNumber;
+       }
+
+       public Version(String str)
+       {
+               parseString(str);
+       }
+
+       public void parseString(String verString)
+       {
+               Matcher matcher = VERSION_PATTERN.matcher(verString);
+               if (!matcher.matches())
+                       throw new IllegalArgumentException("Version string wasn't of the form ###.###.### Build ####");
+
+               _majorVersion = Integer.parseInt(matcher.group(1));
+               _minorVersion = Integer.parseInt(matcher.group(2));
+               _subMinorVersion = Integer.parseInt(matcher.group(3));
+               _buildNumber = Integer.parseInt(matcher.group(4));
+       }
+
+       public Version(File propFile)
+       {
+               Properties props = new Properties();
+               // try-with-resources ensures the stream is closed even when parsing fails.
+               try (FileInputStream fis = new FileInputStream(propFile)) {
+                       props.load(fis);
+                       String appVer = props.getProperty("genii.app-version");
+                       String buildNum = props.getProperty("genii.build-number");
+                       String formattedString = appVer + " Build " + buildNum;
+                       parseString(formattedString);
+               } catch (Throwable t) {
+                       throw new RuntimeException("failure to parse version from file: " + propFile, t);
+               }
+       }
+
+       public boolean equals(Version other)
+       {
+               return compareTo(other) == 0;
+       }
+
+       @Override
+       public boolean equals(Object other)
+       {
+               if (other instanceof Version)
+                       return equals((Version) other);
+
+               return false;
+       }
+
+       @Override
+       public int hashCode()
+       {
+               return (_majorVersion << 3) ^ (_minorVersion << 2) ^ (_subMinorVersion << 1) ^ _buildNumber;
+       }
+
+       @Override
+       public String toString()
+       {
+               return String.format("%d.%d.%d Build %d", _majorVersion, _minorVersion, _subMinorVersion, _buildNumber);
+       }
+
+       @Override
+       public int compareTo(Version o)
+       {
+               int diff = _majorVersion - o._majorVersion;
+               if (diff != 0)
+                       return diff;
+               diff = _minorVersion - o._minorVersion;
+               if (diff != 0)
+                       return diff;
+               diff = _subMinorVersion - o._subMinorVersion;
+               if (diff != 0)
+                       return diff;
+               return _buildNumber - o._buildNumber;
+       }
+
+       static final public Version EMPTY_VERSION = new Version(0, 0, 0, 0);
+}
\ No newline at end of file
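
The accepted version string format is strictly "major.minor.subminor Build number", per the pattern above. A comparison sketch with made-up versions:

    import org.gffs.version.Version;

    public class VersionExample {
        public static void main(String[] args) {
            Version older = new Version("2.7.1 Build 9452"); // made-up version strings.
            Version newer = new Version(2, 7, 2, 9460);
            System.out.println(older + " older than " + newer + "? " + (older.compareTo(newer) < 0));
        }
    }
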
diff --git a/kona/src/org/gffs/version/VersionManager.java b/kona/src/org/gffs/version/VersionManager.java
new file mode 100644 (file)
index 0000000..16d9b50
--- /dev/null
@@ -0,0 +1,41 @@
+package org.gffs.version;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.gffs.application.ProgramTools;
+
+public class VersionManager
+{
+       static private final String VERSION_FILENAME = "current.version";
+
+       private Version _currentVersion;
+       private File _versionFile;
+
+       public VersionManager()
+       {
+               _versionFile = new File(ProgramTools.getInstallationDirectory(), VERSION_FILENAME);
+               _currentVersion = null;
+       }
+
+       public Version getCurrentVersion() throws IOException
+       {
+               if (_currentVersion == null) {
+                       // we go with the installer scheme to start with, where there's a current.version in the
+                       // top-level.
+                       if (!_versionFile.exists()) {
+                               // try failing over to the source code's version of the file inside the installer
+                               // directory.
+                               _versionFile = new File(ProgramTools.getInstallationDirectory(), "installer/" + VERSION_FILENAME);
+                               if (!_versionFile.exists()) {
+                                       _currentVersion = Version.EMPTY_VERSION;
+                               }
+                       }
+                       if (_versionFile.exists()) {
+                               _currentVersion = new Version(_versionFile);
+                       }
+               }
+
+               return _currentVersion;
+       }
+}
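
Finally, a sketch of reading the installed version; this leans on ProgramTools.getInstallationDirectory() from earlier in this change and falls back to Version.EMPTY_VERSION when no current.version file can be found:

    import org.gffs.version.Version;
    import org.gffs.version.VersionManager;

    public class VersionManagerExample {
        public static void main(String[] args) throws Exception {
            VersionManager manager = new VersionManager();
            Version current = manager.getCurrentVersion();
            if (Version.EMPTY_VERSION.equals(current))
                System.out.println("no version file found under the installation directory.");
            else
                System.out.println("installed version: " + current);
        }
    }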