did not get the full set of new things again. argh. need better scripts for working on git branches.
--- /dev/null
+#!/bin/bash
+
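+# makes every source file found by the seek_all_source helper writable by the
+# current user (that sourced script presumably fills $SOURCES_FOUND_LIST with
+# the list of files to process).
+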
+source $FEISTY_MEOW_SCRIPTS/buildor/seek_all_source.sh
+
+function strip_file {
+ file="$1"
+ perl "$FEISTY_MEOW_SCRIPTS/strip_cr.pl" "$file"
+}
+
+#echo tempfile is $SOURCES_FOUND_LIST
+
+# this block reads each found source file from the list, with stdin redirected from the list file.
+while read -r line_found; do
+ chmod u+w "$line_found"
+done <"$SOURCES_FOUND_LIST"
+
+rm "$SOURCES_FOUND_LIST" # clean up.
+
--- /dev/null
+#!/bin/bash
+
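+# runs a long pipeline of sed substitutions over a single source file to migrate it
+# to the reorganized library and namespace layout, then swaps the edited version in
+# over the original file.
+# a hypothetical invocation (the file name is only an example):
+#   bash <this_script> parser_bits.cpp
+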
+file="$1"; shift
+if [ ! -f "$file" ]; then
+ echo "a filename to fix must be passed on the command line."
+ exit 3
+fi
+
+tempfile="$(mktemp "$TMP/zz_temp_codefix.XXXXXX")"
+
+#echo temp file is $tempfile
+
+cat "$file" \
+ | sed -e 's/command_line::__arg/application::__arg/g' \
+ | sed -e 's/IMPLEMENT_CLASS_NAME/DEFINE_CLASS_NAME/g' \
+ | sed -e 's/istring/astring/g' \
+ | sed -e 's/byte_format\([^t]\)/byte_formatter\1/g' \
+ | sed -e 's/isprintf/a_sprintf/g' \
+ | sed -e 's/portable::sleep_ms/time_control::sleep_ms/g' \
+ | sed -e 's/portable::env_string/environment::get/g' \
+ | sed -e 's/portable::launch_process/launch_process::run/g' \
+ | sed -e 's/portable::application_name/application_configuration::application_name/g' \
+ | sed -e 's/portable::process_id/application_configuration::process_id/g' \
+ | sed -e 's/log_base::platform_ending/parser_bits::platform_eol_to_chars/g' \
+ | sed -e 's/ithread/ethread/g' \
+ | sed -e 's/timed_object/timeable/g' \
+ | sed -e 's/utility::timestamp(/time_stamp::notarize(/g' \
+ | sed -e 's/anchor_window/hoople_service/g' \
+ | sed -e 's/basis::attach/structures::attach/g' \
+ | sed -e 's/basis::detach/structures::detach/g' \
+ | sed -e 's/portable::system_error/critical_events::system_error/g' \
+ | sed -e 's/basis::pack\([^a]\)/structures::pack_array\1/g' \
+ | sed -e 's/basis::unpack/structures::unpack_array/g' \
+ | sed -e 's/<data_struct/<structures/g' \
+ | sed -e 's/<basis\/set/<structures\/set/g' \
+ | sed -e 's/basis::set/structures::set/g' \
+ | sed -e 's/<basis\/object_base/<basis\/contracts/g' \
+ | sed -e 's/object_base/root_object/g' \
+ | sed -e 's/<basis\/function.h/<basis\/functions.h/g' \
+ | sed -e 's/^#include <basis\/portable.h> *$//g' \
+ | sed -e 's/^#include <basis\/log_base.h> *$//g' \
+ | sed -e 's/^#include <basis\/utility.h> *$//g' \
+ | sed -e 's/^#include <basis\/packable.h> *$//g' \
+ | sed -e 's/^#include <basis\/auto_synch.h> *$//g' \
+ | sed -e 's/class infoton_list;//g' \
+ | sed -e 's/^#include "[_a-zA-Z0-9]*_dll.h" *$//g' \
+ | sed -e 's/^#include "dll_[_a-zA-Z0-9]*.h" *$//g' \
+ | sed -e 's/^#ifndef .*IMPLEMENTATION_FILE *$//g' \
+ | sed -e 's/^#define .*IMPLEMENTATION_FILE *$//g' \
+ | sed -e 's/^#endif .*IMPLEMENTATION_FILE *$//g' \
+ | sed -e 's/convert_utf/utf_conversion/g' \
+ | sed -e 's/mechanisms\/time_stamp/timely\/time_stamp/g' \
+ | sed -e 's/mechanisms\/roller/structures\/roller/g' \
+ | sed -e 's/mechanisms\/safe_roller/processes\/safe_roller/g' \
+ | sed -e 's/basis.string_array/structures\/string_array/g' \
+ | sed -e 's/opsystem.application_shell/application\/application_shell/g' \
+ | sed -e 's/opsystem.filename/filesystem\/filename/g' \
+ | sed -e 's/opsystem.heavy_file_ops/filesystem\/heavy_file_ops/g' \
+ | sed -e 's/opsystem.huge_file/filesystem\/huge_file/g' \
+ | sed -e 's/opsystem.application_base/application\/base_application/g' \
+ | sed -e 's/opsystem.command_line/application\/command_line/g' \
+ | sed -e 's/opsystem.directory/filesystem\/directory/g' \
+ | sed -e 's/opsystem.rendezvous/application\/rendezvous/g' \
+ | sed -e 's/opsystem.singleton_application/application\/singleton_application/g' \
+ | sed -e 's/opsystem.timer_driver/timely\/timer_driver/g' \
+ | sed -e 's/opsystem.ini_config/configuration\/ini_configurator/g' \
+ | sed -e 's/opsystem.path_config/configuration\/application_config/g' \
+ | sed -e 's/opsystem.byte_filer/filesystem\/byte_filer/g' \
+ | sed -e 's/sockets.address/sockets\/internet_address/g' \
+ | sed -e 's/path_configuration/application_configuration/g' \
+ | sed -e 's/mechanisms.timer/timely\/stopwatch/g' \
+ | sed -e 's/mechanisms.ethread/processes\/ethread/g' \
+ | sed -e 's/mechanisms.safe_callback/processes\/safe_callback/g' \
+ | sed -e 's/mechanisms.thread_cabinet/processes\/thread_cabinet/g' \
+ | sed -e 's/basis.chaos/mathematics\/chaos/g' \
+ | sed -e 's/[A-Z_][A-Z_]*CLASS_STYLE //g' \
+ | sed -e 's/[A-Z_][A-Z_]*FUNCTION_STYLE //g' \
+ | sed -e 's/\([^:]\)u_int/\1basis::u_int/g' \
+ | sed -e 's/\([^:]\)u_short/\1basis::u_short/g' \
+ | sed -e 's/class astring;/#include <basis\/astring.h>/g' \
+ | sed -e 's/class int_set;/#include <structures\/set.h>/g' \
+ | sed -e 's/class int_roller;/#include <structures\/roller.h>/g' \
+ | sed -e 's/class outcome;/#include <basis\/outcome.h>/g' \
+ | sed -e 's/class mutex;/#include <basis\/mutex.h>/g' \
+ | sed -e 's/class ethread;/#include <processes\/ethread.h>/g' \
+ | sed -e 's/class byte_filer;/#include <filesystem\/byte_filer.h>/g' \
+ | sed -e 's/class string_array;/#include <structures\/string_array.h>/g' \
+ | sed -e 's/class string_table;/#include <structures\/string_table.h>/g' \
+ | sed -e 's/class byte_array;/#include <basis\/byte_array.h>/g' \
+ | sed -e 's/class string_set;/#include <structures\/set.h>/g' \
+ | sed -e 's/class time_stamp;/#include <timely\/time_stamp.h>/g' \
+ | sed -e 's/class directory_tree;/#include <filesystem\/directory_tree.h>/g' \
+ | sed -e 's/class filename_list;/#include <filesystem\/filename_list.h>/g' \
+ | sed -e 's/class chaos;/#include <mathematics\/chaos.h>/g' \
+ | sed -e 's/class configurator;/#include <configuration\/configurator.h>/g' \
+ | sed -e 's/class unique_int;/#include <structures\/unique_id.h>/g' \
+ | sed -e 's/class tcpip_stack;/#include <sockets\/tcpip_stack.h>/g' \
+ | sed -e 's/class safe_roller;/#include <processes\/safe_roller.h>/g' \
+ | sed -e 's/class blowfish_crypto;/#include <crypto\/blowfish_crypto.h>/g' \
+ | sed -e 's/class RSA_crypto;/#include <crypto\/RSA_crypto.h>/g' \
+ | sed -e 's/class entity_data_bin;/#include <octopus\/entity_data_bin.h>/g' \
+ | sed -e 's/class infoton;/#include <octopus\/infoton.h>/g' \
+ | sed -e 's/class octopus_request_id;/#include <octopus\/entity_defs.h>/g' \
+ | sed -e 's/class internet_address;/#include <sockets\/internet_address.h>/g' \
+ | sed -e 's/class machine_uid;/#include <sockets\/machine_uid.h>/g' \
+ | sed -e 's/class spocket;/#include <sockets\/spocket.h>/g' \
+ | sed -e 's/class encryption_tentacle;/#include <tentacles\/encryption_tentacle.h>/g' \
+ | sed -e 's/class login_tentacle;/#include <tentacles\/login_tentacle.h>/g' \
+ | sed -e 's/class thread_cabinet;/#include <processes\/thread_cabinet.h>/g' \
+ | sed -e 's/RSA_crypto/rsa_crypto/g' \
+ | sed -e 's/float_plus<double>/double_plus/g' \
+ | sed -e 's/basis::obscure_/structures::obscure_/g' \
+ | sed -e 's/program_wide_logger()/program_wide_logger::get()/g' \
+ | sed -e 's/textual.tokenizer/configuration\/tokenizer/g' \
+ | sed -e 's/\([^_]\)tokenizer/\1variable_tokenizer/g' \
+ | sed -e 's/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/[\/]*/\/\/\/\/\/\/\/\/\/\/\/\/\/\//g' \
+ >"$tempfile"
+
+mv "$tempfile" "$file"
+
+
--- /dev/null
+#!/bin/bash
+# this script takes one microsoft compiler output file and tries to find the
+# mangled C++ function names contained in it.
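+# a hypothetical invocation (the object file name is just an example):
+#   bash <this_script> my_library.obj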
+file=$1
+dumpbin //all "$file" | grep SECREL | sed -e 's/^.*0000C *[0-9a-fA-F][0-9a-fA-F] *\([^ ]*\).*$/\1/'
+
--- /dev/null
+#!/bin/bash
+
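+# checks whether a mangled function name shows up anywhere in a binary by running
+# dumpbin over it and grepping the output case-insensitively.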
+filename="$1"; shift
+function_name="$1"; shift
+
+good_path="$(cygpath -w -s "$filename")"
+
+# the /exports flag could be used instead of /all to restrict the scan to exported symbols.
+dumpbin /all "$good_path" | grep -q -i "$function_name"
+if [ $? -eq 0 ]; then
+ echo "Found $function_name in $filename"
+fi
+
+
+
--- /dev/null
+#!/bin/bash
+# this simple script kills off some troublesome processes in preparation for a new build
+# with visual studio.
+zap_process.exe msbuild.exe
+zap_process.exe mspdbsrv.exe
+
--- /dev/null
+#!/bin/bash
+
+# this script rebuilds the bookmarks files. it requires the variables:
+# WEBBED_SITES: points at the root of the web hierarchy.
+# RUNTIME_PATH: points at the runtime area where the built binaries live.
+
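+# a hypothetical setup before running this (the paths are only examples):
+#   export WEBBED_SITES=$HOME/web
+#   export RUNTIME_PATH=$HOME/runtime
+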
+export GRUNTOSE_DIR=$WEBBED_SITES/gruntose.com
+
+rootname=$HOME/generated
+suffix=.html
+norm_add=_marks
+js_add=_js_tree_marks
+moz_add=_moz_bookmarks
+
+newmarx=${rootname}_links.csv
+genlinx=$rootname$norm_add$suffix
+genlinx_js=$rootname$js_add$suffix
+genlinx_moz=$rootname$moz_add$suffix
+
+if [ -f $genlinx ]; then rm $genlinx; fi
+if [ -f $newmarx ]; then rm $newmarx; fi
+if [ -f $genlinx_js ]; then rm $genlinx_js; fi
+if [ -f $genlinx_moz ]; then rm $genlinx_moz; fi
+
+$RUNTIME_PATH/binaries/marks_sorter -i $GRUNTOSE_DIR/Info/Twain/links_db.csv -o $newmarx
+if [ $? != 0 ]; then
+ echo error during sorting of the bookmarks.
+ exit 1
+fi
+
+$RUNTIME_PATH/binaries/marks_maker -i $GRUNTOSE_DIR/Info/Twain/links_db.csv -t $GRUNTOSE_DIR/Info/Twain/marks_template.html -o $genlinx -s human
+if [ $? != 0 ]; then
+ echo error during creation of the normal web page of bookmarks.
+ exit 1
+fi
+
+$RUNTIME_PATH/binaries/marks_maker -i $GRUNTOSE_DIR/Info/Twain/links_db.csv -t $GRUNTOSE_DIR/Info/Twain/marks_template.html -o $genlinx_moz -s mozilla
+if [ $? != 0 ]; then
+ echo error during creation of the mozilla format page of bookmarks.
+ exit 1
+fi
+
+$RUNTIME_PATH/binaries/js_marks_maker -i $GRUNTOSE_DIR/Info/Twain/links_db.csv -t $GRUNTOSE_DIR/Info/Twain/js_template.html -o $genlinx_js
+if [ $? != 0 ]; then
+ echo error during creation of the javascript bookmark page.
+ exit 1
+fi
+
+\mv -f $genlinx $genlinx_moz $genlinx_js $GRUNTOSE_DIR/Info/Twain
+\mv -f $newmarx $TMP
+
--- /dev/null
+#!/bin/bash
+
+# these metrics show how bogged down we are in to-do type items.
+
+REPORT_FILE="$HOME/cloud/fred_stats/overload_history.txt"
+
+# given a path, this will count how many items are under it, ignoring svn and git files, plus
+# other patterns we have noticed are not useful.
+function calculate_count()
+{
+ local dir="$1"; shift
+ local count=$(find "$dir" -type f -exec echo \"{}\" ';' 2>/dev/null | grep -v "\.svn" | grep -v "\.git"| grep -v "\.basket" | grep -v "\.version" | grep -v "\.keep" | wc -l | tr -d ' ')
+ if [ -z "$count" ]; then echo 0; else echo "$count"; fi
+}
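+# for instance (hypothetical path): calculate_count "$HOME/cloud/grunty_notes"
+# echoes a single integer count of the files found under that folder.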
+
+# calculates the size in bytes of all the note files in a hierarchy (the report
+# converts this to kilobytes when the line is formatted).
+# this is just a raw statistic for how much content all those notes make up. since
+# we have not separated out all the to-dos in some files (most notably the metaverse
+# backlogs and to-do lists), it's good to also know what kind of girth the notes have.
+function calculate_weight()
+{
+ local dir="$1"; shift
+ local weight=$(find "$dir" -type f -exec echo \"{}\" ';' 2>/dev/null | grep -v "\.svn" | grep -v "\.git"| grep -v "\.basket" | grep -v "\.version" | grep -v "\.keep" | xargs ls -al | awk '{ print $5 }' | paste -sd+ | bc 2>/dev/null)
+ if [ -z "$weight" ]; then echo 0; else echo "$weight"; fi
+}
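+# for instance (hypothetical path): calculate_weight "$HOME/cloud/grunty_notes"
+# echoes the total byte count; the report formatting converts it to kilobytes.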
+
+# calculate_complexity gets a very simple metric of how many directory components are
+# present at the target location and below.
+function calculate_complexity()
+{
+ local dir="$1"; shift
+ local complexity=$(find "$dir" -type d | wc -l)
+ if [ -z "$complexity" ]; then echo 0; else echo "$complexity"; fi
+}
+
+# produces a report line in our format.
+function format_report_line()
+{
+ local count="$1"; shift
+ local weight="$1"; shift
+ weight=$((weight / 1024))
+ local complexity="$1"; shift
+ echo "$count\t${complexity}\t\t${weight}\t\t$*\n"
+}
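+# example with hypothetical values: format_report_line 12 204800 7 "sample notes"
+# yields a row with count 12, complexity 7 and weight 200 (kb); the embedded escape
+# sequences are expanded by the final "echo -e" into tabs and newlines.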
+
+# two parameters are needed: the directory to sum up and the label to use for it in the report.
+# this will calculate the count and weight for a hierarchy of notes, and then produce a
+# line of reporting for those.
+function analyze_hierarchy_and_report()
+{
+ local dir="$1"; shift
+ local label="$1"; shift
+ local count=$(calculate_count "$dir")
+ total_overload=$(($count + $total_overload))
+ local weight=$(calculate_weight "$dir")
+ total_weight=$(($total_weight + $weight))
+ local complexity=$(calculate_complexity "$dir")
+ total_complexity=$(($total_complexity + $complexity))
+ full_report+=$(format_report_line "$count" "$weight" "$complexity" "$label")
+}
+
+# scans through items in the notes folder that begin with a pattern.
+# each of those is treated as an aggregable portion of the report.
+# first parameter is the title in the report, second and so on are
+# a list of directory patterns to scan and aggregate.
+function analyze_by_dir_patterns()
+{
+ local title="$1"; shift
+ local hier_count=0
+ local hier_weight=0
+ local hier_complexity=0
+ for folder in "$@"; do
+ temp_count=$(calculate_count "$folder")
+ hier_count=$(($hier_count + $temp_count))
+ temp_weight=$(calculate_weight "$folder")
+ hier_weight=$(($hier_weight + $temp_weight))
+ temp_complexity=$(calculate_complexity "$folder")
+ hier_complexity=$(($hier_complexity + $temp_complexity))
+ done
+ total_overload=$(($hier_count + $total_overload))
+ total_weight=$(($total_weight + $hier_weight))
+ total_complexity=$(($total_complexity + $hier_complexity))
+ full_report+=$(format_report_line "$hier_count" "$hier_weight" "$hier_complexity" "$title")
+}
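+# for example (hypothetical folders): analyze_by_dir_patterns "hobby projects" ~/cloud/hobby*
+# would sum the counts, weights and complexities of all matching folders into one report line.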
+
+##############
+
+# reset these before we add anything...
+total_overload=0
+total_weight=0
+total_complexity=0
+
+# start out the report with a header.
+full_report="\
+\n\
+current information overload consists of:\n\
+\n\
+"
+full_report+="count\tcomplexity\tweight (kb)\tcategory\n\
+================================================================\n\
+"
+
+analyze_hierarchy_and_report ~/cloud/urgent "high priority"
+
+# notes are individual files of tasks, usually, although some are combined.
+analyze_hierarchy_and_report ~/cloud/grunty_notes "grunty notes (external facing things?)"
+
+# feisty notes are about feisty meow(r) concerns ltd codebase development.
+analyze_hierarchy_and_report ~/cloud/feisty_notes "feisty meow notes (code related)"
+
+# home notes are a new top-level category; used to be under the grunty.
+analyze_hierarchy_and_report ~/cloud/branch_road "hearth and home notes (branch road, yo!)"
+
+# games and fun stuff. not sure why these count as backlog, but whatever.
+analyze_hierarchy_and_report ~/cloud/games_yo "games, yo!"
+
+# some source code that needs to be sucked into other places, other codebases. they are not
+# supposed to pile up here. but they have, so we track them.
+analyze_hierarchy_and_report ~/cloud/scavenging_source "source scavenging"
+
+# and then count up the things that we think will be cleaned up soon, but one thing we
+# have learned is that unsorted files haven't been categorized yet.
+analyze_hierarchy_and_report ~/cloud/disordered "unsorted files"
+
+# we now consider the backlog of things to read to be a relevant fact. this is going to hose
+# up our weight accounting considerably.
+analyze_hierarchy_and_report ~/cloud/reading "reading list"
+
+# scan all the items declared as active projects.
+analyze_by_dir_patterns "active items" ~/cloud/active*
+
+# scan across all appropriately named project or research folders that live in the "cloud".
+analyze_by_dir_patterns "project files" ~/cloud/project* ~/cloud/research*
+
+# snag any work related items for that category.
+analyze_by_dir_patterns "job and work tasks" ~/cloud/job*
+
+# scan all the trivial project folders.
+analyze_by_dir_patterns "trivial items" ~/cloud/trivia*
+
+full_report+="================================================================\n\
+"
+full_report+="$(format_report_line "$total_overload" "$total_weight" "$total_complexity" "total overload")"
+full_report+="\n\
+[gathered on $(date)]\n\n\
+##############"
+
+echo -e "$full_report" | tee -a "$REPORT_FILE"
+
--- /dev/null
+#!/bin/sh
+
+if [ ! -f "$1" -o -z "$2" ]; then
+ echo "Usage: $0 input_file output_file"
+ echo "This script needs a file to sort and a new name for the file after sorting."
+ exit 1
+fi
+
+echo "Sorting $1 into $2"
+
+sort -t ' ' -k 4.9,4.12n -k 4.5,4.7M -k 4.2,4.3n -k 4.14,4.15n -k 4.17,4.18n -k 4.20,4.21n "$1" > "$2"
+
+
--- /dev/null
+#!/bin/bash
+
+sourcedir="$1"; shift
+targetdir="$1"; shift
+
+# where we will look for mtp devices.
+mtp_base_path="/run/user/$UID/gvfs/mtp:host="
+
+if [ -z "$sourcedir" -o -z "$targetdir" ]; then
+ echo "This script needs source and target directory names that can be synched"
+ echo "between the computer's file system and a USB drive mounted with the mtp"
+ echo "protocol. The folder on the USB drive should include the entire path except"
+ echo "for the device mount location. For example:"
+ echo " $(basename $0) ebooks \"/Internal\ Storage/My\ Files/ebooks\""
+ exit 1
+fi
+
+# the mtp mount point name fluctuates per device, so we glob for it below. if more than
+# one device is mounted, we cannot tell which to use, so check for exactly one match:
+mtpdevices=("$mtp_base_path"*)
+if [ ${#mtpdevices[@]} -ne 1 ]; then
+ echo "There is more than one MTP device mounted. This script requires exactly one"
+ echo "MTP device mounted at a time. Sorry."
+ exit 1
+elif [ ! -d "${mtpdevices[0]}" ]; then
+ echo "The MTP device does not seem to be mounted currently. The path did not"
+ echo "expand properly."
+ exit 1
+fi
+
+rsync -rv --exclude '*.git' --exclude '*.svn' "$sourcedir" "${mtpdevices[0]}/$targetdir"
+
--- /dev/null
+#!/usr/bin/python
+
+class phrase_replacer:
+ """ A simple replacement tool that honors some C/C++ syntax when replacing.
+
+ This will take a particular phrase given by the user and find it in a set of
+ documents. That phrase will be replaced when it appears completely, and is not
+ in a C or C++ style comment (// or /* ... */). It also must be clear of any
+ other alphanumeric pollution, and only be surrounded by white space or operation
+ characters.
+ """
+
+ def __init__(self, argv):
+ """ Initializes the class with a set of arguments to work with.
+
+ The arguments need to be in the form described by print_instructions().
+ """
+ self.arguments = argv
+ # we have three states for the processing: consuming normal code (not within a comment),
+ # consuming a single line comment, and consuming a multi-line comment.
+ self.EATING_NORMAL_TEXT = 0
+ self.EATING_ONELINE_COMMENT = 1
+ self.EATING_MULTILINE_COMMENT = 2
+
+ def print_instructions(self):
+ """ Shows the instructions for using this class. """
+ print("""
+This script will replace all occurrences of a phrase you specify in a set of files. The
+replacement process will be careful about C and C++ syntax and will not replace occurrences
+within comments or which are not "complete" phrases (due to other alpha-numeric characters
+that abut the phrase). The arguments to the script are:
+
+ {0}: PhraseToReplace ReplacementPhrase File1 [File2 ...]
+
+For example, if the phrase to replace is Goop, it will be replaced in these contexts:
+ Goop[32]
+ molo-Goop
+ *Goop
+but it will not be found in these contexts:
+ // doop de Goop
+ rGoop
+ Goop23
+""".format(self.arguments[0]))
+
+ def validate_and_consume_command_line(self):
+ """ Performs command line argument handling. """
+ arg_count = len(self.arguments)
+# for i in range(1, arg_count):
+# print("i is {0}, arg is {1}".format(i, self.arguments[i]))
+ # we need more than 2 arguments, since there needs to be at least one file also.
+ if arg_count < 4:
+ return False
+ self.phrase_to_replace = self.arguments[1]
+ self.replacement_bit = self.arguments[2]
+ print("got phrase to replace: \'{0}\' and replacement: \'{1}\'".format(self.phrase_to_replace, self.replacement_bit))
+ self.files = self.arguments[3:]
+ return True
+
+ def read_file_data(self, filename):
+ """ loads the file into our memory buffer for processing. """
+ try:
+ our_file = open(filename, "rb")
+ try:
+ file_buffer = our_file.read()
+ except IOError:
+ print("There was an error reading the file {0}".format(filename))
+ return False
+ finally:
+ our_file.close()
+ except IOError:
+ print("There was an error opening the file {0}".format(filename))
+ return False
+ self.file_lines = file_buffer.splitlines()
+ return True
+
+ def write_file_data(self, filename):
+ """ takes the processed buffer and sends it back out to the filename. """
+# output_filename = filename + ".new" # safe testing version.
+ output_filename = filename
+ try:
+ our_file = open(output_filename, "wb")
+ try:
+ our_file.write(self.processed_buffer)
+ except IOError:
+ print("There was an error writing the file {0}".format(output_filename))
+ return False
+ finally:
+ our_file.close()
+ except IOError:
+ print("There was an error opening the file {0}".format(output_filename))
+ return False
+ return True
+
+ def is_alphanumeric(self, check_char):
+ """ given a character, this returns true if it's between a-z, A-Z or 0-9. """
+ if (check_char[0] == "_"):
+ return True
+ if ( (check_char[0] <= "z") and (check_char[0] >= "a")):
+ return True
+ if ( (check_char[0] <= "Z") and (check_char[0] >= "A")):
+ return True
+ if ( (check_char[0] <= "9") and (check_char[0] >= "0")):
+ return True
+ return False
+
+ def replace_within_string(self, fix_string):
+ """ given a string to fix, this replaces all appropriate locations of the phrase. """
+ indy = 0
+# print("got to replace within string")
+ while (indy < len(fix_string)):
+ # locate next occurrence of replacement text, if any.
+ indy = fix_string.find(self.phrase_to_replace, indy)
+# print("find indy={0}".format(indy))
+ if (indy > -1):
+# print("found occurrence of replacement string")
+ # we found an occurrence, but we have to validate it's separated enough.
+ char_before = "?" # simple default that won't fail our check.
+ char_after = "?"
+ if (indy > 0):
+ char_before = fix_string[indy-1]
+ if (indy + len(self.phrase_to_replace) < len(fix_string)):
+ char_after = fix_string[indy+len(self.phrase_to_replace)]
+# print("char before {0}, char after {1}".format(char_before, char_after))
+ if (not self.is_alphanumeric(char_before) and not self.is_alphanumeric(char_after)):
+ # this looks like a good candidate for replacement.
+ fix_string = "{0}{1}{2}".format(fix_string[0:indy], self.replacement_bit, fix_string[indy+len(self.phrase_to_replace):])
+# print("changed string to: {0}".format(fix_string))
+ else:
+ break
+ indy += 1 # no matches means we have to keep skipping forward.
+ return fix_string # give back processed form.
+
+ def emit_normal_accumulator(self):
+ """ handle emission of a chunk of normal code (without comments). """
+ # process the text to perform the replacement...
+ self.normal_accumulator = self.replace_within_string(self.normal_accumulator)
+ # then send the text into our main buffer; we're done looking at it.
+ self.processed_buffer += self.normal_accumulator
+ self.normal_accumulator = ""
+
+ def emit_comment_accumulator(self):
+ """ emits the piled up text for comments found in the code. """
+ self.processed_buffer += self.comment_accumulator
+ self.comment_accumulator = ""
+
+ def process_file_data(self):
+ """ iterates through the stored version of the file and replaces the phrase. """
+ self.state = self.EATING_NORMAL_TEXT
+ # clear out any previously processed text.
+ self.processed_buffer = "" # reset our new version of the file contents.
+ self.normal_accumulator = ""
+ self.comment_accumulator = ""
+ # iterate through the file's lines.
+ while (len(self.file_lines) > 0):
+ # get the next line out of the input.
+ next_line = self.file_lines[0]
+ # drop that line from the remaining items.
+ self.file_lines = self.file_lines[1:]
+# print("next line: {0}".format(next_line))
+ # decide if we need a state transition.
+ indy = 0
+ if ((len(next_line) > 0) and (self.state == self.EATING_NORMAL_TEXT) and ('/' in next_line)):
+ # loop to catch cases where multiple slashes are in line and one IS a comment.
+ while (indy < len(next_line)):
+ # locate next slash, if any.
+ indy = next_line.find('/', indy)
+ if (indy < 0):
+ break
+ if ((len(next_line) > indy + 1) and (next_line[indy + 1] == '/')):
+ # switch states and handle any pent-up text.
+ self.normal_accumulator += next_line[0:indy] # get last tidbit before comment start.
+ next_line = next_line[indy:] # keep only the stuff starting at slash.
+ self.state = self.EATING_ONELINE_COMMENT
+# print("state => oneline comment")
+ self.emit_normal_accumulator()
+ break
+ if ((len(next_line) > indy + 1) and (next_line[indy + 1] == '*')):
+ # switch states and deal with accumulated text.
+ self.normal_accumulator += next_line[0:indy] # get last tidbit before comment start.
+ next_line = next_line[indy:] # keep only the stuff starting at slash.
+ self.state = self.EATING_MULTILINE_COMMENT
+# print("state => multiline comment")
+ self.emit_normal_accumulator()
+ break
+ indy += 1 # no matches means we have to keep skipping forward.
+
+ # now handle things appropriately for our current state.
+ if (self.state == self.EATING_NORMAL_TEXT):
+ # add the text to the normal accumulator.
+# print("would handle normal text")
+ self.normal_accumulator += next_line + "\n"
+ elif (self.state == self.EATING_ONELINE_COMMENT):
+ # save the text in comment accumulator.
+# print("would handle oneline comment")
+ self.comment_accumulator += next_line + "\n"
+ self.emit_comment_accumulator()
+ self.state = self.EATING_NORMAL_TEXT
+ elif (self.state == self.EATING_MULTILINE_COMMENT):
+ # save the text in comment accumulator.
+# print("would handle multiline comment")
+ self.comment_accumulator += next_line + "\n"
+ # check for whether the multi-line comment is completed on this line.
+ if ("*/" in next_line):
+# print("found completion for multiline comment on line.")
+ self.emit_comment_accumulator()
+ self.state = self.EATING_NORMAL_TEXT
+ # verify we're not in the wrong state still.
+ if (self.state == self.EATING_MULTILINE_COMMENT):
+ print("file seems to have unclosed multi-line comment.")
+ # last step is to spit out whatever was trailing in the accumulator.
+ self.emit_normal_accumulator()
+ # if we got to here, we seem to have happily consumed the file.
+ return True
+
+ def replace_all_occurrences(self):
+ """ Orchestrates the process of replacing the phrases. """
+ # process our command line arguments to see what we need to do.
+ try_command_line = self.validate_and_consume_command_line()
+ if (try_command_line != True):
+ print("failed to process the command line...\n")
+ self.print_instructions()
+ exit(1)
+ # iterate through the list of files we were given and process them.
+ for i in range(0, len(self.files)):
+ print("file {0} is \'{1}\'".format(i, self.files[i]))
+ worked = self.read_file_data(self.files[i])
+ if (worked is False):
+ print("skipping since file read failed on: {0}".format(self.files[i]))
+ continue
+# print("{0} got file contents:\n{1}".format(self.files[i], self.file_lines))
+ worked = self.process_file_data()
+ if (worked is False):
+ print("skipping, since processing failed on: {0}".format(self.files[i]))
+ continue
+ worked = self.write_file_data(self.files[i])
+ if (worked is False):
+ print("writing file back failed on: {0}".format(self.files[i]))
+ print("finished processing all files.")
+
+
+if __name__ == "__main__":
+ import sys
+ slicer = phrase_replacer(sys.argv)
+ slicer.replace_all_occurrences()
+
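+# an example command line (the file name is hypothetical):
+#   python <this_script> istring astring parser_bits.cpp
+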
+##############
+
+# parking lot of things to do in future:
+
+#hmmm: actually sometimes one DOES want to replace within comments. argh.
+# make ignoring inside comments an optional thing. later.
+
+# hmmm: one little issue here is if the text to be replaced happens to reside on
+# the same line after a multi-line comment. we are okay with ignoring that
+# possibility for now since it seems brain-dead to write code that way.
+
+