1 /*****************************************************************************\
3 * Name : marks_checker *
4 * Author : Chris Koeritz *
8 * Checks on the existence of the links listed in a HOOPLE format link *
9 * database and reports the bad ones. *
11 *******************************************************************************
12 * Copyright (c) 2005-$now By Author. This program is free software; you can *
13 * redistribute it and/or modify it under the terms of the GNU General Public *
14 * License as published by the Free Software Foundation; either version 2 of *
15 * the License or (at your option) any later version. This is online at: *
16 * http://www.fsf.org/copyleft/gpl.html *
17 * Please send any updates to: fred@gruntose.com *
18 \*****************************************************************************/
20 #include "bookmark_tree.h"
22 #include <algorithms/sorts.h>
23 #include <application/command_line.h>
24 #include <application/hoople_main.h>
25 #include <application/windoze_helper.h>
26 #include <basis/astring.h>
27 #include <basis/functions.h>
28 #include <basis/guards.h>
29 #include <basis/mutex.h>
30 #include <filesystem/byte_filer.h>
31 #include <filesystem/filename.h>
32 #include <loggers/file_logger.h>
33 #include <mathematics/chaos.h>
34 #include <processes/ethread.h>
35 #include <processes/thread_cabinet.h>
36 #include <structures/static_memory_gremlin.h>
37 #include <structures/unique_id.h>
38 #include <textual/parser_bits.h>
39 #include <timely/time_control.h>
41 #include <curl/curl.h>
45 using namespace algorithms;
46 using namespace application;
47 using namespace basis;
48 using namespace filesystem;
49 using namespace loggers;
50 using namespace nodes;
51 using namespace mathematics;
52 using namespace processes;
53 using namespace structures;
54 using namespace textual;
55 using namespace timely;
// uncomment to have more debugging noise.
#define BASE_LOG(s) program_wide_logger::get().log(astring(s), ALWAYS_PRINT)
  // unconditionally logs a plain message to the program-wide logger.
#define LOG(s) CLASS_EMERGENCY_LOG(program_wide_logger::get(), \
    a_sprintf("line %d: ", _categories._line_number) + s)
  // logs a message prefixed with the line number currently being processed
  // in the link database (read from the _categories member).

const int PAUSEY_SNOOZE = 200;
  // how long we sleep if there are too many threads running already.

const int MAXIMUM_THREADS = 14;
  // we allow this many simultaneous web checks at a time.

const int MAXIMUM_READ = 1008;
  // we only download this much of the link. this avoids huge downloads of
  // the page contents, since we only need to see that the link answers.

const int MAXIMUM_ATTEMPTS = 2;
  // we'll retry the check if we get an actual error instead of an http error
  // code. when a name can't be found in the DNS, it sometimes comes back
  // shortly after it was checked. if we see we can't reach the domain after
  // this many tries, then we give up on the address.

const int TIME_PER_REQUEST_IN_SEC = 60 * 6;
  // limit our requests to this long of a period. then we will not be
  // stalled forever by uncooperative websites.

const char *FAKE_AGENT_STRING = "FredWeb/7.0 (X11; U; Linux i686; "
    "en-US; rv:1.8.19) Flecko/20081031";
  // we use this as our agent type, since some sites won't treat us fairly
  // if they think we're robots when we're checking their site health.
  // for example (ahem!), the usa today websites.
93 ////////////////////////////////////////////////////////////////////////////
  safe_int_array() : _lock(), _list(0) {}
    // creates an empty integer list guarded by the mutex _lock.

  // appends "to_add" to the list; safe to call from multiple threads.
  void add(int to_add) {
    ///BASE_LOG(a_sprintf("adding %d to list", to_add));
    auto_synchronizer l(_lock);

    // NOTE(review): the declaration line of the length accessor is not
    // visible in this view; the locked body below reports the element count.
    auto_synchronizer l(_lock);
    return _list.length();

  // returns a snapshot copy of the list, taken while holding the lock so
  // the caller can iterate it without further synchronization.
  basis::int_array make_copy() {
    auto_synchronizer l(_lock);

  basis::int_array _list;  // the underlying integer list; guarded by _lock.
121 ////////////////////////////////////////////////////////////////////////////
// the application object: reads a HOOPLE-format link database, spawns worker
// threads to check each link's health via cURL, and writes the good and bad
// links out to separate files.
class marks_checker : public application_shell
    : application_shell(), _check_redirection(false),
      _max_threads(MAXIMUM_THREADS), _null_file(filename::null_device(), "w")

  DEFINE_CLASS_NAME("marks_checker");

  virtual int execute();
    // main driver for the program; returns the process exit code.

  int print_instructions(const filename &program_name);
    // logs usage information for the program.

  int test_all_links();
    // goes through the tree of links and tests them all.

  int check_link(const astring &url, astring &error_msg);
    // synchronously checks the "url" for health. the return value is zero
    // on success or an HTTP error code on failure.

  void write_new_files();
    // writes out the two new files given the info accumulated so far.

  bookmark_tree _categories;  // our tree of categories.
  safe_int_array _bad_lines;  // lines with bad contents.
  thread_cabinet _checkers;  // threads checking on links.
  astring _input_filename;  // we'll store our link database name here.
  astring _output_filename;  // where the list of good links is stored.
  astring _bad_link_filename;  // garbage dump of bad links.
  bool _check_redirection;  // true if redirection is disallowed.
  int _max_threads;  // the most threads we'll allow at once.
  byte_filer _null_file;  // we'll use this for trashing output data.

  static void handle_OS_signal(int sig_id);
    // handles break signals from the user.
160 ////////////////////////////////////////////////////////////////////////////
// a worker thread that checks exactly one link and, on failure, logs a
// complaint and records the link's line number in the shared bad-line list.
class checking_thread : public ethread
  checking_thread(const link_record &link_info, safe_int_array &bad_lines,
      marks_checker &checker)
  : ethread(), _bad_lines(bad_lines), _checker(checker), _info(link_info) {}

  // thread entry point: runs the synchronous link check and reports results.
  void perform_activity(void *formal(ptr)) {
    int ret = _checker.check_link(_info._url, message);
    // build a multi-line complaint describing the failed link.
    astring complaint = a_sprintf("Bad Link at line %d:", _info._uid)
        += parser_bits::platform_eol_to_chars();
    const astring spacer(' ', 4);
    complaint += spacer + _info._url += parser_bits::platform_eol_to_chars();
    complaint += spacer + _info._description += parser_bits::platform_eol_to_chars();
    complaint += spacer + "error: " += message;
    // sanity check the line number before recording it.
    if ( (_info._uid> 100000) || (_info._uid < 0) ) {
      BASE_LOG(a_sprintf("somehow got bogus line number! %d", _info._uid));
    _bad_lines.add(_info._uid);  // list ours as bad.

  safe_int_array &_bad_lines;  // shared accumulator of failed line numbers.
  marks_checker &_checker;  // the application object that performs the check.
194 ////////////////////////////////////////////////////////////////////////////
196 int marks_checker::print_instructions(const filename &program_name)
198 a_sprintf to_show("%s:\n\
199 This program needs three filenames as command line parameters. The -i flag\n\
200 is used to specify the input filename. The -o flag specifies the file where\n\
201 where the good links will be written. The -b flag specifies the file where\n\
202 the bad links are written. The optional flag --no-redirs can be used to\n\
203 disallow web-site redirection, which will catch when the site has changed\n\
204 its location. Note that redirection is not necessarily an error, but it\n\
205 instead may just be a link that needs its URL modified. It is recommended\n\
206 that you omit this flag in early runs, in order to only locate definitely\n\
207 dead links. Then later checking runs can find any sites that were redirected\n\
208 or being routed to a dead link page which doesn't provide an error code.\n\
209 The optional flag --threads with a parameter will set the maximum number of\n\
210 threads that will simultaneously check on links.\n\
211 The input file is expected to be in the HOOPLE link database format.\n\
212 The HOOPLE link format is documented here:\n\
213 http://feistymeow.org/guides/link_database/format_manifesto.txt\n\
214 ", program_name.basename().raw().s(), program_name.basename().raw().s());
215 program_wide_logger::get().log(to_show, ALWAYS_PRINT);
219 // this function just eats any data it's handed.
220 size_t data_sink(void *formal(ptr), size_t size, size_t number, void *formal(stream))
221 { return size * number; }
// synchronously fetches "url" with libcurl to decide whether the link is
// healthy. on failure, "error_msg" receives a human-readable description.
// returns zero on success; otherwise the curl outcome (or an http failure is
// reported through error_msg).
int marks_checker::check_link(const astring &url, astring &error_msg)
  CURL *cur = curl_easy_init();  // create the per-request curl handle.

  curl_easy_setopt(cur, CURLOPT_URL, url.s());  // set the URL itself.
  curl_easy_setopt(cur, CURLOPT_SSL_VERIFYPEER, 0);
    // don't verify SSL certificates.
  curl_easy_setopt(cur, CURLOPT_MAXFILESIZE, MAXIMUM_READ);
    // limit the download size; causes size errors, which we elide to success.
  curl_easy_setopt(cur, CURLOPT_NOSIGNAL, 1);
    // don't use signals since it interferes with sleep.
  curl_easy_setopt(cur, CURLOPT_TIMEOUT, TIME_PER_REQUEST_IN_SEC);
    // limit time allowed per operation.
  curl_easy_setopt(cur, CURLOPT_AUTOREFERER, true);
    // automatically fill in the referer field when redirected.

  curl_easy_setopt(cur, CURLOPT_WRITEDATA, _null_file.file_handle());
    // set the file handle where we want our downloaded data to go. since
    // we're just checking the links, this goes right to the trash.
  curl_easy_setopt(cur, CURLOPT_WRITEFUNCTION, data_sink);
    // set the function which will be given all the downloaded data.

  curl_easy_setopt(cur, CURLOPT_USERAGENT, FAKE_AGENT_STRING);
    // fake being a browser here since otherwise we get no respect.

  curl_easy_setopt(cur, CURLOPT_FTPLISTONLY, 1);
    // get only a simple list of files, which allows us to hit ftp sites
    // properly. if the normal curl mode is used, we get nothing.

  if (_check_redirection) {
    // attempting to quash redirects as being valid.
    curl_easy_setopt(cur, CURLOPT_FOLLOWLOCATION, 1);  // follow redirects.
    curl_easy_setopt(cur, CURLOPT_MAXREDIRS, 0);  // allow zero redirects.

  // retry loop: transient errors (e.g. DNS lookup hiccups) sometimes clear
  // up, so we attempt the fetch up to MAXIMUM_ATTEMPTS times.
  while (tries++ < MAXIMUM_ATTEMPTS) {
    // we do the error message again every time, since it gets shrunk after
    // the web page check and is no longer available where it was in memory.
    error_msg = astring(' ', CURL_ERROR_SIZE + 5);
    curl_easy_setopt(cur, CURLOPT_ERRORBUFFER, error_msg.s());
      // set the error message buffer so we know what happened.

    // try to lookup the web page we've been given.
    to_return = curl_easy_perform(cur);

    error_msg.shrink();  // just use the message without extra spaces.

    // we turn file size errors into non-errors, since we have set a very
    // low file size in order to avoid downloading too much. we really just
    // want to check links, not download their contents.
    if (to_return == CURLE_FILESIZE_EXCEEDED) to_return = 0;

      // supposedly this is a success, but let's check the result code.
      curl_easy_getinfo(cur, CURLINFO_RESPONSE_CODE, &result);
        error_msg = a_sprintf("received http failure code %d", result);
      break;  // this was a successful result, a zero outcome from perform.

    time_control::sleep_ms(10 * SECOND_ms);  // give it a few more seconds...

  curl_easy_cleanup(cur);  // release the curl handle.
// walks the whole category tree and spawns one checking_thread per link,
// throttling to at most _max_threads live checks, then waits for all the
// worker threads to finish before returning.
int marks_checker::test_all_links()
  FUNCDEF("test_all_links");
  // traverse the tree in prefix order.
  tree::iterator itty = _categories.access_root().start(tree::prefix);
  tree *curr = NULL_POINTER;
  while ( (curr = itty.next()) ) {
    inner_mark_tree *nod = dynamic_cast<inner_mark_tree *>(curr);
      // a node that is not our tree type is fatal; the database is corrupt.
      non_continuable_error(static_class_name(), func, "failed to cast a tree node");
    // iterate on all the links at this node to check them.
    for (int i = 0; i < nod->_links.elements(); i++) {
      link_record *lin = nod->_links.borrow(i);
      if (!lin->_url) continue;  // not a link.

      // throttle: sleep until the live checker thread count drops below
      // the configured maximum, reaping any finished threads meanwhile.
      while (_checkers.threads() > _max_threads) {
        time_control::sleep_ms(PAUSEY_SNOOZE);
        _checkers.clean_debris();

      // hand the link off to a fresh worker; the thread cabinet owns it.
      checking_thread *new_thread = new checking_thread(*lin, _bad_lines,
      unique_int id = _checkers.add_thread(new_thread, true, NULL_POINTER);

  BASE_LOG("... finished iterating on tree.");

  // now wait until all the threads are finished.
  while (_checkers.threads()) {
    time_control::sleep_ms(PAUSEY_SNOOZE);
    _checkers.clean_debris();

  BASE_LOG("... finished waiting for all threads.");
// re-reads the input database line by line and splits it in two: lines whose
// numbers were recorded in _bad_lines go to the bad link file, everything
// else goes to the output file of good links.
void marks_checker::write_new_files()
  byte_filer input_file(_input_filename, "r");
  byte_filer output_file(_output_filename, "w");
  byte_filer badness_file(_bad_link_filename, "w");

  basis::int_array badness = _bad_lines.make_copy();
  shell_sort<int>(badness.access(), badness.length());
    // sort the bad line numbers so they can be consumed in file order.

  BASE_LOG("bad links are on lines:");
  for (int i = 0; i < badness.length(); i++) {
    bad_list += a_sprintf("%d, ", badness[i]);

  while (!input_file.eof()) {
    // discard any stale bad-line entries that fall before the current line.
    while (badness.length() && (badness[0] < curr_line) ) {
      BASE_LOG(a_sprintf("whacking too low line number: %d", badness[0]));

    input_file.getline(buffer, 2048);
    //make that a constant.
    if (badness.length() && (badness[0] == curr_line)) {
      // we seem to have found a bad line.
      badness_file.write(buffer);
      badness.zap(0, 0);  // remove the current line number.

      // this is a healthy line.
      output_file.write(buffer);

  badness_file.close();
marks_checker *main_program = NULL_POINTER;
  // global hook so the static signal handler can reach the running app object.
// SIGINT handler: ignores further interrupts, flushes the results gathered so
// far to the output files, and clears the global so we don't write twice.
void marks_checker::handle_OS_signal(int formal(sig_id))
  signal(SIGINT, SIG_IGN);  // turn off that signal for now.
  BASE_LOG("caught break signal... now writing files.");
  if (main_program) main_program->write_new_files();
  BASE_LOG("exiting after handling break.");
  main_program = NULL_POINTER;
// main driver: parses the command line, reads the link database, runs all the
// link checks, and returns the process exit code (non-zero on failure).
int marks_checker::execute()
  main_program = this;  // used by our signal handler.

  command_line cmds(_global_argc, _global_argv);  // process the command line parameters.
  // the three filename flags are all required; missing any one shows usage.
  if (!cmds.get_value('i', _input_filename, false))
    return print_instructions(cmds.program_name());
  if (!cmds.get_value('o', _output_filename, false))
    return print_instructions(cmds.program_name());
  if (!cmds.get_value('b', _bad_link_filename, false))
    return print_instructions(cmds.program_name());

  // optional flag for checking website redirection.
  if (cmds.get_value("no-redirs", temp, false)) {
    BASE_LOG("Enabling redirection checking: redirected web sites are reported as bad.");
    _check_redirection = true;

  // optional flag for number of threads.
  if (cmds.get_value("threads", threads, false)) {
    _max_threads = threads.convert(0);
    BASE_LOG(a_sprintf("Maximum threads allowed=%d", _max_threads));

  BASE_LOG(astring("input file: ") + _input_filename);
  BASE_LOG(astring("output file: ") + _output_filename);
  BASE_LOG(astring("bad link file: ") + _bad_link_filename);
  //hmmm: check if output file already exists.
  //hmmm: check if bad file already exists.

  LOG("before reading input...");
  int ret = _categories.read_csv_file(_input_filename);
  if (ret) return ret;  // failure during read means we can't do much.
  LOG("after reading input...");

  signal(SIGINT, handle_OS_signal);
    // hook the break signal so we can still do part of the job if they
    // interrupt us before all the links have been checked.

  curl_global_init(CURL_GLOBAL_ALL);  // crank up the cURL engine.

  ret = test_all_links();

  main_program = NULL_POINTER;
    // drop the global hook now that checking is done.

  curl_global_cleanup();  // shut down cURL engine again.
451 ////////////////////////////////////////////////////////////////////////////
453 HOOPLE_MAIN(marks_checker, )