+++ /dev/null
-#! /usr/bin/env python\r
-\r
-# Original code by Guido van Rossum; extensive changes by Sam Bayer,\r
-# including code to check URL fragments.\r
-\r
-"""Web tree checker.\r
-\r
-This utility is handy to check a subweb of the world-wide web for\r
-errors. A subweb is specified by giving one or more ``root URLs''; a\r
-page belongs to the subweb if one of the root URLs is an initial\r
-prefix of it.\r
-\r
-File URL extension:\r
-\r
-In order to ease the checking of subwebs via the local file system,\r
-the interpretation of ``file:'' URLs is extended to mimic the behavior\r
-of your average HTTP daemon: if a directory pathname is given, the\r
-file index.html in that directory is returned if it exists, otherwise\r
-a directory listing is returned. Now, you can point webchecker to the\r
-document tree in the local file system of your HTTP daemon, and have\r
-most of it checked. In fact the default works this way if your local\r
-web tree is located at /usr/local/etc/httpd/htdocs (the default for\r
-the NCSA HTTP daemon and probably others).\r
-\r
-Report printed:\r
-\r
-When done, it reports pages with bad links within the subweb. When\r
-interrupted, it reports on the pages that it has already checked.\r
-\r
-In verbose mode, additional messages are printed during the\r
-information gathering phase. By default, it prints a summary of its\r
-work status every 50 URLs (adjustable with the -r option), and it\r
-reports errors as they are encountered. Use the -q option to disable\r
-this output.\r
-\r
-Checkpoint feature:\r
-\r
-Whether interrupted or not, it dumps its state (a Python pickle) to a\r
-checkpoint file and the -R option allows it to restart from the\r
-checkpoint (assuming that the pages on the subweb that were already\r
-processed haven't changed). Even when it has run till completion, -R\r
-can still be useful -- it will print the reports again, and -Rq prints\r
-the errors only. In this case, the checkpoint file is not written\r
-again. The checkpoint file can be set with the -d option.\r
-\r
-The checkpoint file is written as a Python pickle. Remember that\r
-Python's pickle module is currently quite slow. Give it the time it\r
-needs to load and save the checkpoint file. When interrupted while\r
-writing the checkpoint file, the old checkpoint file is not\r
-overwritten, but all work done in the current run is lost.\r
-\r
-Miscellaneous:\r
-\r
-- You may find the (Tk-based) GUI version easier to use. See wcgui.py.\r
-\r
-- Webchecker honors the "robots.txt" convention. Thanks to Skip\r
-Montanaro for his robotparser.py module (included in this directory)!\r
-The agent name is hardwired to "webchecker". URLs that are disallowed\r
-by the robots.txt file are reported as external URLs.\r
-\r
-- Because the SGML parser is a bit slow, very large SGML files are\r
-skipped. The size limit can be set with the -m option.\r
-\r
-- When the server or protocol does not tell us a file's type, we guess\r
-it based on the URL's suffix. The mimetypes.py module (also in this\r
-directory) has a built-in table mapping most currently known suffixes,\r
-and in addition attempts to read the mime.types configuration files in\r
-the default locations of Netscape and the NCSA HTTP daemon.\r
-\r
-- We follow links indicated by <A>, <FRAME> and <IMG> tags, as well as\r
-other tags carrying link attributes (e.g. <AREA>, <LINK rel=stylesheet>,\r
-<SCRIPT>, <OBJECT> and table/body backgrounds). We also honor the\r
-<BASE> tag.\r
-\r
-- We now check internal NAME anchor links, as well as toplevel links.\r
-\r
-- Checking external links is now done by default; use -x to *disable*\r
-this feature. External links are now checked during normal\r
-processing. (XXX The status of a checked link could be categorized\r
-better. Later...)\r
-\r
-- If external links are not checked, you can use the -t flag to\r
-provide specific overrides to -x.\r
-\r
-Usage: webchecker.py [option] ... [rooturl] ...\r
-\r
-Options:\r
-\r
--R -- restart from checkpoint file\r
--d file -- checkpoint filename (default %(DUMPFILE)s)\r
--m bytes -- skip HTML pages larger than this size (default %(MAXPAGE)d)\r
--n -- reports only, no checking (use with -R)\r
--q -- quiet operation (also suppresses external links report)\r
--r number -- number of links processed per round (default %(ROUNDSIZE)d)\r
--t root -- specify root dir which should be treated as internal (can repeat)\r
--v -- verbose operation; repeating -v will increase verbosity\r
--x -- don't check external links (these are often slow to check)\r
--a -- don't check name anchors\r
-\r
-Arguments:\r
-\r
-rooturl -- URL to start checking\r
- (default %(DEFROOT)s)\r
-\r
-"""\r
-\r
-\r
-__version__ = "$Revision$"\r
-\r
-\r
-import sys\r
-import os\r
-from types import *\r
-import StringIO\r
-import getopt\r
-import pickle\r
-\r
-import urllib\r
-import urlparse\r
-import sgmllib\r
-import cgi\r
-\r
-import mimetypes\r
-import robotparser\r
-\r
-# Extract real version number if necessary\r
-if __version__[0] == '$':\r
- _v = __version__.split()\r
- if len(_v) == 3:\r
- __version__ = _v[1]\r
-\r
-\r
-# Tunable parameters\r
-DEFROOT = "file:/usr/local/etc/httpd/htdocs/" # Default root URL\r
-CHECKEXT = 1 # Check external references (1 deep)\r
-VERBOSE = 1 # Verbosity level (0-3)\r
-MAXPAGE = 150000 # Ignore files bigger than this\r
-ROUNDSIZE = 50 # Number of links processed per round\r
-DUMPFILE = "@webchecker.pickle" # Pickled checkpoint\r
-AGENTNAME = "webchecker" # Agent name for robots.txt parser\r
-NONAMES = 0 # If true, don't check name anchors (toggled by -a)\r
-\r
-\r
-# Global variables\r
-\r
-\r
-def main():\r
- checkext = CHECKEXT\r
- verbose = VERBOSE\r
- maxpage = MAXPAGE\r
- roundsize = ROUNDSIZE\r
- dumpfile = DUMPFILE\r
- restart = 0\r
- norun = 0\r
-\r
- try:\r
- opts, args = getopt.getopt(sys.argv[1:], 'Rd:m:nqr:t:vxa')\r
- except getopt.error, msg:\r
- sys.stdout = sys.stderr\r
- print msg\r
- print __doc__%globals()\r
- sys.exit(2)\r
-\r
-    # extra_roots collects the roots given with -t; they are treated\r
-    # as internal but are not added to the to-do list (see below).\r
- extra_roots = []\r
- nonames = NONAMES\r
-\r
- for o, a in opts:\r
- if o == '-R':\r
- restart = 1\r
- if o == '-d':\r
- dumpfile = a\r
- if o == '-m':\r
- maxpage = int(a)\r
- if o == '-n':\r
- norun = 1\r
- if o == '-q':\r
- verbose = 0\r
- if o == '-r':\r
- roundsize = int(a)\r
- if o == '-t':\r
- extra_roots.append(a)\r
- if o == '-a':\r
- nonames = not nonames\r
- if o == '-v':\r
- verbose = verbose + 1\r
- if o == '-x':\r
- checkext = not checkext\r
-\r
- if verbose > 0:\r
- print AGENTNAME, "version", __version__\r
-\r
- if restart:\r
- c = load_pickle(dumpfile=dumpfile, verbose=verbose)\r
- else:\r
- c = Checker()\r
-\r
- c.setflags(checkext=checkext, verbose=verbose,\r
- maxpage=maxpage, roundsize=roundsize,\r
- nonames=nonames\r
- )\r
-\r
- if not restart and not args:\r
- args.append(DEFROOT)\r
-\r
- for arg in args:\r
- c.addroot(arg)\r
-\r
- # The -t flag is only needed if external links are not to be\r
- # checked. So -t values are ignored unless -x was specified.\r
- if not checkext:\r
- for root in extra_roots:\r
- # Make sure it's terminated by a slash,\r
- # so that addroot doesn't discard the last\r
- # directory component.\r
- if root[-1] != "/":\r
- root = root + "/"\r
- c.addroot(root, add_to_do = 0)\r
-\r
- try:\r
-\r
- if not norun:\r
- try:\r
- c.run()\r
- except KeyboardInterrupt:\r
- if verbose > 0:\r
- print "[run interrupted]"\r
-\r
- try:\r
- c.report()\r
- except KeyboardInterrupt:\r
- if verbose > 0:\r
- print "[report interrupted]"\r
-\r
- finally:\r
- if c.save_pickle(dumpfile):\r
- if dumpfile == DUMPFILE:\r
- print "Use ``%s -R'' to restart." % sys.argv[0]\r
- else:\r
- print "Use ``%s -R -d %s'' to restart." % (sys.argv[0],\r
- dumpfile)\r
-\r
-\r
-def load_pickle(dumpfile=DUMPFILE, verbose=VERBOSE):\r
- if verbose > 0:\r
- print "Loading checkpoint from %s ..." % dumpfile\r
- f = open(dumpfile, "rb")\r
- c = pickle.load(f)\r
- f.close()\r
- if verbose > 0:\r
- print "Done."\r
- print "Root:", "\n ".join(c.roots)\r
- return c\r
-\r
-\r
-class Checker:\r
-\r
- checkext = CHECKEXT\r
- verbose = VERBOSE\r
- maxpage = MAXPAGE\r
- roundsize = ROUNDSIZE\r
- nonames = NONAMES\r
-\r
- validflags = tuple(dir())\r
-\r
- def __init__(self):\r
- self.reset()\r
-\r
- def setflags(self, **kw):\r
- for key in kw.keys():\r
- if key not in self.validflags:\r
- raise NameError, "invalid keyword argument: %s" % str(key)\r
- for key, value in kw.items():\r
- setattr(self, key, value)\r
-\r
- def reset(self):\r
- self.roots = []\r
- self.todo = {}\r
- self.done = {}\r
- self.bad = {}\r
-\r
- # Add a name table, so that the name URLs can be checked. Also\r
- # serves as an implicit cache for which URLs are done.\r
- self.name_table = {}\r
-\r
- self.round = 0\r
- # The following are not pickled:\r
- self.robots = {}\r
- self.errors = {}\r
- self.urlopener = MyURLopener()\r
- self.changed = 0\r
-\r
- def note(self, level, format, *args):\r
- if self.verbose > level:\r
- if args:\r
- format = format%args\r
- self.message(format)\r
-\r
- def message(self, format, *args):\r
- if args:\r
- format = format%args\r
- print format\r
-\r
- def __getstate__(self):\r
- return (self.roots, self.todo, self.done, self.bad, self.round)\r
-\r
- def __setstate__(self, state):\r
- self.reset()\r
- (self.roots, self.todo, self.done, self.bad, self.round) = state\r
- for root in self.roots:\r
- self.addrobot(root)\r
- for url in self.bad.keys():\r
- self.markerror(url)\r
-\r
- def addroot(self, root, add_to_do = 1):\r
- if root not in self.roots:\r
- troot = root\r
- scheme, netloc, path, params, query, fragment = \\r
- urlparse.urlparse(root)\r
- i = path.rfind("/") + 1\r
- if 0 < i < len(path):\r
- path = path[:i]\r
- troot = urlparse.urlunparse((scheme, netloc, path,\r
- params, query, fragment))\r
- self.roots.append(troot)\r
- self.addrobot(root)\r
- if add_to_do:\r
- self.newlink((root, ""), ("<root>", root))\r
-\r
- def addrobot(self, root):\r
- root = urlparse.urljoin(root, "/")\r
- if self.robots.has_key(root): return\r
- url = urlparse.urljoin(root, "/robots.txt")\r
- self.robots[root] = rp = robotparser.RobotFileParser()\r
- self.note(2, "Parsing %s", url)\r
- rp.debug = self.verbose > 3\r
- rp.set_url(url)\r
- try:\r
- rp.read()\r
- except (OSError, IOError), msg:\r
- self.note(1, "I/O error parsing %s: %s", url, msg)\r
-\r
- def run(self):\r
- while self.todo:\r
- self.round = self.round + 1\r
- self.note(0, "\nRound %d (%s)\n", self.round, self.status())\r
- urls = self.todo.keys()\r
- urls.sort()\r
- del urls[self.roundsize:]\r
- for url in urls:\r
- self.dopage(url)\r
-\r
- def status(self):\r
- return "%d total, %d to do, %d done, %d bad" % (\r
- len(self.todo)+len(self.done),\r
- len(self.todo), len(self.done),\r
- len(self.bad))\r
-\r
- def report(self):\r
- self.message("")\r
- if not self.todo: s = "Final"\r
- else: s = "Interim"\r
- self.message("%s Report (%s)", s, self.status())\r
- self.report_errors()\r
-\r
- def report_errors(self):\r
- if not self.bad:\r
- self.message("\nNo errors")\r
- return\r
- self.message("\nError Report:")\r
- sources = self.errors.keys()\r
- sources.sort()\r
- for source in sources:\r
- triples = self.errors[source]\r
- self.message("")\r
- if len(triples) > 1:\r
- self.message("%d Errors in %s", len(triples), source)\r
- else:\r
- self.message("Error in %s", source)\r
- # Call self.format_url() instead of referring\r
- # to the URL directly, since the URLs in these\r
-            # triples are now (URL, fragment) pairs. The value\r
- # of the "source" variable comes from the list of\r
- # origins, and is a URL, not a pair.\r
- for url, rawlink, msg in triples:\r
- if rawlink != self.format_url(url): s = " (%s)" % rawlink\r
- else: s = ""\r
- self.message(" HREF %s%s\n msg %s",\r
- self.format_url(url), s, msg)\r
-\r
- def dopage(self, url_pair):\r
-\r
- # All printing of URLs uses format_url(); argument changed to\r
- # url_pair for clarity.\r
- if self.verbose > 1:\r
- if self.verbose > 2:\r
- self.show("Check ", self.format_url(url_pair),\r
- " from", self.todo[url_pair])\r
- else:\r
- self.message("Check %s", self.format_url(url_pair))\r
- url, local_fragment = url_pair\r
- if local_fragment and self.nonames:\r
- self.markdone(url_pair)\r
- return\r
- try:\r
- page = self.getpage(url_pair)\r
- except sgmllib.SGMLParseError, msg:\r
- msg = self.sanitize(msg)\r
- self.note(0, "Error parsing %s: %s",\r
- self.format_url(url_pair), msg)\r
-            # Don't actually mark the URL as bad - it exists, we just\r
-            # can't parse it!\r
- page = None\r
- if page:\r
- # Store the page which corresponds to this URL.\r
- self.name_table[url] = page\r
- # If there is a fragment in this url_pair, and it's not\r
- # in the list of names for the page, call setbad(), since\r
- # it's a missing anchor.\r
- if local_fragment and local_fragment not in page.getnames():\r
- self.setbad(url_pair, ("Missing name anchor `%s'" % local_fragment))\r
- for info in page.getlinkinfos():\r
- # getlinkinfos() now returns the fragment as well,\r
- # and we store that fragment here in the "todo" dictionary.\r
- link, rawlink, fragment = info\r
- # However, we don't want the fragment as the origin, since\r
- # the origin is logically a page.\r
- origin = url, rawlink\r
- self.newlink((link, fragment), origin)\r
- else:\r
- # If no page has been created yet, we want to\r
- # record that fact.\r
- self.name_table[url_pair[0]] = None\r
- self.markdone(url_pair)\r
-\r
- def newlink(self, url, origin):\r
- if self.done.has_key(url):\r
- self.newdonelink(url, origin)\r
- else:\r
- self.newtodolink(url, origin)\r
-\r
- def newdonelink(self, url, origin):\r
- if origin not in self.done[url]:\r
- self.done[url].append(origin)\r
-\r
- # Call self.format_url(), since the URL here\r
- # is now a (URL, fragment) pair.\r
- self.note(3, " Done link %s", self.format_url(url))\r
-\r
- # Make sure that if it's bad, that the origin gets added.\r
- if self.bad.has_key(url):\r
- source, rawlink = origin\r
- triple = url, rawlink, self.bad[url]\r
- self.seterror(source, triple)\r
-\r
- def newtodolink(self, url, origin):\r
- # Call self.format_url(), since the URL here\r
- # is now a (URL, fragment) pair.\r
- if self.todo.has_key(url):\r
- if origin not in self.todo[url]:\r
- self.todo[url].append(origin)\r
- self.note(3, " Seen todo link %s", self.format_url(url))\r
- else:\r
- self.todo[url] = [origin]\r
- self.note(3, " New todo link %s", self.format_url(url))\r
-\r
- def format_url(self, url):\r
- link, fragment = url\r
- if fragment: return link + "#" + fragment\r
- else: return link\r
-\r
- def markdone(self, url):\r
- self.done[url] = self.todo[url]\r
- del self.todo[url]\r
- self.changed = 1\r
-\r
- def inroots(self, url):\r
- for root in self.roots:\r
- if url[:len(root)] == root:\r
- return self.isallowed(root, url)\r
- return 0\r
-\r
- def isallowed(self, root, url):\r
- root = urlparse.urljoin(root, "/")\r
- return self.robots[root].can_fetch(AGENTNAME, url)\r
-\r
- def getpage(self, url_pair):\r
- # Incoming argument name is a (URL, fragment) pair.\r
- # The page may have been cached in the name_table variable.\r
- url, fragment = url_pair\r
- if self.name_table.has_key(url):\r
- return self.name_table[url]\r
-\r
- scheme, path = urllib.splittype(url)\r
- if scheme in ('mailto', 'news', 'javascript', 'telnet'):\r
- self.note(1, " Not checking %s URL" % scheme)\r
- return None\r
- isint = self.inroots(url)\r
-\r
- # Ensure that openpage gets the URL pair to\r
- # print out its error message and record the error pair\r
- # correctly.\r
- if not isint:\r
- if not self.checkext:\r
- self.note(1, " Not checking ext link")\r
- return None\r
- f = self.openpage(url_pair)\r
- if f:\r
- self.safeclose(f)\r
- return None\r
- text, nurl = self.readhtml(url_pair)\r
-\r
- if nurl != url:\r
- self.note(1, " Redirected to %s", nurl)\r
- url = nurl\r
- if text:\r
- return Page(text, url, maxpage=self.maxpage, checker=self)\r
-\r
- # These next three functions take (URL, fragment) pairs as\r
- # arguments, so that openpage() receives the appropriate tuple to\r
- # record error messages.\r
- def readhtml(self, url_pair):\r
- url, fragment = url_pair\r
- text = None\r
- f, url = self.openhtml(url_pair)\r
- if f:\r
- text = f.read()\r
- f.close()\r
- return text, url\r
-\r
- def openhtml(self, url_pair):\r
- url, fragment = url_pair\r
- f = self.openpage(url_pair)\r
- if f:\r
- url = f.geturl()\r
- info = f.info()\r
- if not self.checkforhtml(info, url):\r
- self.safeclose(f)\r
- f = None\r
- return f, url\r
-\r
- def openpage(self, url_pair):\r
- url, fragment = url_pair\r
- try:\r
- return self.urlopener.open(url)\r
- except (OSError, IOError), msg:\r
- msg = self.sanitize(msg)\r
- self.note(0, "Error %s", msg)\r
- if self.verbose > 0:\r
- self.show(" HREF ", url, " from", self.todo[url_pair])\r
- self.setbad(url_pair, msg)\r
- return None\r
-\r
- def checkforhtml(self, info, url):\r
- if info.has_key('content-type'):\r
- ctype = cgi.parse_header(info['content-type'])[0].lower()\r
- if ';' in ctype:\r
- # handle content-type: text/html; charset=iso8859-1 :\r
- ctype = ctype.split(';', 1)[0].strip()\r
- else:\r
- if url[-1:] == "/":\r
- return 1\r
- ctype, encoding = mimetypes.guess_type(url)\r
- if ctype == 'text/html':\r
- return 1\r
- else:\r
- self.note(1, " Not HTML, mime type %s", ctype)\r
- return 0\r
-\r
- def setgood(self, url):\r
- if self.bad.has_key(url):\r
- del self.bad[url]\r
- self.changed = 1\r
- self.note(0, "(Clear previously seen error)")\r
-\r
- def setbad(self, url, msg):\r
- if self.bad.has_key(url) and self.bad[url] == msg:\r
- self.note(0, "(Seen this error before)")\r
- return\r
- self.bad[url] = msg\r
- self.changed = 1\r
- self.markerror(url)\r
-\r
- def markerror(self, url):\r
- try:\r
- origins = self.todo[url]\r
- except KeyError:\r
- origins = self.done[url]\r
- for source, rawlink in origins:\r
- triple = url, rawlink, self.bad[url]\r
- self.seterror(source, triple)\r
-\r
- def seterror(self, url, triple):\r
- try:\r
- # Because of the way the URLs are now processed, I need to\r
- # check to make sure the URL hasn't been entered in the\r
- # error list. The first element of the triple here is a\r
- # (URL, fragment) pair, but the URL key is not, since it's\r
- # from the list of origins.\r
- if triple not in self.errors[url]:\r
- self.errors[url].append(triple)\r
- except KeyError:\r
- self.errors[url] = [triple]\r
-\r
- # The following used to be toplevel functions; they have been\r
- # changed into methods so they can be overridden in subclasses.\r
-\r
- def show(self, p1, link, p2, origins):\r
- self.message("%s %s", p1, link)\r
- i = 0\r
- for source, rawlink in origins:\r
- i = i+1\r
- if i == 2:\r
- p2 = ' '*len(p2)\r
- if rawlink != link: s = " (%s)" % rawlink\r
- else: s = ""\r
- self.message("%s %s%s", p2, source, s)\r
-\r
- def sanitize(self, msg):\r
- if isinstance(IOError, ClassType) and isinstance(msg, IOError):\r
- # Do the other branch recursively\r
- msg.args = self.sanitize(msg.args)\r
- elif isinstance(msg, TupleType):\r
- if len(msg) >= 4 and msg[0] == 'http error' and \\r
- isinstance(msg[3], InstanceType):\r
- # Remove the Message instance -- it may contain\r
- # a file object which prevents pickling.\r
- msg = msg[:3] + msg[4:]\r
- return msg\r
-\r
- def safeclose(self, f):\r
- try:\r
- url = f.geturl()\r
- except AttributeError:\r
- pass\r
- else:\r
- if url[:4] == 'ftp:' or url[:7] == 'file://':\r
- # Apparently ftp connections don't like to be closed\r
- # prematurely...\r
- text = f.read()\r
- f.close()\r
-\r
- def save_pickle(self, dumpfile=DUMPFILE):\r
- if not self.changed:\r
- self.note(0, "\nNo need to save checkpoint")\r
- elif not dumpfile:\r
- self.note(0, "No dumpfile, won't save checkpoint")\r
- else:\r
- self.note(0, "\nSaving checkpoint to %s ...", dumpfile)\r
- newfile = dumpfile + ".new"\r
- f = open(newfile, "wb")\r
- pickle.dump(self, f)\r
- f.close()\r
- try:\r
- os.unlink(dumpfile)\r
- except os.error:\r
- pass\r
- os.rename(newfile, dumpfile)\r
- self.note(0, "Done.")\r
- return 1\r
-\r
-\r
-class Page:\r
-\r
- def __init__(self, text, url, verbose=VERBOSE, maxpage=MAXPAGE, checker=None):\r
- self.text = text\r
- self.url = url\r
- self.verbose = verbose\r
- self.maxpage = maxpage\r
- self.checker = checker\r
-\r
-        # The page is parsed here in __init__() so that the list of\r
-        # names it contains is available immediately. The parser is\r
-        # stored in an instance variable, and the URL is passed to\r
-        # MyHTMLParser().\r
- size = len(self.text)\r
- if size > self.maxpage:\r
- self.note(0, "Skip huge file %s (%.0f Kbytes)", self.url, (size*0.001))\r
- self.parser = None\r
- return\r
- self.checker.note(2, " Parsing %s (%d bytes)", self.url, size)\r
- self.parser = MyHTMLParser(url, verbose=self.verbose,\r
- checker=self.checker)\r
- self.parser.feed(self.text)\r
- self.parser.close()\r
-\r
- def note(self, level, msg, *args):\r
- if self.checker:\r
- apply(self.checker.note, (level, msg) + args)\r
- else:\r
- if self.verbose >= level:\r
- if args:\r
- msg = msg%args\r
- print msg\r
-\r
- # Method to retrieve names.\r
- def getnames(self):\r
- if self.parser:\r
- return self.parser.names\r
- else:\r
- return []\r
-\r
- def getlinkinfos(self):\r
-        # The text was parsed in __init__(); the parser is stored in an\r
-        # instance variable as an indication that parsing succeeded.\r
-\r
- # If no parser was stored, fail.\r
- if not self.parser: return []\r
-\r
- rawlinks = self.parser.getlinks()\r
- base = urlparse.urljoin(self.url, self.parser.getbase() or "")\r
- infos = []\r
- for rawlink in rawlinks:\r
- t = urlparse.urlparse(rawlink)\r
- # DON'T DISCARD THE FRAGMENT! Instead, include\r
- # it in the tuples which are returned. See Checker.dopage().\r
- fragment = t[-1]\r
- t = t[:-1] + ('',)\r
- rawlink = urlparse.urlunparse(t)\r
- link = urlparse.urljoin(base, rawlink)\r
- infos.append((link, rawlink, fragment))\r
-\r
- return infos\r
-\r
-\r
-class MyStringIO(StringIO.StringIO):\r
-\r
- def __init__(self, url, info):\r
- self.__url = url\r
- self.__info = info\r
- StringIO.StringIO.__init__(self)\r
-\r
- def info(self):\r
- return self.__info\r
-\r
- def geturl(self):\r
- return self.__url\r
-\r
-\r
-class MyURLopener(urllib.FancyURLopener):\r
-\r
- http_error_default = urllib.URLopener.http_error_default\r
-\r
- def __init__(*args):\r
- self = args[0]\r
- apply(urllib.FancyURLopener.__init__, args)\r
- self.addheaders = [\r
- ('User-agent', 'Python-webchecker/%s' % __version__),\r
- ]\r
-\r
- def http_error_401(self, url, fp, errcode, errmsg, headers):\r
- return None\r
-\r
- def open_file(self, url):\r
- path = urllib.url2pathname(urllib.unquote(url))\r
- if os.path.isdir(path):\r
- if path[-1] != os.sep:\r
- url = url + '/'\r
- indexpath = os.path.join(path, "index.html")\r
- if os.path.exists(indexpath):\r
- return self.open_file(url + "index.html")\r
- try:\r
- names = os.listdir(path)\r
- except os.error, msg:\r
- exc_type, exc_value, exc_tb = sys.exc_info()\r
- raise IOError, msg, exc_tb\r
- names.sort()\r
- s = MyStringIO("file:"+url, {'content-type': 'text/html'})\r
- s.write('<BASE HREF="file:%s">\n' %\r
- urllib.quote(os.path.join(path, "")))\r
- for name in names:\r
- q = urllib.quote(name)\r
- s.write('<A HREF="%s">%s</A>\n' % (q, q))\r
- s.seek(0)\r
- return s\r
- return urllib.FancyURLopener.open_file(self, url)\r
-\r
-\r
-class MyHTMLParser(sgmllib.SGMLParser):\r
-\r
- def __init__(self, url, verbose=VERBOSE, checker=None):\r
- self.myverbose = verbose # now unused\r
- self.checker = checker\r
- self.base = None\r
- self.links = {}\r
- self.names = []\r
- self.url = url\r
- sgmllib.SGMLParser.__init__(self)\r
-\r
- def check_name_id(self, attributes):\r
- """ Check the name or id attributes on an element.\r
- """\r
- # We must rescue the NAME or id (name is deprecated in XHTML)\r
- # attributes from the anchor, in order to\r
- # cache the internal anchors which are made\r
- # available in the page.\r
- for name, value in attributes:\r
- if name == "name" or name == "id":\r
- if value in self.names:\r
- self.checker.message("WARNING: duplicate ID name %s in %s",\r
- value, self.url)\r
- else: self.names.append(value)\r
- break\r
-\r
- def unknown_starttag(self, tag, attributes):\r
- """ In XHTML, you can have id attributes on any element.\r
- """\r
- self.check_name_id(attributes)\r
-\r
- def start_a(self, attributes):\r
- self.link_attr(attributes, 'href')\r
- self.check_name_id(attributes)\r
-\r
- def end_a(self): pass\r
-\r
- def do_area(self, attributes):\r
- self.link_attr(attributes, 'href')\r
- self.check_name_id(attributes)\r
-\r
- def do_body(self, attributes):\r
- self.link_attr(attributes, 'background', 'bgsound')\r
- self.check_name_id(attributes)\r
-\r
- def do_img(self, attributes):\r
- self.link_attr(attributes, 'src', 'lowsrc')\r
- self.check_name_id(attributes)\r
-\r
- def do_frame(self, attributes):\r
- self.link_attr(attributes, 'src', 'longdesc')\r
- self.check_name_id(attributes)\r
-\r
- def do_iframe(self, attributes):\r
- self.link_attr(attributes, 'src', 'longdesc')\r
- self.check_name_id(attributes)\r
-\r
- def do_link(self, attributes):\r
- for name, value in attributes:\r
- if name == "rel":\r
- parts = value.lower().split()\r
- if ( parts == ["stylesheet"]\r
- or parts == ["alternate", "stylesheet"]):\r
- self.link_attr(attributes, "href")\r
- break\r
- self.check_name_id(attributes)\r
-\r
- def do_object(self, attributes):\r
- self.link_attr(attributes, 'data', 'usemap')\r
- self.check_name_id(attributes)\r
-\r
- def do_script(self, attributes):\r
- self.link_attr(attributes, 'src')\r
- self.check_name_id(attributes)\r
-\r
- def do_table(self, attributes):\r
- self.link_attr(attributes, 'background')\r
- self.check_name_id(attributes)\r
-\r
- def do_td(self, attributes):\r
- self.link_attr(attributes, 'background')\r
- self.check_name_id(attributes)\r
-\r
- def do_th(self, attributes):\r
- self.link_attr(attributes, 'background')\r
- self.check_name_id(attributes)\r
-\r
- def do_tr(self, attributes):\r
- self.link_attr(attributes, 'background')\r
- self.check_name_id(attributes)\r
-\r
- def link_attr(self, attributes, *args):\r
- for name, value in attributes:\r
- if name in args:\r
- if value: value = value.strip()\r
- if value: self.links[value] = None\r
-\r
- def do_base(self, attributes):\r
- for name, value in attributes:\r
- if name == 'href':\r
- if value: value = value.strip()\r
- if value:\r
- if self.checker:\r
- self.checker.note(1, " Base %s", value)\r
- self.base = value\r
- self.check_name_id(attributes)\r
-\r
- def getlinks(self):\r
- return self.links.keys()\r
-\r
- def getbase(self):\r
- return self.base\r
-\r
-\r
-if __name__ == '__main__':\r
- main()\r