git.proxmox.com Git - mirror_edk2.git/blobdiff - AppPkg/Applications/Python/Python-2.7.2/Tools/pybench/pybench.py
edk2: Remove AppPkg, StdLib, StdLibPrivateInternalFiles
diff --git a/AppPkg/Applications/Python/Python-2.7.2/Tools/pybench/pybench.py b/AppPkg/Applications/Python/Python-2.7.2/Tools/pybench/pybench.py
deleted file mode 100644 (file)
index 4bfc7f2..0000000
+++ /dev/null
@@ -1,961 +0,0 @@
-#!/usr/local/bin/python -O\r
-\r
-""" A Python Benchmark Suite\r
-\r
-"""\r
-#\r
-# Note: Please keep this module compatible with Python 1.5.2.\r
-#\r
-# Tests may include features in later Python versions, but these\r
-# should then be embedded in try-except clauses in the configuration\r
-# module Setup.py.\r
-#\r
-\r
-# pybench Copyright\r
-__copyright__ = """\\r
-Copyright (c), 1997-2006, Marc-Andre Lemburg (mal@lemburg.com)\r
-Copyright (c), 2000-2006, eGenix.com Software GmbH (info@egenix.com)\r
-\r
-                   All Rights Reserved.\r
-\r
-Permission to use, copy, modify, and distribute this software and its\r
-documentation for any purpose and without fee or royalty is hereby\r
-granted, provided that the above copyright notice appear in all copies\r
-and that both that copyright notice and this permission notice appear\r
-in supporting documentation or portions thereof, including\r
-modifications, that you make.\r
-\r
-THE AUTHOR MARC-ANDRE LEMBURG DISCLAIMS ALL WARRANTIES WITH REGARD TO\r
-THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND\r
-FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,\r
-INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING\r
-FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,\r
-NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION\r
-WITH THE USE OR PERFORMANCE OF THIS SOFTWARE !\r
-"""\r
-\r
-import sys, time, operator, string, platform\r
-from CommandLine import *\r
-\r
-try:\r
-    import cPickle\r
-    pickle = cPickle\r
-except ImportError:\r
-    import pickle\r
-\r
-# Version number; version history: see README file !\r
-__version__ = '2.0'\r
-\r
-### Constants\r
-\r
-# Second fractions\r
-MILLI_SECONDS = 1e3\r
-MICRO_SECONDS = 1e6\r
-\r
-# Percent unit\r
-PERCENT = 100\r
-\r
-# Horizontal line length\r
-LINE = 79\r
-\r
-# Minimum test run-time\r
-MIN_TEST_RUNTIME = 1e-3\r
-\r
-# Number of calibration runs to use for calibrating the tests\r
-CALIBRATION_RUNS = 20\r
-\r
-# Number of calibration loops to run for each calibration run\r
-CALIBRATION_LOOPS = 20\r
-\r
-# Allow skipping calibration ?\r
-ALLOW_SKIPPING_CALIBRATION = 1\r
-\r
-# Timer types\r
-TIMER_TIME_TIME = 'time.time'\r
-TIMER_TIME_CLOCK = 'time.clock'\r
-TIMER_SYSTIMES_PROCESSTIME = 'systimes.processtime'\r
-\r
-# Choose platform default timer\r
-if sys.platform[:3] == 'win':\r
-    # On WinXP this has 2.5ms resolution\r
-    TIMER_PLATFORM_DEFAULT = TIMER_TIME_CLOCK\r
-else:\r
-    # On Linux this has 1ms resolution\r
-    TIMER_PLATFORM_DEFAULT = TIMER_TIME_TIME\r
-\r
-# Print debug information ?\r
-_debug = 0\r
-\r
-### Helpers\r
-\r
-def get_timer(timertype):\r
-\r
-    if timertype == TIMER_TIME_TIME:\r
-        return time.time\r
-    elif timertype == TIMER_TIME_CLOCK:\r
-        return time.clock\r
-    elif timertype == TIMER_SYSTIMES_PROCESSTIME:\r
-        import systimes\r
-        return systimes.processtime\r
-    else:\r
-        raise TypeError('unknown timer type: %s' % timertype)\r
-\r
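# --- Editor's illustrative sketch (not part of the deleted file) -----------
# A rough way to probe the effective resolution of one of the timer types
# above before overriding it via --timer.  The helper name and the sample
# count are invented for illustration only.
def _probe_timer_resolution(timertype, samples=10):
    timer = get_timer(timertype)
    deltas = []
    for _ in range(samples):
        t0 = timer()
        t1 = timer()
        # Busy-wait until the reading changes; the smallest observed step
        # approximates the timer's resolution.
        while t1 == t0:
            t1 = timer()
        deltas.append(t1 - t0)
    return min(deltas)
# E.g. _probe_timer_resolution(TIMER_TIME_TIME) comes out around 1e-3 on
# older Linux systems, matching the comment on TIMER_PLATFORM_DEFAULT above.
# ----------------------------------------------------------------------------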
-def get_machine_details():\r
-\r
-    if _debug:\r
-        print 'Getting machine details...'\r
-    buildno, builddate = platform.python_build()\r
-    python = platform.python_version()\r
-    try:\r
-        unichr(100000)\r
-    except ValueError:\r
-        # UCS2 build (standard)\r
-        unicode = 'UCS2'\r
-    except NameError:\r
-        unicode = None\r
-    else:\r
-        # UCS4 build (most recent Linux distros)\r
-        unicode = 'UCS4'\r
-    bits, linkage = platform.architecture()\r
-    return {\r
-        'platform': platform.platform(),\r
-        'processor': platform.processor(),\r
-        'executable': sys.executable,\r
-        'implementation': getattr(platform, 'python_implementation',\r
-                                  lambda:'n/a')(),\r
-        'python': platform.python_version(),\r
-        'compiler': platform.python_compiler(),\r
-        'buildno': buildno,\r
-        'builddate': builddate,\r
-        'unicode': unicode,\r
-        'bits': bits,\r
-        }\r
-\r
-def print_machine_details(d, indent=''):\r
-\r
-    l = ['Machine Details:',\r
-         '   Platform ID:    %s' % d.get('platform', 'n/a'),\r
-         '   Processor:      %s' % d.get('processor', 'n/a'),\r
-         '',\r
-         'Python:',\r
-         '   Implementation: %s' % d.get('implementation', 'n/a'),\r
-         '   Executable:     %s' % d.get('executable', 'n/a'),\r
-         '   Version:        %s' % d.get('python', 'n/a'),\r
-         '   Compiler:       %s' % d.get('compiler', 'n/a'),\r
-         '   Bits:           %s' % d.get('bits', 'n/a'),\r
-         '   Build:          %s (#%s)' % (d.get('builddate', 'n/a'),\r
-                                          d.get('buildno', 'n/a')),\r
-         '   Unicode:        %s' % d.get('unicode', 'n/a'),\r
-         ]\r
-    print indent + string.join(l, '\n' + indent) + '\n'\r
-\r
-### Test baseclass\r
-\r
-class Test:\r
-\r
-    """ All tests must have this class as their base class. It provides\r
-        the necessary interface to the benchmark machinery.\r
-\r
-        The tests must set .rounds to a value high enough to let the\r
-        test run between 20-50 seconds. This is needed because\r
-        clock()-timing only gives rather inaccurate values (on Linux,\r
-        for example, it is accurate to a few hundredths of a\r
-        second). If you don't want to wait that long, use a warp\r
-        factor larger than 1.\r
-\r
-        It is also important to set the .operations variable to a\r
-        value representing the number of "virtual operations" done per\r
-        call of .run().\r
-\r
-        If you change a test in some way, don't forget to increase\r
-        its version number.\r
-\r
-    """\r
-\r
-    ### Instance variables that each test should override\r
-\r
-    # Version number of the test as float (x.yy); this is important\r
-    # for comparisons of benchmark runs - tests with unequal version\r
-    # number will not get compared.\r
-    version = 2.0\r
-\r
-    # The number of abstract operations done in each round of the\r
-    # test. An operation is the basic unit of what you want to\r
-    # measure. The benchmark will output the amount of run-time per\r
-    # operation. Note that in order to raise the measured timings\r
-    # significantly above noise level, it is often required to repeat\r
-    # sets of operations more than once per test round. The measured\r
-    # overhead per test round should be less than 1 second.\r
-    operations = 1\r
-\r
-    # Number of rounds to execute per test run. This should be\r
-    # adjusted to a figure that results in a test run-time of between\r
-    # 1-2 seconds.\r
-    rounds = 100000\r
-\r
-    ### Internal variables\r
-\r
-    # Mark this class as implementing a test\r
-    is_a_test = 1\r
-\r
-    # Last timing: (real, run, overhead)\r
-    last_timing = (0.0, 0.0, 0.0)\r
-\r
-    # Warp factor to use for this test\r
-    warp = 1\r
-\r
-    # Number of calibration runs to use\r
-    calibration_runs = CALIBRATION_RUNS\r
-\r
-    # List of calibration timings\r
-    overhead_times = None\r
-\r
-    # List of test run timings\r
-    times = []\r
-\r
-    # Timer used for the benchmark\r
-    timer = TIMER_PLATFORM_DEFAULT\r
-\r
-    def __init__(self, warp=None, calibration_runs=None, timer=None):\r
-\r
-        # Set parameters\r
-        if warp is not None:\r
-            self.rounds = int(self.rounds / warp)\r
-            if self.rounds == 0:\r
-                raise ValueError('warp factor set too high')\r
-            self.warp = warp\r
-        if calibration_runs is not None:\r
-            if (not ALLOW_SKIPPING_CALIBRATION and\r
-                calibration_runs < 1):\r
-                raise ValueError('at least one calibration run is required')\r
-            self.calibration_runs = calibration_runs\r
-        if timer is not None:\r
-            self.timer = timer\r
-\r
-        # Init variables\r
-        self.times = []\r
-        self.overhead_times = []\r
-\r
-        # We want these to be in the instance dict, so that pickle\r
-        # saves them\r
-        self.version = self.version\r
-        self.operations = self.operations\r
-        self.rounds = self.rounds\r
-\r
-    def get_timer(self):\r
-\r
-        """ Return the timer function to use for the test.\r
-\r
-        """\r
-        return get_timer(self.timer)\r
-\r
-    def compatible(self, other):\r
-\r
-        """ Return 1/0 depending on whether the test is compatible\r
-            with the other Test instance or not.\r
-\r
-        """\r
-        if self.version != other.version:\r
-            return 0\r
-        if self.rounds != other.rounds:\r
-            return 0\r
-        return 1\r
-\r
-    def calibrate_test(self):\r
-\r
-        if self.calibration_runs == 0:\r
-            self.overhead_times = [0.0]\r
-            return\r
-\r
-        calibrate = self.calibrate\r
-        timer = self.get_timer()\r
-        calibration_loops = range(CALIBRATION_LOOPS)\r
-\r
-        # Time the calibration loop overhead\r
-        prep_times = []\r
-        for i in range(self.calibration_runs):\r
-            t = timer()\r
-            for i in calibration_loops:\r
-                pass\r
-            t = timer() - t\r
-            prep_times.append(t / CALIBRATION_LOOPS)\r
-        min_prep_time = min(prep_times)\r
-        if _debug:\r
-            print\r
-            print 'Calib. prep time     = %.6fms' % (\r
-                min_prep_time * MILLI_SECONDS)\r
-\r
-        # Time the calibration runs (doing CALIBRATION_LOOPS loops of\r
-        # .calibrate() method calls each)\r
-        for i in range(self.calibration_runs):\r
-            t = timer()\r
-            for i in calibration_loops:\r
-                calibrate()\r
-            t = timer() - t\r
-            self.overhead_times.append(t / CALIBRATION_LOOPS\r
-                                       - min_prep_time)\r
-\r
-        # Check the measured times\r
-        min_overhead = min(self.overhead_times)\r
-        max_overhead = max(self.overhead_times)\r
-        if _debug:\r
-            print 'Calib. overhead time = %.6fms' % (\r
-                min_overhead * MILLI_SECONDS)\r
-        if min_overhead < 0.0:\r
-            raise ValueError('calibration setup did not work')\r
-        if max_overhead - min_overhead > 0.1:\r
-            raise ValueError(\r
-                'overhead calibration timing range too inaccurate: '\r
-                '%r - %r' % (min_overhead, max_overhead))\r
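    # --- Editor's note (illustrative numbers, not part of the deleted file) --
    # Worked example of the arithmetic above: if one pass over the
    # CALIBRATION_LOOPS (20) empty-loop iterations takes 4e-05s, then
    # min_prep_time = 4e-05 / 20 = 2e-06s per iteration.  If 20 calls of
    # .calibrate() then take 1.2e-03s, the recorded overhead is
    # 1.2e-03 / 20 - 2e-06 = 5.8e-05s, i.e. about 0.058ms per call.  run()
    # later subtracts the minimum of these overhead values from each measured
    # test time to obtain the "effective" time.
    # --------------------------------------------------------------------------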
-\r
-    def run(self):\r
-\r
-        """ Run the test in two phases: first calibrate, then\r
-            do the actual test. Be careful to keep the calibration\r
-            timing low relative to the test timing.\r
-\r
-        """\r
-        test = self.test\r
-        timer = self.get_timer()\r
-\r
-        # Get calibration\r
-        min_overhead = min(self.overhead_times)\r
-\r
-        # Test run\r
-        t = timer()\r
-        test()\r
-        t = timer() - t\r
-        if t < MIN_TEST_RUNTIME:\r
-            raise ValueError('warp factor too high: '\r
-                             'test times are < 1ms')\r
-        eff_time = t - min_overhead\r
-        if eff_time < 0:\r
-            raise ValueError('wrong calibration')\r
-        self.last_timing = (eff_time, t, min_overhead)\r
-        self.times.append(eff_time)\r
-\r
-    def calibrate(self):\r
-\r
-        """ Calibrate the test.\r
-\r
-            This method should execute everything that is needed to\r
-            set up and run the test - except for the actual operations\r
-            that you intend to measure. pybench uses this method to\r
-            measure the test implementation overhead.\r
-\r
-        """\r
-        return\r
-\r
-    def test(self):\r
-\r
-        """ Run the test.\r
-\r
-            The test needs to run self.rounds rounds, executing\r
-            self.operations operations in each round.\r
-\r
-        """\r
-        return\r
-\r
-    def stat(self):\r
-\r
-        """ Return test run statistics as tuple:\r
-\r
-            (minimum run time,\r
-             average run time,\r
-             total run time,\r
-             average time per operation,\r
-             minimum overhead time)\r
-\r
-        """\r
-        runs = len(self.times)\r
-        if runs == 0:\r
-            return 0.0, 0.0, 0.0, 0.0, 0.0\r
-        min_time = min(self.times)\r
-        total_time = reduce(operator.add, self.times, 0.0)\r
-        avg_time = total_time / float(runs)\r
-        operation_avg = total_time / float(runs\r
-                                           * self.rounds\r
-                                           * self.operations)\r
-        if self.overhead_times:\r
-            min_overhead = min(self.overhead_times)\r
-        else:\r
-            min_overhead = self.last_timing[2]\r
-        return min_time, avg_time, total_time, operation_avg, min_overhead\r
-\r
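# --- Editor's illustrative sketch (not part of the deleted file) -----------
# A minimal Test subclass following the contract documented above: bump
# .version when the test changes, set .operations to the number of "virtual
# operations" per round, and mirror the test's loop structure (minus the
# measured work) in .calibrate().  The class name, workload and rounds value
# are invented for illustration; the real tests live in the modules imported
# by Setup.py, which is where load_tests() discovers them.
class TupleSlicingExample(Test):

    version = 2.0
    operations = 3              # three slice operations per round
    rounds = 800000

    def test(self):
        t = tuple(range(25))
        for i in xrange(self.rounds):
            s1 = t[1:]
            s2 = t[:-1]
            s3 = t[3:20]

    def calibrate(self):
        t = tuple(range(25))
        for i in xrange(self.rounds):
            pass
# ----------------------------------------------------------------------------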
-### Load Setup\r
-\r
-# This has to be done after the definition of the Test class, since\r
-# the Setup module imports test modules whose classes subclass Test.\r
-\r
-import Setup\r
-\r
-### Benchmark base class\r
-\r
-class Benchmark:\r
-\r
-    # Name of the benchmark\r
-    name = ''\r
-\r
-    # Number of benchmark rounds to run\r
-    rounds = 1\r
-\r
-    # Warp factor used to run the tests\r
-    warp = 1\r
-\r
-    # Average benchmark round time\r
-    roundtime = 0\r
-\r
-    # Benchmark version number as float x.yy\r
-    version = 2.0\r
-\r
-    # Produce verbose output ?\r
-    verbose = 0\r
-\r
-    # Dictionary with the machine details\r
-    machine_details = None\r
-\r
-    # Timer used for the benchmark\r
-    timer = TIMER_PLATFORM_DEFAULT\r
-\r
-    def __init__(self, name, verbose=None, timer=None, warp=None,\r
-                 calibration_runs=None):\r
-\r
-        if name:\r
-            self.name = name\r
-        else:\r
-            self.name = '%04i-%02i-%02i %02i:%02i:%02i' % \\r
-                        (time.localtime(time.time())[:6])\r
-        if verbose is not None:\r
-            self.verbose = verbose\r
-        if timer is not None:\r
-            self.timer = timer\r
-        if warp is not None:\r
-            self.warp = warp\r
-        if calibration_runs is not None:\r
-            self.calibration_runs = calibration_runs\r
-\r
-        # Init vars\r
-        self.tests = {}\r
-        if _debug:\r
-            print 'Getting machine details...'\r
-        self.machine_details = get_machine_details()\r
-\r
-        # Make .version an instance attribute to have it saved in the\r
-        # Benchmark pickle\r
-        self.version = self.version\r
-\r
-    def get_timer(self):\r
-\r
-        """ Return the timer function to use for the test.\r
-\r
-        """\r
-        return get_timer(self.timer)\r
-\r
-    def compatible(self, other):\r
-\r
-        """ Return 1/0 depending on whether the benchmark is\r
-            compatible with the other Benchmark instance or not.\r
-\r
-        """\r
-        if self.version != other.version:\r
-            return 0\r
-        if (self.machine_details == other.machine_details and\r
-            self.timer != other.timer):\r
-            return 0\r
-        if (self.calibration_runs == 0 and\r
-            other.calibration_runs != 0):\r
-            return 0\r
-        if (self.calibration_runs != 0 and\r
-            other.calibration_runs == 0):\r
-            return 0\r
-        return 1\r
-\r
-    def load_tests(self, setupmod, limitnames=None):\r
-\r
-        # Add tests\r
-        if self.verbose:\r
-            print 'Searching for tests ...'\r
-            print '--------------------------------------'\r
-        for testclass in setupmod.__dict__.values():\r
-            if not hasattr(testclass, 'is_a_test'):\r
-                continue\r
-            name = testclass.__name__\r
-            if name == 'Test':\r
-                continue\r
-            if (limitnames is not None and\r
-                limitnames.search(name) is None):\r
-                continue\r
-            self.tests[name] = testclass(\r
-                warp=self.warp,\r
-                calibration_runs=self.calibration_runs,\r
-                timer=self.timer)\r
-        l = self.tests.keys()\r
-        l.sort()\r
-        if self.verbose:\r
-            for name in l:\r
-                print '  %s' % name\r
-            print '--------------------------------------'\r
-            print '  %i tests found' % len(l)\r
-            print\r
-\r
-    def calibrate(self):\r
-\r
-        print 'Calibrating tests. Please wait...',\r
-        sys.stdout.flush()\r
-        if self.verbose:\r
-            print\r
-            print\r
-            print 'Test                              min      max'\r
-            print '-' * LINE\r
-        tests = self.tests.items()\r
-        tests.sort()\r
-        for i in range(len(tests)):\r
-            name, test = tests[i]\r
-            test.calibrate_test()\r
-            if self.verbose:\r
-                print '%30s:  %6.3fms  %6.3fms' % \\r
-                      (name,\r
-                       min(test.overhead_times) * MILLI_SECONDS,\r
-                       max(test.overhead_times) * MILLI_SECONDS)\r
-        if self.verbose:\r
-            print\r
-            print 'Done with the calibration.'\r
-        else:\r
-            print 'done.'\r
-        print\r
-\r
-    def run(self):\r
-\r
-        tests = self.tests.items()\r
-        tests.sort()\r
-        timer = self.get_timer()\r
-        print 'Running %i round(s) of the suite at warp factor %i:' % \\r
-              (self.rounds, self.warp)\r
-        print\r
-        self.roundtimes = []\r
-        for i in range(self.rounds):\r
-            if self.verbose:\r
-                print ' Round %-25i  effective   absolute  overhead' % (i+1)\r
-            total_eff_time = 0.0\r
-            for j in range(len(tests)):\r
-                name, test = tests[j]\r
-                if self.verbose:\r
-                    print '%30s:' % name,\r
-                test.run()\r
-                (eff_time, abs_time, min_overhead) = test.last_timing\r
-                total_eff_time = total_eff_time + eff_time\r
-                if self.verbose:\r
-                    print '    %5.0fms    %5.0fms %7.3fms' % \\r
-                          (eff_time * MILLI_SECONDS,\r
-                           abs_time * MILLI_SECONDS,\r
-                           min_overhead * MILLI_SECONDS)\r
-            self.roundtimes.append(total_eff_time)\r
-            if self.verbose:\r
-                print ('                   '\r
-                       '               ------------------------------')\r
-                print ('                   '\r
-                       '     Totals:    %6.0fms' %\r
-                       (total_eff_time * MILLI_SECONDS))\r
-                print\r
-            else:\r
-                print '* Round %i done in %.3f seconds.' % (i+1,\r
-                                                            total_eff_time)\r
-        print\r
-\r
-    def stat(self):\r
-\r
-        """ Return benchmark run statistics as tuple:\r
-\r
-            (minimum round time,\r
-             average round time,\r
-             maximum round time)\r
-\r
-            XXX Currently not used, since the benchmark reports per-test\r
-                statistics across all rounds instead.\r
-\r
-        """\r
-        runs = len(self.roundtimes)\r
-        if runs == 0:\r
-            return 0.0, 0.0, 0.0\r
-        min_time = min(self.roundtimes)\r
-        total_time = reduce(operator.add, self.roundtimes, 0.0)\r
-        avg_time = total_time / float(runs)\r
-        max_time = max(self.roundtimes)\r
-        return (min_time, avg_time, max_time)\r
-\r
-    def print_header(self, title='Benchmark'):\r
-\r
-        print '-' * LINE\r
-        print '%s: %s' % (title, self.name)\r
-        print '-' * LINE\r
-        print\r
-        print '    Rounds: %s' % self.rounds\r
-        print '    Warp:   %s' % self.warp\r
-        print '    Timer:  %s' % self.timer\r
-        print\r
-        if self.machine_details:\r
-            print_machine_details(self.machine_details, indent='    ')\r
-            print\r
-\r
-    def print_benchmark(self, hidenoise=0, limitnames=None):\r
-\r
-        print ('Test                          '\r
-               '   minimum  average  operation  overhead')\r
-        print '-' * LINE\r
-        tests = self.tests.items()\r
-        tests.sort()\r
-        total_min_time = 0.0\r
-        total_avg_time = 0.0\r
-        for name, test in tests:\r
-            if (limitnames is not None and\r
-                limitnames.search(name) is None):\r
-                continue\r
-            (min_time,\r
-             avg_time,\r
-             total_time,\r
-             op_avg,\r
-             min_overhead) = test.stat()\r
-            total_min_time = total_min_time + min_time\r
-            total_avg_time = total_avg_time + avg_time\r
-            print '%30s:  %5.0fms  %5.0fms  %6.2fus  %7.3fms' % \\r
-                  (name,\r
-                   min_time * MILLI_SECONDS,\r
-                   avg_time * MILLI_SECONDS,\r
-                   op_avg * MICRO_SECONDS,\r
-                   min_overhead * MILLI_SECONDS)\r
-        print '-' * LINE\r
-        print ('Totals:                        '\r
-               ' %6.0fms %6.0fms' %\r
-               (total_min_time * MILLI_SECONDS,\r
-                total_avg_time * MILLI_SECONDS,\r
-                ))\r
-        print\r
-\r
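    # --- Editor's note (illustrative numbers, not part of the deleted file) --
    # The "operation" column above is Test.stat()'s operation_avg, i.e.
    # total_time / (runs * rounds * operations).  For example, 2.0s of
    # accumulated effective time over 10 runs of a test with rounds=50000 and
    # operations=4 gives 2.0 / (10 * 50000 * 4) = 1e-06s, printed as "1.00us".
    # --------------------------------------------------------------------------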
-    def print_comparison(self, compare_to, hidenoise=0, limitnames=None):\r
-\r
-        # Check benchmark versions\r
-        if compare_to.version != self.version:\r
-            print ('* Benchmark versions differ: '\r
-                   'cannot compare this benchmark to "%s" !' %\r
-                   compare_to.name)\r
-            print\r
-            self.print_benchmark(hidenoise=hidenoise,\r
-                                 limitnames=limitnames)\r
-            return\r
-\r
-        # Print header\r
-        compare_to.print_header('Comparing with')\r
-        print ('Test                          '\r
-               '   minimum run-time        average  run-time')\r
-        print ('                              '\r
-               '   this    other   diff    this    other   diff')\r
-        print '-' * LINE\r
-\r
-        # Print test comparisons\r
-        tests = self.tests.items()\r
-        tests.sort()\r
-        total_min_time = other_total_min_time = 0.0\r
-        total_avg_time = other_total_avg_time = 0.0\r
-        benchmarks_compatible = self.compatible(compare_to)\r
-        tests_compatible = 1\r
-        for name, test in tests:\r
-            if (limitnames is not None and\r
-                limitnames.search(name) is None):\r
-                continue\r
-            (min_time,\r
-             avg_time,\r
-             total_time,\r
-             op_avg,\r
-             min_overhead) = test.stat()\r
-            total_min_time = total_min_time + min_time\r
-            total_avg_time = total_avg_time + avg_time\r
-            try:\r
-                other = compare_to.tests[name]\r
-            except KeyError:\r
-                other = None\r
-            if other is None:\r
-                # Other benchmark doesn't include the given test\r
-                min_diff, avg_diff = 'n/a', 'n/a'\r
-                other_min_time = 0.0\r
-                other_avg_time = 0.0\r
-                tests_compatible = 0\r
-            else:\r
-                (other_min_time,\r
-                 other_avg_time,\r
-                 other_total_time,\r
-                 other_op_avg,\r
-                 other_min_overhead) = other.stat()\r
-                other_total_min_time = other_total_min_time + other_min_time\r
-                other_total_avg_time = other_total_avg_time + other_avg_time\r
-                if (benchmarks_compatible and\r
-                    test.compatible(other)):\r
-                    # Both benchmark and tests are comparable\r
-                    min_diff = ((min_time * self.warp) /\r
-                                (other_min_time * other.warp) - 1.0)\r
-                    avg_diff = ((avg_time * self.warp) /\r
-                                (other_avg_time * other.warp) - 1.0)\r
-                    if hidenoise and abs(min_diff * PERCENT) < 10.0:\r
-                        min_diff = ''\r
-                    else:\r
-                        min_diff = '%+5.1f%%' % (min_diff * PERCENT)\r
-                    if hidenoise and abs(avg_diff * PERCENT) < 10.0:\r
-                        avg_diff = ''\r
-                    else:\r
-                        avg_diff = '%+5.1f%%' % (avg_diff * PERCENT)\r
-                else:\r
-                    # Benchmark or tests are not comparable\r
-                    min_diff, avg_diff = 'n/a', 'n/a'\r
-                    tests_compatible = 0\r
-            print '%30s: %5.0fms %5.0fms %7s %5.0fms %5.0fms %7s' % \\r
-                  (name,\r
-                   min_time * MILLI_SECONDS,\r
-                   other_min_time * MILLI_SECONDS * compare_to.warp / self.warp,\r
-                   min_diff,\r
-                   avg_time * MILLI_SECONDS,\r
-                   other_avg_time * MILLI_SECONDS * compare_to.warp / self.warp,\r
-                   avg_diff)\r
-        print '-' * LINE\r
-\r
-        # Summarise test results\r
-        if not benchmarks_compatible or not tests_compatible:\r
-            min_diff, avg_diff = 'n/a', 'n/a'\r
-        else:\r
-            if other_total_min_time != 0.0:\r
-                min_diff = '%+5.1f%%' % (\r
-                    ((total_min_time * self.warp) /\r
-                     (other_total_min_time * compare_to.warp) - 1.0) * PERCENT)\r
-            else:\r
-                min_diff = 'n/a'\r
-            if other_total_avg_time != 0.0:\r
-                avg_diff = '%+5.1f%%' % (\r
-                    ((total_avg_time * self.warp) /\r
-                     (other_total_avg_time * compare_to.warp) - 1.0) * PERCENT)\r
-            else:\r
-                avg_diff = 'n/a'\r
-        print ('Totals:                       '\r
-               '  %5.0fms %5.0fms %7s %5.0fms %5.0fms %7s' %\r
-               (total_min_time * MILLI_SECONDS,\r
-                (other_total_min_time * compare_to.warp/self.warp\r
-                 * MILLI_SECONDS),\r
-                min_diff,\r
-                total_avg_time * MILLI_SECONDS,\r
-                (other_total_avg_time * compare_to.warp/self.warp\r
-                 * MILLI_SECONDS),\r
-                avg_diff\r
-               ))\r
-        print\r
-        print '(this=%s, other=%s)' % (self.name,\r
-                                       compare_to.name)\r
-        print\r
-\r
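# --- Editor's note (illustrative numbers, not part of the deleted file) ------
# Worked example of the warp-normalised comparison in print_comparison(): with
# this run at warp 10 measuring min_time = 0.050s and the other run at warp 20
# measuring other_min_time = 0.030s,
#
#     min_diff = (0.050 * 10) / (0.030 * 20) - 1.0 = 0.5 / 0.6 - 1.0 = -0.167
#
# which prints as "-16.7%", i.e. this run is roughly 17% faster once both
# timings are scaled back to warp 1.  The "other" columns themselves are
# rescaled by compare_to.warp / self.warp so that both runs are displayed at
# this run's warp factor.
# ------------------------------------------------------------------------------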
-class PyBenchCmdline(Application):\r
-\r
-    header = ("PYBENCH - a benchmark test suite for Python "\r
-              "interpreters/compilers.")\r
-\r
-    version = __version__\r
-\r
-    debug = _debug\r
-\r
-    options = [ArgumentOption('-n',\r
-                              'number of rounds',\r
-                              Setup.Number_of_rounds),\r
-               ArgumentOption('-f',\r
-                              'save benchmark to file arg',\r
-                              ''),\r
-               ArgumentOption('-c',\r
-                              'compare benchmark with the one in file arg',\r
-                              ''),\r
-               ArgumentOption('-s',\r
-                              'show benchmark in file arg, then exit',\r
-                              ''),\r
-               ArgumentOption('-w',\r
-                              'set warp factor to arg',\r
-                              Setup.Warp_factor),\r
-               ArgumentOption('-t',\r
-                              'run only tests with names matching arg',\r
-                              ''),\r
-               ArgumentOption('-C',\r
-                              'set the number of calibration runs to arg',\r
-                              CALIBRATION_RUNS),\r
-               SwitchOption('-d',\r
-                            'hide noise in comparisons',\r
-                            0),\r
-               SwitchOption('-v',\r
-                            'verbose output (not recommended)',\r
-                            0),\r
-               SwitchOption('--with-gc',\r
-                            'enable garbage collection',\r
-                            0),\r
-               SwitchOption('--with-syscheck',\r
-                            'use default sys check interval',\r
-                            0),\r
-               ArgumentOption('--timer',\r
-                            'use given timer',\r
-                            TIMER_PLATFORM_DEFAULT),\r
-               ]\r
-\r
-    about = """\\r
-The normal operation is to run the suite and display the\r
-results. Use -f to save them for later reuse or comparisons.\r
-\r
-Available timers:\r
-\r
-   time.time\r
-   time.clock\r
-   systimes.processtime\r
-\r
-Examples:\r
-\r
-python2.1 pybench.py -f p21.pybench\r
-python2.5 pybench.py -f p25.pybench\r
-python pybench.py -s p25.pybench -c p21.pybench\r
-"""\r
-    copyright = __copyright__\r
-\r
-    def main(self):\r
-\r
-        rounds = self.values['-n']\r
-        reportfile = self.values['-f']\r
-        show_bench = self.values['-s']\r
-        compare_to = self.values['-c']\r
-        hidenoise = self.values['-d']\r
-        warp = int(self.values['-w'])\r
-        withgc = self.values['--with-gc']\r
-        limitnames = self.values['-t']\r
-        if limitnames:\r
-            if _debug:\r
-                print '* limiting test names to those matching "%s"' % \\r
-                      limitnames\r
-            limitnames = re.compile(limitnames, re.I)\r
-        else:\r
-            limitnames = None\r
-        verbose = self.verbose\r
-        withsyscheck = self.values['--with-syscheck']\r
-        calibration_runs = self.values['-C']\r
-        timer = self.values['--timer']\r
-\r
-        print '-' * LINE\r
-        print 'PYBENCH %s' % __version__\r
-        print '-' * LINE\r
-        print '* using %s %s' % (\r
-            getattr(platform, 'python_implementation', lambda:'Python')(),\r
-            string.join(string.split(sys.version), ' '))\r
-\r
-        # Switch off garbage collection\r
-        if not withgc:\r
-            try:\r
-                import gc\r
-            except ImportError:\r
-                print '* Python version doesn\'t support garbage collection'\r
-            else:\r
-                try:\r
-                    gc.disable()\r
-                except NotImplementedError:\r
-                    print '* Python version doesn\'t support gc.disable'\r
-                else:\r
-                    print '* disabled garbage collection'\r
-\r
-        # "Disable" sys check interval\r
-        if not withsyscheck:\r
-            # Too bad the check interval uses an int instead of a long...\r
-            value = 2147483647\r
-            try:\r
-                sys.setcheckinterval(value)\r
-            except (AttributeError, NotImplementedError):\r
-                print '* Python version doesn\'t support sys.setcheckinterval'\r
-            else:\r
-                print '* system check interval set to maximum: %s' % value\r
-\r
-        if timer == TIMER_SYSTIMES_PROCESSTIME:\r
-            import systimes\r
-            print '* using timer: systimes.processtime (%s)' % \\r
-                  systimes.SYSTIMES_IMPLEMENTATION\r
-        else:\r
-            print '* using timer: %s' % timer\r
-\r
-        print\r
-\r
-        if compare_to:\r
-            try:\r
-                f = open(compare_to,'rb')\r
-                bench = pickle.load(f)\r
-                bench.name = compare_to\r
-                f.close()\r
-                compare_to = bench\r
-            except IOError, reason:\r
-                print '* Error opening/reading file %s: %s' % (\r
-                    repr(compare_to),\r
-                    reason)\r
-                compare_to = None\r
-\r
-        if show_bench:\r
-            try:\r
-                f = open(show_bench,'rb')\r
-                bench = pickle.load(f)\r
-                bench.name = show_bench\r
-                f.close()\r
-                bench.print_header()\r
-                if compare_to:\r
-                    bench.print_comparison(compare_to,\r
-                                           hidenoise=hidenoise,\r
-                                           limitnames=limitnames)\r
-                else:\r
-                    bench.print_benchmark(hidenoise=hidenoise,\r
-                                          limitnames=limitnames)\r
-            except IOError, reason:\r
-                print '* Error opening/reading file %s: %s' % (\r
-                    repr(show_bench),\r
-                    reason)\r
-                print\r
-            return\r
-\r
-        if reportfile:\r
-            print 'Creating benchmark: %s (rounds=%i, warp=%i)' % \\r
-                  (reportfile, rounds, warp)\r
-            print\r
-\r
-        # Create benchmark object\r
-        bench = Benchmark(reportfile,\r
-                          verbose=verbose,\r
-                          timer=timer,\r
-                          warp=warp,\r
-                          calibration_runs=calibration_runs)\r
-        bench.rounds = rounds\r
-        bench.load_tests(Setup, limitnames=limitnames)\r
-        try:\r
-            bench.calibrate()\r
-            bench.run()\r
-        except KeyboardInterrupt:\r
-            print\r
-            print '*** KeyboardInterrupt -- Aborting'\r
-            print\r
-            return\r
-        bench.print_header()\r
-        if compare_to:\r
-            bench.print_comparison(compare_to,\r
-                                   hidenoise=hidenoise,\r
-                                   limitnames=limitnames)\r
-        else:\r
-            bench.print_benchmark(hidenoise=hidenoise,\r
-                                  limitnames=limitnames)\r
-\r
-        # Ring bell\r
-        sys.stderr.write('\007')\r
-\r
-        if reportfile:\r
-            try:\r
-                f = open(reportfile,'wb')\r
-                bench.name = reportfile\r
-                pickle.dump(bench,f)\r
-                f.close()\r
-            except IOError, reason:\r
-                print '* Error opening/writing reportfile %s: %s' % (\r
-                    reportfile,\r
-                    reason)\r
-                print\r
-\r
-if __name__ == '__main__':\r
-    PyBenchCmdline()\r
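# --- Editor's usage sketch (file names are illustrative only) ----------------
# Typical workflow with the options documented in PyBenchCmdline above:
#
#   python pybench.py -f baseline.pybench              # run suite, save results
#   python pybench.py -w 20 -t String -f quick.pybench # warp 20, only tests
#                                                      # whose names match "String"
#   python pybench.py -s quick.pybench -c baseline.pybench
#                                                      # show one saved run and
#                                                      # compare it to another
# ------------------------------------------------------------------------------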