git.proxmox.com Git - mirror_edk2.git/blobdiff - AppPkg/Applications/Python/Python-2.7.2/Lib/test/test_threading.py
edk2: Remove AppPkg, StdLib, StdLibPrivateInternalFiles
diff --git a/AppPkg/Applications/Python/Python-2.7.2/Lib/test/test_threading.py b/AppPkg/Applications/Python/Python-2.7.2/Lib/test/test_threading.py
deleted file mode 100644 (file)
index 939e11b..0000000
+++ /dev/null
@@ -1,734 +0,0 @@
-# Very rudimentary test of threading module\r
-\r
-import test.test_support\r
-from test.test_support import verbose\r
-import random\r
-import re\r
-import sys\r
-thread = test.test_support.import_module('thread')\r
-threading = test.test_support.import_module('threading')\r
-import time\r
-import unittest\r
-import weakref\r
-import os\r
-import subprocess\r
-\r
-from test import lock_tests\r
-\r
-# A trivial mutable counter.\r
-class Counter(object):\r
-    def __init__(self):\r
-        self.value = 0\r
-    def inc(self):\r
-        self.value += 1\r
-    def dec(self):\r
-        self.value -= 1\r
-    def get(self):\r
-        return self.value\r
-\r
-class TestThread(threading.Thread):\r
-    def __init__(self, name, testcase, sema, mutex, nrunning):\r
-        threading.Thread.__init__(self, name=name)\r
-        self.testcase = testcase\r
-        self.sema = sema\r
-        self.mutex = mutex\r
-        self.nrunning = nrunning\r
-\r
-    def run(self):\r
-        delay = random.random() / 10000.0\r
-        if verbose:\r
-            print 'task %s will run for %.1f usec' % (\r
-                self.name, delay * 1e6)\r
-\r
-        with self.sema:\r
-            with self.mutex:\r
-                self.nrunning.inc()\r
-                if verbose:\r
-                    print self.nrunning.get(), 'tasks are running'\r
-                self.testcase.assertTrue(self.nrunning.get() <= 3)\r
-\r
-            time.sleep(delay)\r
-            if verbose:\r
-                print 'task', self.name, 'done'\r
-\r
-            with self.mutex:\r
-                self.nrunning.dec()\r
-                self.testcase.assertTrue(self.nrunning.get() >= 0)\r
-                if verbose:\r
-                    print '%s is finished. %d tasks are running' % (\r
-                        self.name, self.nrunning.get())\r
-\r
-class BaseTestCase(unittest.TestCase):\r
-    def setUp(self):\r
-        self._threads = test.test_support.threading_setup()\r
-\r
-    def tearDown(self):\r
-        test.test_support.threading_cleanup(*self._threads)\r
-        test.test_support.reap_children()\r
-\r
-\r
-class ThreadTests(BaseTestCase):\r
-\r
-    # Create a bunch of threads, let each do some work, wait until all are\r
-    # done.\r
-    def test_various_ops(self):\r
-        # This takes about n/3 seconds to run (about n/3 clumps of tasks,\r
-        # times about 1 second per clump).\r
-        NUMTASKS = 10\r
-\r
-        # no more than 3 of the 10 can run at once\r
-        sema = threading.BoundedSemaphore(value=3)\r
-        mutex = threading.RLock()\r
-        numrunning = Counter()\r
-\r
-        threads = []\r
-\r
-        for i in range(NUMTASKS):\r
-            t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)\r
-            threads.append(t)\r
-            self.assertEqual(t.ident, None)\r
-            self.assertTrue(re.match('<TestThread\(.*, initial\)>', repr(t)))\r
-            t.start()\r
-\r
-        if verbose:\r
-            print 'waiting for all tasks to complete'\r
-        for t in threads:\r
-            t.join(NUMTASKS)\r
-            self.assertTrue(not t.is_alive())\r
-            self.assertNotEqual(t.ident, 0)\r
-            self.assertFalse(t.ident is None)\r
-            self.assertTrue(re.match('<TestThread\(.*, \w+ -?\d+\)>', repr(t)))\r
-        if verbose:\r
-            print 'all tasks done'\r
-        self.assertEqual(numrunning.get(), 0)\r
-\r
-    def test_ident_of_no_threading_threads(self):\r
-        # The ident still must work for the main thread and dummy threads.\r
-        self.assertFalse(threading.currentThread().ident is None)\r
-        def f():\r
-            ident.append(threading.currentThread().ident)\r
-            done.set()\r
-        done = threading.Event()\r
-        ident = []\r
-        thread.start_new_thread(f, ())\r
-        done.wait()\r
-        self.assertFalse(ident[0] is None)\r
-        # Kill the "immortal" _DummyThread\r
-        del threading._active[ident[0]]\r
-\r
-    # run with a small(ish) thread stack size (256kB)\r
-    def test_various_ops_small_stack(self):\r
-        if verbose:\r
-            print 'with 256kB thread stack size...'\r
-        try:\r
-            threading.stack_size(262144)\r
-        except thread.error:\r
-            if verbose:\r
-                print 'platform does not support changing thread stack size'\r
-            return\r
-        self.test_various_ops()\r
-        threading.stack_size(0)\r
-\r
-    # run with a large thread stack size (1MB)\r
-    def test_various_ops_large_stack(self):\r
-        if verbose:\r
-            print 'with 1MB thread stack size...'\r
-        try:\r
-            threading.stack_size(0x100000)\r
-        except thread.error:\r
-            if verbose:\r
-                print 'platform does not support changing thread stack size'\r
-            return\r
-        self.test_various_ops()\r
-        threading.stack_size(0)\r
-\r
-    def test_foreign_thread(self):\r
-        # Check that a "foreign" thread can use the threading module.\r
-        def f(mutex):\r
-            # Calling current_thread() forces an entry for the foreign\r
-            # thread to get made in the threading._active map.\r
-            threading.current_thread()\r
-            mutex.release()\r
-\r
-        mutex = threading.Lock()\r
-        mutex.acquire()\r
-        tid = thread.start_new_thread(f, (mutex,))\r
-        # Wait for the thread to finish.\r
-        mutex.acquire()\r
-        self.assertIn(tid, threading._active)\r
-        self.assertIsInstance(threading._active[tid], threading._DummyThread)\r
-        del threading._active[tid]\r
-\r
-    # PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)\r
-    # exposed at the Python level.  This test relies on ctypes to get at it.\r
-    def test_PyThreadState_SetAsyncExc(self):\r
-        try:\r
-            import ctypes\r
-        except ImportError:\r
-            if verbose:\r
-                print "test_PyThreadState_SetAsyncExc can't import ctypes"\r
-            return  # can't do anything\r
-\r
-        set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc\r
-\r
-        class AsyncExc(Exception):\r
-            pass\r
-\r
-        exception = ctypes.py_object(AsyncExc)\r
-\r
-        # First check it works when setting the exception from the same thread.\r
-        tid = thread.get_ident()\r
-\r
-        try:\r
-            result = set_async_exc(ctypes.c_long(tid), exception)\r
-            # The exception is async, so we might have to keep the VM busy until\r
-            # it notices.\r
-            while True:\r
-                pass\r
-        except AsyncExc:\r
-            pass\r
-        else:\r
-            # This code is unreachable but it reflects the intent. If we wanted\r
-            # to be smarter the above loop wouldn't be infinite.\r
-            self.fail("AsyncExc not raised")\r
-        try:\r
-            self.assertEqual(result, 1) # one thread state modified\r
-        except UnboundLocalError:\r
-            # The exception was raised too quickly for us to get the result.\r
-            pass\r
-\r
-        # `worker_started` is set by the thread when it's inside a try/except\r
-        # block waiting to catch the asynchronously set AsyncExc exception.\r
-        # `worker_saw_exception` is set by the thread upon catching that\r
-        # exception.\r
-        worker_started = threading.Event()\r
-        worker_saw_exception = threading.Event()\r
-\r
-        class Worker(threading.Thread):\r
-            def run(self):\r
-                self.id = thread.get_ident()\r
-                self.finished = False\r
-\r
-                try:\r
-                    while True:\r
-                        worker_started.set()\r
-                        time.sleep(0.1)\r
-                except AsyncExc:\r
-                    self.finished = True\r
-                    worker_saw_exception.set()\r
-\r
-        t = Worker()\r
-        t.daemon = True # so if this fails, we don't hang Python at shutdown\r
-        t.start()\r
-        if verbose:\r
-            print "    started worker thread"\r
-\r
-        # Try a thread id that doesn't make sense.\r
-        if verbose:\r
-            print "    trying nonsensical thread id"\r
-        result = set_async_exc(ctypes.c_long(-1), exception)\r
-        self.assertEqual(result, 0)  # no thread states modified\r
-\r
-        # Now raise an exception in the worker thread.\r
-        if verbose:\r
-            print "    waiting for worker thread to get started"\r
-        ret = worker_started.wait()\r
-        self.assertTrue(ret)\r
-        if verbose:\r
-            print "    verifying worker hasn't exited"\r
-        self.assertTrue(not t.finished)\r
-        if verbose:\r
-            print "    attempting to raise asynch exception in worker"\r
-        result = set_async_exc(ctypes.c_long(t.id), exception)\r
-        self.assertEqual(result, 1) # one thread state modified\r
-        if verbose:\r
-            print "    waiting for worker to say it caught the exception"\r
-        worker_saw_exception.wait(timeout=10)\r
-        self.assertTrue(t.finished)\r
-        if verbose:\r
-            print "    all OK -- joining worker"\r
-        if t.finished:\r
-            t.join()\r
-        # else the thread is still running, and we have no way to kill it\r
-\r
-    def test_limbo_cleanup(self):\r
-        # Issue 7481: Failure to start a thread should clean up the limbo map.\r
-        def fail_new_thread(*args):\r
-            raise thread.error()\r
-        _start_new_thread = threading._start_new_thread\r
-        threading._start_new_thread = fail_new_thread\r
-        try:\r
-            t = threading.Thread(target=lambda: None)\r
-            self.assertRaises(thread.error, t.start)\r
-            self.assertFalse(\r
-                t in threading._limbo,\r
-                "Failed to cleanup _limbo map on failure of Thread.start().")\r
-        finally:\r
-            threading._start_new_thread = _start_new_thread\r
-\r
-    def test_finalize_running_thread(self):\r
-        # Issue 1402: the PyGILState_Ensure / _Release functions may be called\r
-        # very late on python exit: on deallocation of a running thread for\r
-        # example.\r
-        try:\r
-            import ctypes\r
-        except ImportError:\r
-            if verbose:\r
-                print("test_finalize_with_runnning_thread can't import ctypes")\r
-            return  # can't do anything\r
-\r
-        rc = subprocess.call([sys.executable, "-c", """if 1:\r
-            import ctypes, sys, time, thread\r
-\r
-            # This lock is used as a simple event variable.\r
-            ready = thread.allocate_lock()\r
-            ready.acquire()\r
-\r
-            # Module globals are cleared before __del__ is run,\r
-            # so we save the functions in the class dict.\r
-            class C:\r
-                ensure = ctypes.pythonapi.PyGILState_Ensure\r
-                release = ctypes.pythonapi.PyGILState_Release\r
-                def __del__(self):\r
-                    state = self.ensure()\r
-                    self.release(state)\r
-\r
-            def waitingThread():\r
-                x = C()\r
-                ready.release()\r
-                time.sleep(100)\r
-\r
-            thread.start_new_thread(waitingThread, ())\r
-            ready.acquire()  # Be sure the other thread is waiting.\r
-            sys.exit(42)\r
-            """])\r
-        self.assertEqual(rc, 42)\r
-\r
-    def test_finalize_with_trace(self):\r
-        # Issue1733757\r
-        # Avoid a deadlock when sys.settrace steps into threading._shutdown\r
-        p = subprocess.Popen([sys.executable, "-c", """if 1:\r
-            import sys, threading\r
-\r
-            # A deadlock-killer, to prevent the\r
-            # test suite from hanging forever\r
-            def killer():\r
-                import os, time\r
-                time.sleep(2)\r
-                print 'program blocked; aborting'\r
-                os._exit(2)\r
-            t = threading.Thread(target=killer)\r
-            t.daemon = True\r
-            t.start()\r
-\r
-            # This is the trace function\r
-            def func(frame, event, arg):\r
-                threading.current_thread()\r
-                return func\r
-\r
-            sys.settrace(func)\r
-            """],\r
-            stdout=subprocess.PIPE,\r
-            stderr=subprocess.PIPE)\r
-        self.addCleanup(p.stdout.close)\r
-        self.addCleanup(p.stderr.close)\r
-        stdout, stderr = p.communicate()\r
-        rc = p.returncode\r
-        self.assertFalse(rc == 2, "interpreter was blocked")\r
-        self.assertTrue(rc == 0,\r
-                        "Unexpected error: " + repr(stderr))\r
-\r
-    def test_join_nondaemon_on_shutdown(self):\r
-        # Issue 1722344\r
-        # Raising SystemExit skipped threading._shutdown\r
-        p = subprocess.Popen([sys.executable, "-c", """if 1:\r
-                import threading\r
-                from time import sleep\r
-\r
-                def child():\r
-                    sleep(1)\r
-                    # As a non-daemon thread we SHOULD wake up and nothing\r
-                    # should be torn down yet\r
-                    print "Woke up, sleep function is:", sleep\r
-\r
-                threading.Thread(target=child).start()\r
-                raise SystemExit\r
-            """],\r
-            stdout=subprocess.PIPE,\r
-            stderr=subprocess.PIPE)\r
-        self.addCleanup(p.stdout.close)\r
-        self.addCleanup(p.stderr.close)\r
-        stdout, stderr = p.communicate()\r
-        self.assertEqual(stdout.strip(),\r
-            "Woke up, sleep function is: <built-in function sleep>")\r
-        stderr = re.sub(r"^\[\d+ refs\]", "", stderr, flags=re.MULTILINE).strip()\r
-        self.assertEqual(stderr, "")\r
-\r
-    def test_enumerate_after_join(self):\r
-        # Try hard to trigger #1703448: a thread is still returned in\r
-        # threading.enumerate() after it has been join()ed.\r
-        enum = threading.enumerate\r
-        old_interval = sys.getcheckinterval()\r
-        try:\r
-            for i in xrange(1, 100):\r
-                # Try a couple times at each thread-switching interval\r
-                # to get more interleavings.\r
-                sys.setcheckinterval(i // 5)\r
-                t = threading.Thread(target=lambda: None)\r
-                t.start()\r
-                t.join()\r
-                l = enum()\r
-                self.assertNotIn(t, l,\r
-                    "#1703448 triggered after %d trials: %s" % (i, l))\r
-        finally:\r
-            sys.setcheckinterval(old_interval)\r
-\r
-    def test_no_refcycle_through_target(self):\r
-        class RunSelfFunction(object):\r
-            def __init__(self, should_raise):\r
-                # The links in this refcycle from Thread back to self\r
-                # should be cleaned up when the thread completes.\r
-                self.should_raise = should_raise\r
-                self.thread = threading.Thread(target=self._run,\r
-                                               args=(self,),\r
-                                               kwargs={'yet_another':self})\r
-                self.thread.start()\r
-\r
-            def _run(self, other_ref, yet_another):\r
-                if self.should_raise:\r
-                    raise SystemExit\r
-\r
-        cyclic_object = RunSelfFunction(should_raise=False)\r
-        weak_cyclic_object = weakref.ref(cyclic_object)\r
-        cyclic_object.thread.join()\r
-        del cyclic_object\r
-        self.assertEqual(None, weak_cyclic_object(),\r
-                         msg=('%d references still around' %\r
-                              sys.getrefcount(weak_cyclic_object())))\r
-\r
-        raising_cyclic_object = RunSelfFunction(should_raise=True)\r
-        weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)\r
-        raising_cyclic_object.thread.join()\r
-        del raising_cyclic_object\r
-        self.assertEqual(None, weak_raising_cyclic_object(),\r
-                         msg=('%d references still around' %\r
-                              sys.getrefcount(weak_raising_cyclic_object())))\r
-\r
-\r
-class ThreadJoinOnShutdown(BaseTestCase):\r
-\r
-    def _run_and_join(self, script):\r
-        script = """if 1:\r
-            import sys, os, time, threading\r
-\r
-            # a thread, which waits for the main program to terminate\r
-            def joiningfunc(mainthread):\r
-                mainthread.join()\r
-                print 'end of thread'\r
-        \n""" + script\r
-\r
-        p = subprocess.Popen([sys.executable, "-c", script], stdout=subprocess.PIPE)\r
-        rc = p.wait()\r
-        data = p.stdout.read().replace('\r', '')\r
-        p.stdout.close()\r
-        self.assertEqual(data, "end of main\nend of thread\n")\r
-        self.assertFalse(rc == 2, "interpreter was blocked")\r
-        self.assertTrue(rc == 0, "Unexpected error")\r
-\r
-    def test_1_join_on_shutdown(self):\r
-        # The usual case: on exit, wait for a non-daemon thread\r
-        script = """if 1:\r
-            import os\r
-            t = threading.Thread(target=joiningfunc,\r
-                                 args=(threading.current_thread(),))\r
-            t.start()\r
-            time.sleep(0.1)\r
-            print 'end of main'\r
-            """\r
-        self._run_and_join(script)\r
-\r
-\r
-    def test_2_join_in_forked_process(self):\r
-        # Like the test above, but from a forked interpreter\r
-        import os\r
-        if not hasattr(os, 'fork'):\r
-            return\r
-        script = """if 1:\r
-            childpid = os.fork()\r
-            if childpid != 0:\r
-                os.waitpid(childpid, 0)\r
-                sys.exit(0)\r
-\r
-            t = threading.Thread(target=joiningfunc,\r
-                                 args=(threading.current_thread(),))\r
-            t.start()\r
-            print 'end of main'\r
-            """\r
-        self._run_and_join(script)\r
-\r
-    def test_3_join_in_forked_from_thread(self):\r
-        # Like the test above, but fork() was called from a worker thread\r
-        # In the forked process, the main Thread object must be marked as stopped.\r
-        import os\r
-        if not hasattr(os, 'fork'):\r
-            return\r
-        # Skip platforms with known problems forking from a worker thread.\r
-        # See http://bugs.python.org/issue3863.\r
-        if sys.platform in ('freebsd4', 'freebsd5', 'freebsd6', 'netbsd5',\r
-                           'os2emx'):\r
-            print >>sys.stderr, ('Skipping test_3_join_in_forked_from_thread'\r
-                                 ' due to known OS bugs on'), sys.platform\r
-            return\r
-        script = """if 1:\r
-            main_thread = threading.current_thread()\r
-            def worker():\r
-                childpid = os.fork()\r
-                if childpid != 0:\r
-                    os.waitpid(childpid, 0)\r
-                    sys.exit(0)\r
-\r
-                t = threading.Thread(target=joiningfunc,\r
-                                     args=(main_thread,))\r
-                print 'end of main'\r
-                t.start()\r
-                t.join() # Should not block: main_thread is already stopped\r
-\r
-            w = threading.Thread(target=worker)\r
-            w.start()\r
-            """\r
-        self._run_and_join(script)\r
-\r
-    def assertScriptHasOutput(self, script, expected_output):\r
-        p = subprocess.Popen([sys.executable, "-c", script],\r
-                             stdout=subprocess.PIPE)\r
-        rc = p.wait()\r
-        data = p.stdout.read().decode().replace('\r', '')\r
-        self.assertEqual(rc, 0, "Unexpected error")\r
-        self.assertEqual(data, expected_output)\r
-\r
-    @unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")\r
-    def test_4_joining_across_fork_in_worker_thread(self):\r
-        # There used to be a possible deadlock when forking from a child\r
-        # thread.  See http://bugs.python.org/issue6643.\r
-\r
-        # Skip platforms with known problems forking from a worker thread.\r
-        # See http://bugs.python.org/issue3863.\r
-        if sys.platform in ('freebsd4', 'freebsd5', 'freebsd6', 'os2emx'):\r
-            raise unittest.SkipTest('due to known OS bugs on ' + sys.platform)\r
-\r
-        # The script takes the following steps:\r
-        # - The main thread in the parent process starts a new thread and then\r
-        #   tries to join it.\r
-        # - The join operation acquires the Lock inside the thread's _block\r
-        #   Condition.  (See threading.py:Thread.join().)\r
-        # - We stub out the acquire method on the condition to force it to wait\r
-        #   until the child thread forks.  (See LOCK ACQUIRED HERE)\r
-        # - The child thread forks.  (See LOCK HELD and WORKER THREAD FORKS\r
-        #   HERE)\r
-        # - The main thread of the parent process enters Condition.wait(),\r
-        #   which releases the lock on the child thread.\r
-        # - The child process returns.  Without the necessary fix, when the\r
-        #   main thread of the child process (which used to be the child thread\r
-        #   in the parent process) attempts to exit, it will try to acquire the\r
-        #   lock in the Thread._block Condition object and hang, because the\r
-        #   lock was held across the fork.\r
-\r
-        script = """if 1:\r
-            import os, time, threading\r
-\r
-            finish_join = False\r
-            start_fork = False\r
-\r
-            def worker():\r
-                # Wait until this thread's lock is acquired before forking to\r
-                # create the deadlock.\r
-                global finish_join\r
-                while not start_fork:\r
-                    time.sleep(0.01)\r
-                # LOCK HELD: Main thread holds lock across this call.\r
-                childpid = os.fork()\r
-                finish_join = True\r
-                if childpid != 0:\r
-                    # Parent process just waits for child.\r
-                    os.waitpid(childpid, 0)\r
-                # Child process should just return.\r
-\r
-            w = threading.Thread(target=worker)\r
-\r
-            # Stub out the private condition variable's lock acquire method.\r
-            # This acquires the lock and then waits until the child has forked\r
-            # before returning, which will release the lock soon after.  If\r
-            # someone else tries to fix this test case by acquiring this lock\r
-            # before forking instead of resetting it, the test case will\r
-            # deadlock when it shouldn't.\r
-            condition = w._block\r
-            orig_acquire = condition.acquire\r
-            call_count_lock = threading.Lock()\r
-            call_count = 0\r
-            def my_acquire():\r
-                global call_count\r
-                global start_fork\r
-                orig_acquire()  # LOCK ACQUIRED HERE\r
-                start_fork = True\r
-                if call_count == 0:\r
-                    while not finish_join:\r
-                        time.sleep(0.01)  # WORKER THREAD FORKS HERE\r
-                with call_count_lock:\r
-                    call_count += 1\r
-            condition.acquire = my_acquire\r
-\r
-            w.start()\r
-            w.join()\r
-            print('end of main')\r
-            """\r
-        self.assertScriptHasOutput(script, "end of main\n")\r
-\r
-    @unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")\r
-    def test_5_clear_waiter_locks_to_avoid_crash(self):\r
-        # Check that a spawned thread that forks doesn't segfault on certain\r
-        # platforms, namely OS X.  This used to happen if there was a waiter\r
-        # lock in the thread's condition variable's waiters list.  Even though\r
-        # we know the lock will be held across the fork, it is not safe to\r
-        # release locks held across forks on all platforms, so releasing the\r
-        # waiter lock caused a segfault on OS X.  Furthermore, since locks on\r
-        # OS X are (as of this writing) implemented with a mutex + condition\r
-        # variable instead of a semaphore, while we know that the Python-level\r
-        # lock will be acquired, we can't know if the internal mutex will be\r
-        # acquired at the time of the fork.\r
-\r
-        # Skip platforms with known problems forking from a worker thread.\r
-        # See http://bugs.python.org/issue3863.\r
-        if sys.platform in ('freebsd4', 'freebsd5', 'freebsd6', 'os2emx'):\r
-            raise unittest.SkipTest('due to known OS bugs on ' + sys.platform)\r
-        script = """if True:\r
-            import os, time, threading\r
-\r
-            start_fork = False\r
-\r
-            def worker():\r
-                # Wait until the main thread has attempted to join this thread\r
-                # before continuing.\r
-                while not start_fork:\r
-                    time.sleep(0.01)\r
-                childpid = os.fork()\r
-                if childpid != 0:\r
-                    # Parent process just waits for child.\r
-                    (cpid, rc) = os.waitpid(childpid, 0)\r
-                    assert cpid == childpid\r
-                    assert rc == 0\r
-                    print('end of worker thread')\r
-                else:\r
-                    # Child process should just return.\r
-                    pass\r
-\r
-            w = threading.Thread(target=worker)\r
-\r
-            # Stub out the private condition variable's _release_save method.\r
-            # This releases the condition's lock and flips the global that\r
-            # causes the worker to fork.  At this point, the problematic waiter\r
-            # lock has been acquired once by the waiter and has been put onto\r
-            # the waiters list.\r
-            condition = w._block\r
-            orig_release_save = condition._release_save\r
-            def my_release_save():\r
-                global start_fork\r
-                orig_release_save()\r
-                # Waiter lock held here, condition lock released.\r
-                start_fork = True\r
-            condition._release_save = my_release_save\r
-\r
-            w.start()\r
-            w.join()\r
-            print('end of main thread')\r
-            """\r
-        output = "end of worker thread\nend of main thread\n"\r
-        self.assertScriptHasOutput(script, output)\r
-\r
-\r
-class ThreadingExceptionTests(BaseTestCase):\r
-    # A RuntimeError should be raised if Thread.start() is called\r
-    # multiple times.\r
-    def test_start_thread_again(self):\r
-        thread = threading.Thread()\r
-        thread.start()\r
-        self.assertRaises(RuntimeError, thread.start)\r
-\r
-    def test_joining_current_thread(self):\r
-        current_thread = threading.current_thread()\r
-        self.assertRaises(RuntimeError, current_thread.join)\r
-\r
-    def test_joining_inactive_thread(self):\r
-        thread = threading.Thread()\r
-        self.assertRaises(RuntimeError, thread.join)\r
-\r
-    def test_daemonize_active_thread(self):\r
-        thread = threading.Thread()\r
-        thread.start()\r
-        self.assertRaises(RuntimeError, setattr, thread, "daemon", True)\r
-\r
-\r
-class LockTests(lock_tests.LockTests):\r
-    locktype = staticmethod(threading.Lock)\r
-\r
-class RLockTests(lock_tests.RLockTests):\r
-    locktype = staticmethod(threading.RLock)\r
-\r
-class EventTests(lock_tests.EventTests):\r
-    eventtype = staticmethod(threading.Event)\r
-\r
-class ConditionAsRLockTests(lock_tests.RLockTests):\r
-    # A Condition uses an RLock by default and exports its API.\r
-    locktype = staticmethod(threading.Condition)\r
-\r
-class ConditionTests(lock_tests.ConditionTests):\r
-    condtype = staticmethod(threading.Condition)\r
-\r
-class SemaphoreTests(lock_tests.SemaphoreTests):\r
-    semtype = staticmethod(threading.Semaphore)\r
-\r
-class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):\r
-    semtype = staticmethod(threading.BoundedSemaphore)\r
-\r
-    @unittest.skipUnless(sys.platform == 'darwin', 'test macosx problem')\r
-    def test_recursion_limit(self):\r
-        # Issue 9670\r
-        # test that excessive recursion within a non-main thread causes\r
-        # an exception rather than crashing the interpreter on platforms\r
-        # like Mac OS X or FreeBSD which have small default stack sizes\r
-        # for threads\r
-        script = """if True:\r
-            import threading\r
-\r
-            def recurse():\r
-                return recurse()\r
-\r
-            def outer():\r
-                try:\r
-                    recurse()\r
-                except RuntimeError:\r
-                    pass\r
-\r
-            w = threading.Thread(target=outer)\r
-            w.start()\r
-            w.join()\r
-            print('end of main thread')\r
-            """\r
-        expected_output = "end of main thread\n"\r
-        p = subprocess.Popen([sys.executable, "-c", script],\r
-                             stdout=subprocess.PIPE)\r
-        stdout, stderr = p.communicate()\r
-        data = stdout.decode().replace('\r', '')\r
-        self.assertEqual(p.returncode, 0, "Unexpected error")\r
-        self.assertEqual(data, expected_output)\r
-\r
-def test_main():\r
-    test.test_support.run_unittest(LockTests, RLockTests, EventTests,\r
-                                   ConditionAsRLockTests, ConditionTests,\r
-                                   SemaphoreTests, BoundedSemaphoreTests,\r
-                                   ThreadTests,\r
-                                   ThreadJoinOnShutdown,\r
-                                   ThreadingExceptionTests,\r
-                                   )\r
-\r
-if __name__ == "__main__":\r
-    test_main()\r