edk2: Remove AppPkg, StdLib, StdLibPrivateInternalFiles
diff --git a/AppPkg/Applications/Python/Python-2.7.2/Lib/email/test/test_email_codecs_renamed.py b/AppPkg/Applications/Python/Python-2.7.2/Lib/email/test/test_email_codecs_renamed.py
deleted file mode 100644
index be0a996..0000000
--- a/AppPkg/Applications/Python/Python-2.7.2/Lib/email/test/test_email_codecs_renamed.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# Copyright (C) 2002-2006 Python Software Foundation
-# Contact: email-sig@python.org
-# email package unit tests for (optional) Asian codecs
-
-import unittest
-from test.test_support import run_unittest
-
-from email.test.test_email import TestEmailBase
-from email.charset import Charset
-from email.header import Header, decode_header
-from email.message import Message
-
-# We're compatible with Python 2.3, but it doesn't have the built-in Asian
-# codecs, so we have to skip all these tests.
-try:
-    unicode('foo', 'euc-jp')
-except LookupError:
-    raise unittest.SkipTest
-
-
-\f
-class TestEmailAsianCodecs(TestEmailBase):
-    def test_japanese_codecs(self):
-        eq = self.ndiffAssertEqual
-        j = Charset("euc-jp")
-        g = Charset("iso-8859-1")
-        h = Header("Hello World!")
-        jhello = '\xa5\xcf\xa5\xed\xa1\xbc\xa5\xef\xa1\xbc\xa5\xeb\xa5\xc9\xa1\xaa'
-        ghello = 'Gr\xfc\xdf Gott!'
-        h.append(jhello, j)
-        h.append(ghello, g)
-        # BAW: This used to -- and maybe should -- fold the two iso-8859-1
-        # chunks into a single encoded word.  However it doesn't violate the
-        # standard to have them as two encoded chunks and maybe it's
-        # reasonable <wink> for each .append() call to result in a separate
-        # encoded word.
-        eq(h.encode(), """\
-Hello World! =?iso-2022-jp?b?GyRCJU8lbSE8JW8hPCVrJUkhKhsoQg==?=
- =?iso-8859-1?q?Gr=FC=DF?= =?iso-8859-1?q?_Gott!?=""")
-        eq(decode_header(h.encode()),
-           [('Hello World!', None),
-            ('\x1b$B%O%m!<%o!<%k%I!*\x1b(B', 'iso-2022-jp'),
-            ('Gr\xfc\xdf Gott!', 'iso-8859-1')])
-        long = 'test-ja \xa4\xd8\xc5\xea\xb9\xc6\xa4\xb5\xa4\xec\xa4\xbf\xa5\xe1\xa1\xbc\xa5\xeb\xa4\xcf\xbb\xca\xb2\xf1\xbc\xd4\xa4\xce\xbe\xb5\xc7\xa7\xa4\xf2\xc2\xd4\xa4\xc3\xa4\xc6\xa4\xa4\xa4\xde\xa4\xb9'
-        h = Header(long, j, header_name="Subject")
-        # test a very long header
-        enc = h.encode()
-        # TK: splitting point may differ by codec design and/or Header encoding
-        eq(enc , """\
-=?iso-2022-jp?b?dGVzdC1qYSAbJEIkWEVqOUYkNSRsJD8lYSE8JWskTztKGyhC?=
- =?iso-2022-jp?b?GyRCMnE8VCROPjVHJyRyQlQkQyRGJCQkXiQ5GyhC?=""")
-        # TK: full decode comparison
-        eq(h.__unicode__().encode('euc-jp'), long)
-
-    def test_payload_encoding(self):
-        jhello = '\xa5\xcf\xa5\xed\xa1\xbc\xa5\xef\xa1\xbc\xa5\xeb\xa5\xc9\xa1\xaa'
-        jcode  = 'euc-jp'
-        msg = Message()
-        msg.set_payload(jhello, jcode)
-        ustr = unicode(msg.get_payload(), msg.get_content_charset())
-        self.assertEqual(jhello, ustr.encode(jcode))
-
-
-\f
-def suite():
-    suite = unittest.TestSuite()
-    suite.addTest(unittest.makeSuite(TestEmailAsianCodecs))
-    return suite
-
-
-def test_main():
-    run_unittest(TestEmailAsianCodecs)
-
-
-\f
-if __name__ == '__main__':
-    unittest.main(defaultTest='suite')
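
For reference, the removed tests exercised the email.charset, email.header, and email.message APIs with the EUC-JP codec. Below is a minimal standalone sketch of the same round-trips, assuming a Python 2.7 interpreter with the Japanese codecs available; the byte strings are the same EUC-JP and Latin-1 literals used in the deleted test.

    from email.charset import Charset
    from email.header import Header, decode_header
    from email.message import Message

    jhello = '\xa5\xcf\xa5\xed\xa1\xbc\xa5\xef\xa1\xbc\xa5\xeb\xa5\xc9\xa1\xaa'  # EUC-JP bytes
    ghello = 'Gr\xfc\xdf Gott!'                                                  # Latin-1 bytes

    # Each append() contributes its own RFC 2047 encoded word; euc-jp header
    # text is emitted as iso-2022-jp base64 words, as the test expects.
    h = Header("Hello World!")
    h.append(jhello, Charset("euc-jp"))
    h.append(ghello, Charset("iso-8859-1"))
    encoded = h.encode()
    print encoded
    print decode_header(encoded)   # list of (bytes, charset-or-None) pairs

    # Payload round-trip: set_payload() converts the body to the charset's
    # output charset (iso-2022-jp for euc-jp) and records it in Content-Type,
    # so decoding with get_content_charset() recovers the original text.
    msg = Message()
    msg.set_payload(jhello, 'euc-jp')
    ustr = unicode(msg.get_payload(), msg.get_content_charset())
    print ustr.encode('euc-jp') == jhello   # True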