# Copyright (C) 2002-2006 Python Software Foundation
# Author: Ben Gertzfield, Barry Warsaw
# Contact: email-sig@python.org

"""Header encoding and decoding functionality."""

__all__ = [
    'Header',
    'decode_header',
    'make_header',
    ]

import re
import binascii

import email.quoprimime
import email.base64mime

from email.errors import HeaderParseError
from email.charset import Charset

NL = '\n'
SPACE = ' '
USPACE = u' '
SPACE8 = ' ' * 8
UEMPTYSTRING = u''

MAXLINELEN = 76

USASCII = Charset('us-ascii')
UTF8 = Charset('utf-8')

# Match encoded-word strings in the form =?charset?q?Hello_World?=
ecre = re.compile(r'''
  =\?                   # literal =?
  (?P<charset>[^?]*?)   # non-greedy up to the next ? is the charset
  \?                    # literal ?
  (?P<encoding>[qb])    # either a "q" or a "b", case insensitive
  \?                    # literal ?
  (?P<encoded>.*?)      # non-greedy up to the next ?= is the encoded string
  \?=                   # literal ?=
  (?=[ \t]|$)           # whitespace or the end of the string
  ''', re.VERBOSE | re.IGNORECASE | re.MULTILINE)

# Field name regexp, including trailing colon, but not separating whitespace,
# according to RFC 2822.  Character range is from exclamation mark to tilde.
# For use with .match()
fcre = re.compile(r'[\041-\176]+:$')

# Find a header embedded in a putative header value.  Used to check for
# header injection attack.
_embeded_header = re.compile(r'\n[^ \t]+:')
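# For example (hypothetical value): a value like 'foo\nX-Injected: evil' would
# match, because a newline followed by a field name and colon indicates an
# injected header.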


# Helpers
_max_append = email.quoprimime._max_append


def decode_header(header):
    """Decode a message header value without converting charset.

    Returns a list of (decoded_string, charset) pairs containing each of the
    decoded parts of the header.  Charset is None for non-encoded parts of the
    header, otherwise a lower-case string containing the name of the character
    set specified in the encoded string.

    An email.errors.HeaderParseError may be raised when certain decoding
    errors occur (e.g. a base64 decoding exception).
    """
    # If no encoding, just return the header
    header = str(header)
    if not ecre.search(header):
        return [(header, None)]
    decoded = []
    dec = ''
    for line in header.splitlines():
        # This line might not have an encoding in it
        if not ecre.search(line):
            decoded.append((line, None))
            continue
        parts = ecre.split(line)
        while parts:
            unenc = parts.pop(0).strip()
            if unenc:
                # Should we continue a long line?
                if decoded and decoded[-1][1] is None:
                    decoded[-1] = (decoded[-1][0] + SPACE + unenc, None)
                else:
                    decoded.append((unenc, None))
            if parts:
                charset, encoding = [s.lower() for s in parts[0:2]]
                encoded = parts[2]
                dec = None
                if encoding == 'q':
                    dec = email.quoprimime.header_decode(encoded)
                elif encoding == 'b':
                    paderr = len(encoded) % 4   # Postel's law: add missing padding
                    if paderr:
                        encoded += '==='[:4 - paderr]
                    try:
                        dec = email.base64mime.decode(encoded)
                    except binascii.Error:
                        # Turn this into a higher level exception.  BAW: Right
                        # now we throw the lower level exception away but
                        # when/if we get exception chaining, we'll preserve it.
                        raise HeaderParseError
                if dec is None:
                    dec = encoded

                if decoded and decoded[-1][1] == charset:
                    decoded[-1] = (decoded[-1][0] + dec, decoded[-1][1])
                else:
                    decoded.append((dec, charset))
            del parts[0:3]
    return decoded


def make_header(decoded_seq, maxlinelen=None, header_name=None,
                continuation_ws=' '):
    """Create a Header from a sequence of pairs as returned by decode_header()

    decode_header() takes a header value string and returns a sequence of
    pairs of the format (decoded_string, charset) where charset is the string
    name of the character set.

    This function takes one of those sequences of pairs and returns a Header
    instance.  Optional maxlinelen, header_name, and continuation_ws are as in
    the Header constructor.
    """
    h = Header(maxlinelen=maxlinelen, header_name=header_name,
               continuation_ws=continuation_ws)
    for s, charset in decoded_seq:
        # None means us-ascii but we can simply pass it on to h.append()
        if charset is not None and not isinstance(charset, Charset):
            charset = Charset(charset)
        h.append(s, charset)
    return h


class Header:
    def __init__(self, s=None, charset=None,
                 maxlinelen=None, header_name=None,
                 continuation_ws=' ', errors='strict'):
        """Create a MIME-compliant header that can contain many character sets.

        Optional s is the initial header value.  If None, the initial header
        value is not set.  You can later append to the header with .append()
        method calls.  s may be a byte string or a Unicode string, but see the
        .append() documentation for semantics.

        Optional charset serves two purposes: it has the same meaning as the
        charset argument to the .append() method.  It also sets the default
        character set for all subsequent .append() calls that omit the charset
        argument.  If charset is not provided in the constructor, the us-ascii
        charset is used both as s's initial charset and as the default for
        subsequent .append() calls.

        The maximum line length can be specified explicitly via maxlinelen.
        For splitting the first line to a shorter value (to account for the
        field header which isn't included in s, e.g. `Subject') pass in the
        name of the field in header_name.  The default maxlinelen is 76.

        continuation_ws must be RFC 2822 compliant folding whitespace (usually
        either a space or a hard tab) which will be prepended to continuation
        lines.

        errors is passed through to the .append() call.
        """
        if charset is None:
            charset = USASCII
        if not isinstance(charset, Charset):
            charset = Charset(charset)
        self._charset = charset
        self._continuation_ws = continuation_ws
        cws_expanded_len = len(continuation_ws.replace('\t', SPACE8))
        # BAW: I believe `chunks' and `maxlinelen' should be non-public.
        self._chunks = []
        if s is not None:
            self.append(s, charset, errors)
        if maxlinelen is None:
            maxlinelen = MAXLINELEN
        if header_name is None:
            # We don't know anything about the field header so the first line
            # is the same length as subsequent lines.
            self._firstlinelen = maxlinelen
        else:
            # The first line should be shorter to take into account the field
            # header.  Also subtract off 2 extra for the colon and space.
            self._firstlinelen = maxlinelen - len(header_name) - 2
        # Second and subsequent lines should subtract off the length in
        # columns of the continuation whitespace prefix.
        self._maxlinelen = maxlinelen - cws_expanded_len

    def __str__(self):
        """A synonym for self.encode()."""
        return self.encode()

    def __unicode__(self):
        """Helper for the built-in unicode function."""
        uchunks = []
        lastcs = None
        for s, charset in self._chunks:
            # We must preserve spaces between encoded and non-encoded word
            # boundaries, which means for us we need to add a space when we go
            # from a charset to None/us-ascii, or from None/us-ascii to a
            # charset.  Only do this for the second and subsequent chunks.
            nextcs = charset
            if uchunks:
                if lastcs not in (None, 'us-ascii'):
                    if nextcs in (None, 'us-ascii'):
                        uchunks.append(USPACE)
                        nextcs = None
                elif nextcs not in (None, 'us-ascii'):
                    uchunks.append(USPACE)
            lastcs = nextcs
            uchunks.append(unicode(s, str(charset)))
        return UEMPTYSTRING.join(uchunks)

    # Rich comparison operators for equality only.  BAW: does it make sense to
    # have or explicitly disable <, <=, >, >= operators?
    def __eq__(self, other):
        # other may be a Header or a string.  Both are fine so coerce
        # ourselves to a string, swap the args and do another comparison.
        return other == self.encode()

    def __ne__(self, other):
        return not self == other

    def append(self, s, charset=None, errors='strict'):
        """Append a string to the MIME header.

        Optional charset, if given, should be a Charset instance or the name
        of a character set (which will be converted to a Charset instance).  A
        value of None (the default) means that the charset given in the
        constructor is used.

        s may be a byte string or a Unicode string.  If it is a byte string
        (i.e. isinstance(s, str) is true), then charset is the encoding of
        that byte string, and a UnicodeError will be raised if the string
        cannot be decoded with that charset.  If s is a Unicode string, then
        charset is a hint specifying the character set of the characters in
        the string.  In this case, when producing an RFC 2822 compliant header
        using RFC 2047 rules, the Unicode string will be encoded using the
        following charsets in order: us-ascii, the charset hint, utf-8.  The
        first character set not to provoke a UnicodeError is used.

        Optional `errors' is passed as the third argument to any unicode() or
        ustr.encode() call.
        """
        if charset is None:
            charset = self._charset
        elif not isinstance(charset, Charset):
            charset = Charset(charset)
        # If the charset is our faux 8bit charset, leave the string unchanged
        if charset != '8bit':
            # We need to test that the string can be converted to unicode and
            # back to a byte string, given the input and output codecs of the
            # charset.
            if isinstance(s, str):
                # Possibly raise UnicodeError if the byte string can't be
                # converted to a unicode with the input codec of the charset.
                incodec = charset.input_codec or 'us-ascii'
                ustr = unicode(s, incodec, errors)
                # Now make sure that the unicode could be converted back to a
                # byte string with the output codec, which may be different
                # than the input codec.  Still, use the original byte string.
                outcodec = charset.output_codec or 'us-ascii'
                ustr.encode(outcodec, errors)
            elif isinstance(s, unicode):
                # Now we have to be sure the unicode string can be converted
                # to a byte string with a reasonable output codec.  We want to
                # use the byte string in the chunk.
                for charset in USASCII, charset, UTF8:
                    try:
                        outcodec = charset.output_codec or 'us-ascii'
                        s = s.encode(outcodec, errors)
                        break
                    except UnicodeError:
                        pass
                else:
                    assert False, 'utf-8 conversion failed'
        self._chunks.append((s, charset))

    def _split(self, s, charset, maxlinelen, splitchars):
        # Split up a header safely for use with encode_chunks.
        splittable = charset.to_splittable(s)
        encoded = charset.from_splittable(splittable, True)
        elen = charset.encoded_header_len(encoded)
        # If the line's encoded length fits, just return it
        if elen <= maxlinelen:
            return [(encoded, charset)]
        # If we have undetermined raw 8bit characters sitting in a byte
        # string, we really don't know what the right thing to do is.  We
        # can't really split it because it might be multibyte data which we
        # could break if we split it between pairs.  The least harm seems to
        # be to not split the header at all, but that means they could go out
        # longer than maxlinelen.
        if charset == '8bit':
            return [(s, charset)]
        # BAW: I'm not sure what the right test here is.  What we're trying to
        # do is be faithful to RFC 2822's recommendation that (section 2.2.3):
        #
        # "Note: Though structured field bodies are defined in such a way that
        #  folding can take place between many of the lexical tokens (and even
        #  within some of the lexical tokens), folding SHOULD be limited to
        #  placing the CRLF at higher-level syntactic breaks."
        #
        # For now, I can only imagine doing this when the charset is us-ascii,
        # although it's possible that other charsets may also benefit from the
        # higher-level syntactic breaks.
        elif charset == 'us-ascii':
            return self._split_ascii(s, charset, maxlinelen, splitchars)
        # BAW: should we use encoded?
        elif elen == len(s):
            # We can split on _maxlinelen boundaries because we know that the
            # encoding won't change the size of the string
            splitpnt = maxlinelen
            first = charset.from_splittable(splittable[:splitpnt], False)
            last = charset.from_splittable(splittable[splitpnt:], False)
        else:
            # Binary search for split point
            first, last = _binsplit(splittable, charset, maxlinelen)
        # first is of the proper length so just wrap it in the appropriate
        # chrome.  last must be recursively split.
        fsplittable = charset.to_splittable(first)
        fencoded = charset.from_splittable(fsplittable, True)
        chunk = [(fencoded, charset)]
        return chunk + self._split(last, charset, self._maxlinelen, splitchars)

    def _split_ascii(self, s, charset, firstlen, splitchars):
        chunks = _split_ascii(s, firstlen, self._maxlinelen,
                              self._continuation_ws, splitchars)
        return zip(chunks, [charset]*len(chunks))

    def _encode_chunks(self, newchunks, maxlinelen):
        # MIME-encode a header with many different charsets and/or encodings.
        #
        # Given a list of pairs (string, charset), return a MIME-encoded
        # string suitable for use in a header field.  Each pair may have
        # different charsets and/or encodings, and the resulting header will
        # accurately reflect each setting.
        #
        # Each encoding can be email.utils.QP (quoted-printable, for
        # ASCII-like character sets like iso-8859-1), email.utils.BASE64
        # (Base64, for non-ASCII like character sets like KOI8-R and
        # iso-2022-jp), or None (no encoding).
        #
        # Each pair will be represented on a separate line; the resulting
        # string will be in the format:
        #
        # =?charset1?q?Mar=EDa_Gonz=E1lez_Alonso?=\n
        #  =?charset2?b?SvxyZ2VuIEL2aW5n?=
        chunks = []
        for header, charset in newchunks:
            if not header:
                continue
            if charset is None or charset.header_encoding is None:
                s = header
            else:
                s = charset.header_encode(header)
            # Don't add more folding whitespace than necessary
            if chunks and chunks[-1].endswith(' '):
                extra = ''
            else:
                extra = ' '
            _max_append(chunks, s, maxlinelen, extra)
        joiner = NL + self._continuation_ws
        return joiner.join(chunks)

    def encode(self, splitchars=';, '):
        """Encode a message header into an RFC-compliant format.

        There are many issues involved in converting a given string for use in
        an email header.  Only certain character sets are readable in most
        email clients, and as header strings can only contain a subset of
        7-bit ASCII, care must be taken to properly convert and encode (with
        Base64 or quoted-printable) header strings.  In addition, there is a
        75-character length limit on any given encoded header field, so
        line-wrapping must be performed, even with double-byte character sets.

        This method will do its best to convert the string to the correct
        character set used in email, and encode and line wrap it safely with
        the appropriate scheme for that character set.

        If the given charset is not known or an error occurs during
        conversion, this function will return the header untouched.

        Optional splitchars is a string containing characters to split long
        ASCII lines on, in rough support of RFC 2822's `highest level
        syntactic breaks'.  This doesn't affect RFC 2047 encoded lines.
        """
        newchunks = []
        maxlinelen = self._firstlinelen
        lastlen = 0
        for s, charset in self._chunks:
            # The first bit of the next chunk should be just long enough to
            # fill the next line.  Don't forget the space separating the
            # encoded words.
            targetlen = maxlinelen - lastlen - 1
            if targetlen < charset.encoded_header_len(''):
                # Stick it on the next line
                targetlen = maxlinelen
            newchunks += self._split(s, charset, targetlen, splitchars)
            lastchunk, lastcharset = newchunks[-1]
            lastlen = lastcharset.encoded_header_len(lastchunk)
        value = self._encode_chunks(newchunks, maxlinelen)
        if _embeded_header.search(value):
            raise HeaderParseError("header value appears to contain "
                "an embedded header: {!r}".format(value))
        return value


def _split_ascii(s, firstlen, restlen, continuation_ws, splitchars):
    lines = []
    maxlen = firstlen
    for line in s.splitlines():
        # Ignore any leading whitespace (i.e. continuation whitespace) already
        # on the line, since we'll be adding our own.
        line = line.lstrip()
        if len(line) < maxlen:
            lines.append(line)
            maxlen = restlen
            continue
        # Attempt to split the line at the highest-level syntactic break
        # possible.  Note that we don't have a lot of smarts about field
        # syntax; we just try to break on semi-colons, then commas, then
        # whitespace.
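        # For example (illustrative value): with the default splitchars of
        # ';, ', an over-long 'foo; bar, baz qux' is broken at the semicolon
        # before commas or spaces are considered.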
        for ch in splitchars:
            if ch in line:
                break
        else:
            # There's nothing useful to split the line on, not even spaces, so
            # just append this line unchanged
            lines.append(line)
            maxlen = restlen
            continue
        # Now split the line on the character plus trailing whitespace
        cre = re.compile(r'%s\s*' % ch)
        if ch in ';,':
            eol = ch
        else:
            eol = ''
        joiner = eol + ' '
        joinlen = len(joiner)
        wslen = len(continuation_ws.replace('\t', SPACE8))
        this = []
        linelen = 0
        for part in cre.split(line):
            curlen = linelen + max(0, len(this)-1) * joinlen
            partlen = len(part)
            onfirstline = not lines
            # We don't want to split after the field name, if we're on the
            # first line and the field name is present in the header string.
            if ch == ' ' and onfirstline and \
               len(this) == 1 and fcre.match(this[0]):
                this.append(part)
                linelen += partlen
            elif curlen + partlen > maxlen:
                if this:
                    lines.append(joiner.join(this) + eol)
                # If this part is longer than maxlen and we aren't already
                # splitting on whitespace, try to recursively split this line
                # on whitespace.
                if partlen > maxlen and ch != ' ':
                    subl = _split_ascii(part, maxlen, restlen,
                                        continuation_ws, ' ')
                    lines.extend(subl[:-1])
                    this = [subl[-1]]
                else:
                    this = [part]
                linelen = wslen + len(this[-1])
                maxlen = restlen
            else:
                this.append(part)
                linelen += partlen
        # Put any left over parts on a line by themselves
        if this:
            lines.append(joiner.join(this))
    return lines


def _binsplit(splittable, charset, maxlinelen):
    i = 0
    j = len(splittable)
    while i < j:
        # Invariants:
        # 1. splittable[:k] fits for all k <= i (note that we *assume*,
        #    at the start, that splittable[:0] fits).
        # 2. splittable[:k] does not fit for any k > j (at the start,
        #    this means we shouldn't look at any k > len(splittable)).
        # 3. We don't know about splittable[:k] for k in i+1..j.
        # 4. We want to set i to the largest k that fits, with i <= k <= j.
        #
        m = (i+j+1) >> 1  # ceiling((i+j)/2); i < m <= j
        chunk = charset.from_splittable(splittable[:m], True)
        chunklen = charset.encoded_header_len(chunk)
        if chunklen <= maxlinelen:
            # m is acceptable, so it is a new lower bound.
            i = m
        else:
            # m is not acceptable, so final i must be < m.
            j = m - 1
    # i == j.  Invariant #1 implies that splittable[:i] fits, and
    # invariant #2 implies that splittable[:i+1] does not fit, so i
    # is what we're looking for.
    first = charset.from_splittable(splittable[:i], False)
    last = charset.from_splittable(splittable[i:], False)
    return first, last