# Copyright 2011-2013 The Rust Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution and at
# http://rust-lang.org/COPYRIGHT.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
# This script uses the following Unicode tables:
# - DerivedCoreProperties.txt
# - DerivedNormalizationProps.txt
# - EastAsianWidth.txt
# - auxiliary/GraphemeBreakProperty.txt
#
# Since this should not require frequent updates, we just store this
# out-of-line and check the unicode.rs file into git.
import fileinput, re, os, sys, operator
preamble = '''// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

// NOTE: The following code was generated by "src/etc/unicode.py", do not edit directly

#![allow(missing_docs, non_upper_case_globals, non_snake_case)]
'''
# Mapping taken from Table 12 from:
# http://www.unicode.org/reports/tr44/#General_Category_Values
expanded_categories = {
    'Lu': ['LC', 'L'], 'Ll': ['LC', 'L'], 'Lt': ['LC', 'L'],
    'Lm': ['L'], 'Lo': ['L'],
    'Mn': ['M'], 'Mc': ['M'], 'Me': ['M'],
    'Nd': ['N'], 'Nl': ['N'], 'No': ['N'],
    'Pc': ['P'], 'Pd': ['P'], 'Ps': ['P'], 'Pe': ['P'],
    'Pi': ['P'], 'Pf': ['P'], 'Po': ['P'],
    'Sm': ['S'], 'Sc': ['S'], 'Sk': ['S'], 'So': ['S'],
    'Zs': ['Z'], 'Zl': ['Z'], 'Zp': ['Z'],
    'Cc': ['C'], 'Cf': ['C'], 'Cs': ['C'], 'Co': ['C'], 'Cn': ['C'],
}
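# With this mapping a codepoint is recorded not only under its own general
# category but also under the coarser groups, e.g. an 'Lu' codepoint lands in
# 'Lu', 'LC', 'L' and the synthetic "Assigned" bucket used below:
#   expanded_categories.get('Lu', [])   # -> ['LC', 'L']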
# these are the surrogate codepoints, which are not valid rust characters
surrogate_codepoints = (0xd800, 0xdfff)
def fetch(f):
    if not os.path.exists(os.path.basename(f)):
        os.system("curl -O http://www.unicode.org/Public/UNIDATA/%s"
                  % f)

    if not os.path.exists(os.path.basename(f)):
        sys.stderr.write("cannot load %s" % f)
        sys.exit(1)
def is_surrogate(n):
    return surrogate_codepoints[0] <= n <= surrogate_codepoints[1]
def load_unicode_data(f):
    fetch(f)
    gencats = {}
    to_lower = {}
    to_upper = {}
    to_title = {}
    combines = {}
    canon_decomp = {}
    compat_decomp = {}

    udict = {}
    range_start = -1
    for line in fileinput.input(f):
        data = line.split(';')
        if len(data) != 15:
            continue
        cp = int(data[0], 16)
        if is_surrogate(cp):
            continue
        # ranged entries come as a "<..., First>"/"<..., Last>" pair; expand them
        if range_start >= 0:
            for i in xrange(range_start, cp):
                udict[i] = data
            range_start = -1
        if data[1].endswith(", First>"):
            range_start = cp
            continue
        udict[cp] = data
    for code in udict:
        [code_org, name, gencat, combine, bidi,
         decomp, deci, digit, num, mirror,
         old, iso, upcase, lowcase, titlecase] = udict[code]
        # generate char to char direct common and simple conversions
        # uppercase to lowercase
        if lowcase != "" and code_org != lowcase:
            to_lower[code] = (int(lowcase, 16), 0, 0)

        # lowercase to uppercase
        if upcase != "" and code_org != upcase:
            to_upper[code] = (int(upcase, 16), 0, 0)

        if titlecase.strip() != "" and code_org != titlecase:
            to_title[code] = (int(titlecase, 16), 0, 0)
        # store decomposition, if given
        if decomp != "":
            # compatibility decompositions are tagged with a leading "<type>"
            if decomp.startswith('<'):
                seq = []
                for i in decomp.split()[1:]:
                    seq.append(int(i, 16))
                compat_decomp[code] = seq
            else:
                seq = []
                for i in decomp.split():
                    seq.append(int(i, 16))
                canon_decomp[code] = seq
        # place letter in categories as appropriate
        for cat in [gencat, "Assigned"] + expanded_categories.get(gencat, []):
            if cat not in gencats:
                gencats[cat] = []
            gencats[cat].append(code)
        # record combining class, if any
        if combine != "0":
            if combine not in combines:
                combines[combine] = []
            combines[combine].append(code)
    # generate Not_Assigned from Assigned
    gencats["Cn"] = gen_unassigned(gencats["Assigned"])
    # Assigned is not a real category
    del(gencats["Assigned"])
    # Other contains Not_Assigned
    gencats["C"].extend(gencats["Cn"])

    gencats = group_cats(gencats)
    combines = to_combines(group_cats(combines))
    return (canon_decomp, compat_decomp, gencats, combines,
            to_upper, to_lower, to_title)
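# The case tables map a codepoint to a fixed-size triple; a simple one-to-one
# mapping such as 'A' -> 'a' is stored as
#   to_lower[0x41] == (0x61, 0, 0)
# leaving room for the multi-character mappings that load_special_casing() adds.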
def load_special_casing(f, to_upper, to_lower, to_title):
    fetch(f)
    for line in fileinput.input(f):
        data = line.split('#')[0].split(';')
        if len(data) == 5:
            # unconditional entry
            code, lower, title, upper, _comment = data
        elif len(data) == 6:
            # entry with a condition attached
            code, lower, title, upper, condition, _comment = data
            if condition.strip():  # Only keep unconditional mappings
                continue
        else:
            continue

        code = int(code.strip(), 16)
        lower = lower.strip()
        title = title.strip()
        upper = upper.strip()
        for (map_, values) in [(to_lower, lower), (to_upper, upper), (to_title, title)]:
            values = [int(i, 16) for i in values.split()]
            for _ in range(len(values), 3):
                values.append(0)
            assert len(values) == 3
            map_[code] = values
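# For example, the unconditional SpecialCasing.txt entry for U+00DF:
#   00DF; 00DF; 0053 0073; 0053 0053; # LATIN SMALL LETTER SHARP S
# yields the two-character uppercase mapping "SS", padded to three slots:
#   to_upper[0xdf] == [0x53, 0x53, 0]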
def group_cats(cats):
    cats_out = {}
    for cat in cats:
        cats_out[cat] = group_cat(cats[cat])
    return cats_out
def group_cat(cat):
    cat_out = []
    letters = sorted(set(cat))
    cur_start = letters.pop(0)
    cur_end = cur_start
    for letter in letters:
        assert letter > cur_end, \
            "cur_end: %s, letter: %s" % (hex(cur_end), hex(letter))
        if letter == cur_end + 1:
            cur_end = letter
        else:
            cat_out.append((cur_start, cur_end))
            cur_start = cur_end = letter
    cat_out.append((cur_start, cur_end))
    return cat_out
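# e.g. group_cat([0x41, 0x42, 0x43, 0x61]) -> [(0x41, 0x43), (0x61, 0x61)]:
# runs of consecutive codepoints collapse into inclusive (first, last) ranges.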
def ungroup_cat(cat):
    # expand (lo, hi) ranges back into the flat list of codepoints they cover
    cat_out = []
    for (lo, hi) in cat:
        cat_out.extend(range(lo, hi + 1))
    return cat_out
def gen_unassigned(assigned):
    assigned = set(assigned)
    return ([i for i in range(0, 0xd800) if i not in assigned] +
            [i for i in range(0xe000, 0x110000) if i not in assigned])
def to_combines(combs):
    combs_out = []
    for comb in combs:
        for (lo, hi) in combs[comb]:
            combs_out.append((lo, hi, comb))
    combs_out.sort(key=lambda comb: comb[0])
    return combs_out
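# e.g. to_combines({'230': [(0x300, 0x304)]}) -> [(0x300, 0x304, '230')]:
# the grouped ranges are flattened into (lo, hi, class) triples sorted by lo.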
def format_table_content(f, content, indent):
    line = " "*indent
    first = True
    for chunk in content.split(","):
        if len(line) + len(chunk) < 98:
            line += chunk if first else ", " + chunk
            first = False
        else:
            f.write(line + ",\n")
            line = " "*indent + chunk
    f.write(line)
def load_properties(f, interestingprops):
    fetch(f)
    props = {}
    re1 = re.compile("^ *([0-9A-F]+) *; *(\w+)")
    re2 = re.compile("^ *([0-9A-F]+)\.\.([0-9A-F]+) *; *(\w+)")

    for line in fileinput.input(os.path.basename(f)):
        # each line names either a single codepoint or an inclusive range
        m = re1.match(line)
        if m:
            d_lo, d_hi, prop = m.group(1), m.group(1), m.group(2)
        else:
            m = re2.match(line)
            if not m:
                continue
            d_lo, d_hi, prop = m.group(1), m.group(2), m.group(3)
        if interestingprops and prop not in interestingprops:
            continue
        if prop not in props:
            props[prop] = []
        props[prop].append((int(d_lo, 16), int(d_hi, 16)))

    # optimize if possible
    for prop in props:
        props[prop] = group_cat(ungroup_cat(props[prop]))

    return props
# load all widths of want_widths, except those in except_cats
def load_east_asian_width(want_widths, except_cats):
    f = "EastAsianWidth.txt"
    fetch(f)
    widths = {}
    re1 = re.compile("^([0-9A-F]+);(\w+) +# (\w+)")
    re2 = re.compile("^([0-9A-F]+)\.\.([0-9A-F]+);(\w+) +# (\w+)")

    for line in fileinput.input(f):
        m = re1.match(line)
        if m:
            d_lo, d_hi, width, cat = m.group(1), m.group(1), m.group(2), m.group(3)
        else:
            m = re2.match(line)
            if not m:
                continue
            d_lo, d_hi, width, cat = m.group(1), m.group(2), m.group(3), m.group(4)
        if cat in except_cats or width not in want_widths:
            continue
        if width not in widths:
            widths[width] = []
        widths[width].append((int(d_lo, 16), int(d_hi, 16)))

    return widths
def escape_char(c):
    return "'\\u{%x}'" % c if c != 0 else "'\\0'"
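# e.g. escape_char(0x61) -> "'\u{61}'", a Rust char literal; NUL is
# special-cased as "'\0'" to keep the generated tables readable.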
def emit_bsearch_range_table(f):
    f.write("""
fn bsearch_range_table(c: char, r: &'static [(char, char)]) -> bool {
    use core::cmp::Ordering::{Equal, Less, Greater};
    r.binary_search_by(|&(lo, hi)| {
        if lo <= c && c <= hi { Equal }
        else if hi < c { Less }
        else { Greater }
    }).is_ok()
}

""")
def emit_table(f, name, t_data, t_type = "&'static [(char, char)]", is_pub=True,
               pfun=lambda x: "(%s,%s)" % (escape_char(x[0]), escape_char(x[1]))):
    pub_string = "pub " if is_pub else ""
    f.write("    %sconst %s: %s = &[\n" % (pub_string, name, t_type))
    data = ",".join(pfun(dat) for dat in t_data)
    format_table_content(f, data, 8)
    f.write("\n    ];\n\n")
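# The generated Rust looks roughly like (name and entries depend on the caller):
#     pub const White_Space_table: &'static [(char, char)] = &[
#         ('\u{9}','\u{d}'), ('\u{20}','\u{20}'), ...
#     ];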
def emit_property_module(f, mod, tbl, emit):
    f.write("pub mod %s {\n" % mod)
    for cat in sorted(emit):
        emit_table(f, "%s_table" % cat, tbl[cat])
        f.write("    pub fn %s(c: char) -> bool {\n" % cat)
        f.write("        super::bsearch_range_table(c, %s_table)\n" % cat)
        f.write("    }\n\n")
    f.write("}\n\n")
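# e.g. emit_property_module(rf, "property", props, ["White_Space"]) produces a
# `pub mod property` with `White_Space_table` and a matching
# `pub fn White_Space(c: char) -> bool` wrapper over bsearch_range_table.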
def emit_conversions_module(f, to_upper, to_lower, to_title):
    f.write("pub mod conversions {")
    f.write("""
    use core::cmp::Ordering::{Equal, Less, Greater};
    use core::option::Option;
    use core::option::Option::{Some, None};
    use core::result::Result::{Ok, Err};

    pub fn to_lower(c: char) -> [char; 3] {
        match bsearch_case_table(c, to_lowercase_table) {
            None => [c, '\\0', '\\0'],
            Some(index) => to_lowercase_table[index].1
        }
    }

    pub fn to_upper(c: char) -> [char; 3] {
        match bsearch_case_table(c, to_uppercase_table) {
            None => [c, '\\0', '\\0'],
            Some(index) => to_uppercase_table[index].1
        }
    }

    fn bsearch_case_table(c: char, table: &'static [(char, [char; 3])]) -> Option<usize> {
        match table.binary_search_by(|&(key, _)| {
            if c == key { Equal }
            else if key < c { Less }
            else { Greater }
        }) {
            Ok(i) => Some(i),
            Err(_) => None,
        }
    }
""")
    t_type = "&'static [(char, [char; 3])]"
    pfun = lambda x: "(%s,[%s,%s,%s])" % (
        escape_char(x[0]), escape_char(x[1][0]), escape_char(x[1][1]), escape_char(x[1][2]))
    emit_table(f, "to_lowercase_table",
               sorted(to_lower.iteritems(), key=operator.itemgetter(0)),
               is_pub=False, t_type=t_type, pfun=pfun)
    emit_table(f, "to_uppercase_table",
               sorted(to_upper.iteritems(), key=operator.itemgetter(0)),
               is_pub=False, t_type=t_type, pfun=pfun)
    f.write("}\n\n")
def emit_charwidth_module(f, width_table):
    f.write("pub mod charwidth {\n")
    f.write("    use core::option::Option;\n")
    f.write("    use core::option::Option::{Some, None};\n")
    f.write("    use core::result::Result::{Ok, Err};\n")
    f.write("""
    fn bsearch_range_value_table(c: char, is_cjk: bool, r: &'static [(char, char, u8, u8)]) -> u8 {
        use core::cmp::Ordering::{Equal, Less, Greater};
        match r.binary_search_by(|&(lo, hi, _, _)| {
            if lo <= c && c <= hi { Equal }
            else if hi < c { Less }
            else { Greater }
        }) {
            Ok(idx) => {
                let (_, _, r_ncjk, r_cjk) = r[idx];
                if is_cjk { r_cjk } else { r_ncjk }
            }
            Err(_) => 1
        }
    }

    pub fn width(c: char, is_cjk: bool) -> Option<usize> {
        match c as usize {
            _c @ 0 => Some(0),          // null is zero width
            cu if cu < 0x20 => None,    // control sequences have no width
            cu if cu < 0x7F => Some(1), // ASCII
            cu if cu < 0xA0 => None,    // more control sequences
            _ => Some(bsearch_range_value_table(c, is_cjk, charwidth_table) as usize)
        }
    }
""")

    f.write("    // character width table. Based on Markus Kuhn's free wcwidth() implementation,\n")
    f.write("    //     http://www.cl.cam.ac.uk/~mgk25/ucs/wcwidth.c\n")
    emit_table(f, "charwidth_table", width_table, "&'static [(char, char, u8, u8)]", is_pub=False,
               pfun=lambda x: "(%s,%s,%s,%s)" % (escape_char(x[0]), escape_char(x[1]), x[2], x[3]))
    f.write("}\n\n")
def emit_norm_module(f, canon, compat, combine, norm_props):
    canon_keys = canon.keys()
    canon_keys.sort()

    compat_keys = compat.keys()
    compat_keys.sort()

    canon_comp = {}
    comp_exclusions = norm_props["Full_Composition_Exclusion"]
    for char in canon_keys:
        if True in map(lambda (lo, hi): lo <= char <= hi, comp_exclusions):
            continue
        decomp = canon[char]
        if len(decomp) == 2:
            if not canon_comp.has_key(decomp[0]):
                canon_comp[decomp[0]] = []
            canon_comp[decomp[0]].append( (decomp[1], char) )
    canon_comp_keys = canon_comp.keys()
    canon_comp_keys.sort()
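# e.g. U+00C1 (LATIN CAPITAL LETTER A WITH ACUTE) canonically decomposes to
# [0x41, 0x301], so the pair is indexed for recomposition as
#   canon_comp[0x41] -> [..., (0x301, 0xc1), ...]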
def remove_from_wtable(wtable, val):
    wtable_out = []
    while wtable:
        if wtable[0][1] < val:
            wtable_out.append(wtable.pop(0))
        elif wtable[0][0] > val:
            break
        else:
            (wt_lo, wt_hi, width, width_cjk) = wtable.pop(0)
            if wt_lo == wt_hi == val:
                continue
            elif wt_lo == val:
                wtable_out.append((wt_lo+1, wt_hi, width, width_cjk))
            elif wt_hi == val:
                wtable_out.append((wt_lo, wt_hi-1, width, width_cjk))
            else:
                wtable_out.append((wt_lo, val-1, width, width_cjk))
                wtable_out.append((val+1, wt_hi, width, width_cjk))
    wtable_out.extend(wtable)
    return wtable_out
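# e.g. remove_from_wtable([(0x20, 0x7e, 1, 1)], 0x41)
#   -> [(0x20, 0x40, 1, 1), (0x42, 0x7e, 1, 1)]
# the entry containing the removed codepoint is split around it.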
def optimize_width_table(wtable):
    wtable_out = []
    w_this = wtable.pop(0)
    while wtable:
        # merge adjacent ranges only when both width columns match
        if w_this[1] == wtable[0][0] - 1 and w_this[2:] == wtable[0][2:]:
            w_tmp = wtable.pop(0)
            w_this = (w_this[0], w_tmp[1], w_tmp[2], w_tmp[3])
        else:
            wtable_out.append(w_this)
            w_this = wtable.pop(0)
    wtable_out.append(w_this)
    return wtable_out
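# e.g. optimize_width_table([(0x20, 0x40, 1, 1), (0x41, 0x7e, 1, 1)])
#   -> [(0x20, 0x7e, 1, 1)]
# adjacent entries whose width columns match are merged back into one range.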
if __name__ == "__main__":
    r = "unicode.rs"
    if os.path.exists(r):
        os.remove(r)
    with open(r, "w") as rf:
        # write the file's preamble
        rf.write(preamble)

        # download and parse all the data
        fetch("ReadMe.txt")
        with open("ReadMe.txt") as readme:
            pattern = "for Version (\d+)\.(\d+)\.(\d+) of the Unicode"
            unicode_version = re.search(pattern, readme.read()).groups()
        rf.write("""
/// The version of [Unicode](http://www.unicode.org/)
/// that the unicode parts of `CharExt` and `UnicodeStrPrelude` traits are based on.
pub const UNICODE_VERSION: (u64, u64, u64) = (%s, %s, %s);
""" % unicode_version)
        (canon_decomp, compat_decomp, gencats, combines,
         to_upper, to_lower, to_title) = load_unicode_data("UnicodeData.txt")
        load_special_casing("SpecialCasing.txt", to_upper, to_lower, to_title)
        want_derived = ["XID_Start", "XID_Continue", "Alphabetic", "Lowercase", "Uppercase",
                        "Cased", "Case_Ignorable"]
        derived = load_properties("DerivedCoreProperties.txt", want_derived)
        scripts = load_properties("Scripts.txt", [])
        props = load_properties("PropList.txt",
                                ["White_Space", "Join_Control", "Noncharacter_Code_Point"])
        norm_props = load_properties("DerivedNormalizationProps.txt",
                                     ["Full_Composition_Exclusion"])

        # bsearch_range_table is used in all the property modules below
        emit_bsearch_range_table(rf)

        # category tables
        for (name, cat, pfuns) in ("general_category", gencats, ["N", "Cc"]), \
                                  ("derived_property", derived, want_derived), \
                                  ("property", props, ["White_Space"]):
            emit_property_module(rf, name, cat, pfuns)

        # normalizations and conversions module
        emit_norm_module(rf, canon_decomp, compat_decomp, combines, norm_props)
        emit_conversions_module(rf, to_upper, to_lower, to_title)