#!/usr/bin/env python
#
# Copyright 2011-2013 The Rust Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution and at
# http://rust-lang.org/COPYRIGHT.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.

# This script uses the following Unicode tables:
# - DerivedCoreProperties.txt
# - DerivedNormalizationProps.txt
# - EastAsianWidth.txt
# - auxiliary/GraphemeBreakProperty.txt
# - PropList.txt
# - ReadMe.txt
# - Scripts.txt
# - SpecialCasing.txt
# - UnicodeData.txt
#
# Since this should not require frequent updates, we just store this
# out-of-line and check the generated tables.rs file into git.
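#
# Running this script downloads any data files it needs that are not already
# present in the current directory (via curl) and writes the generated
# tables.rs into the current working directory.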

import fileinput, re, os, sys, operator

preamble = '''// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

// NOTE: The following code was generated by "src/etc/unicode.py", do not edit directly

#![allow(missing_docs, non_upper_case_globals, non_snake_case)]
'''

# Mapping taken from Table 12 from:
# http://www.unicode.org/reports/tr44/#General_Category_Values
expanded_categories = {
    'Lu': ['LC', 'L'], 'Ll': ['LC', 'L'], 'Lt': ['LC', 'L'],
    'Lm': ['L'], 'Lo': ['L'],
    'Mn': ['M'], 'Mc': ['M'], 'Me': ['M'],
    'Nd': ['N'], 'Nl': ['N'], 'No': ['N'],
    'Pc': ['P'], 'Pd': ['P'], 'Ps': ['P'], 'Pe': ['P'],
    'Pi': ['P'], 'Pf': ['P'], 'Po': ['P'],
    'Sm': ['S'], 'Sc': ['S'], 'Sk': ['S'], 'So': ['S'],
    'Zs': ['Z'], 'Zl': ['Z'], 'Zp': ['Z'],
    'Cc': ['C'], 'Cf': ['C'], 'Cs': ['C'], 'Co': ['C'], 'Cn': ['C'],
}

# these are the surrogate codepoints, which are not valid rust characters
surrogate_codepoints = (0xd800, 0xdfff)

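# Download the given file from the UCD server if a copy is not already in the
# current directory, and abort if it still cannot be found afterwards.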
def fetch(f):
    if not os.path.exists(os.path.basename(f)):
        os.system("curl -O http://www.unicode.org/Public/UNIDATA/%s"
                  % f)

    if not os.path.exists(os.path.basename(f)):
        sys.stderr.write("cannot load %s" % f)
        exit(1)

def is_surrogate(n):
    return surrogate_codepoints[0] <= n <= surrogate_codepoints[1]

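# Parse UnicodeData.txt into the structures the emitters below need:
# per-category and per-combining-class codepoint lists, the simple
# upper/lower/title case mappings, and the canonical and compatibility
# decompositions. Ranged entries ("<..., First>" / "<..., Last>") are expanded
# to one record per codepoint, and surrogates are skipped.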
def load_unicode_data(f):
    fetch(f)
    gencats = {}
    to_lower = {}
    to_upper = {}
    to_title = {}
    combines = {}
    canon_decomp = {}
    compat_decomp = {}

    udict = {};
    range_start = -1;
    for line in fileinput.input(f):
        data = line.split(';');
        if len(data) != 15:
            continue
        cp = int(data[0], 16);
        if is_surrogate(cp):
            continue
        if range_start >= 0:
            for i in xrange(range_start, cp):
                udict[i] = data;
            range_start = -1;
        if data[1].endswith(", First>"):
            range_start = cp;
            continue;
        udict[cp] = data;

    for code in udict:
        [code_org, name, gencat, combine, bidi,
         decomp, deci, digit, num, mirror,
         old, iso, upcase, lowcase, titlecase] = udict[code];

        # generate char to char direct common and simple conversions
        # uppercase to lowercase
        if lowcase != "" and code_org != lowcase:
            to_lower[code] = (int(lowcase, 16), 0, 0)

        # lowercase to uppercase
        if upcase != "" and code_org != upcase:
            to_upper[code] = (int(upcase, 16), 0, 0)

        # title case
        if titlecase.strip() != "" and code_org != titlecase:
            to_title[code] = (int(titlecase, 16), 0, 0)

        # store decomposition, if given
        if decomp != "":
            if decomp.startswith('<'):
                seq = []
                for i in decomp.split()[1:]:
                    seq.append(int(i, 16))
                compat_decomp[code] = seq
            else:
                seq = []
                for i in decomp.split():
                    seq.append(int(i, 16))
                canon_decomp[code] = seq

        # place letter in categories as appropriate
        for cat in [gencat, "Assigned"] + expanded_categories.get(gencat, []):
            if cat not in gencats:
                gencats[cat] = []
            gencats[cat].append(code)

        # record combining class, if any
        if combine != "0":
            if combine not in combines:
                combines[combine] = []
            combines[combine].append(code)

    # generate Not_Assigned from Assigned
    gencats["Cn"] = gen_unassigned(gencats["Assigned"])
    # Assigned is not a real category
    del(gencats["Assigned"])
    # Other contains Not_Assigned
    gencats["C"].extend(gencats["Cn"])
    gencats = group_cats(gencats)
    combines = to_combines(group_cats(combines))

    return (canon_decomp, compat_decomp, gencats, combines, to_upper, to_lower, to_title)

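# Merge the unconditional mappings from SpecialCasing.txt into the simple case
# tables built by load_unicode_data. Each mapping is padded with NUL (0) so
# that every entry is exactly three codepoints long.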
def load_special_casing(f, to_upper, to_lower, to_title):
    fetch(f)
    for line in fileinput.input(f):
        data = line.split('#')[0].split(';')
        if len(data) == 5:
            code, lower, title, upper, _comment = data
        elif len(data) == 6:
            code, lower, title, upper, condition, _comment = data
            if condition.strip(): # Only keep unconditional mappings
                continue
        else:
            continue
        code = code.strip()
        lower = lower.strip()
        title = title.strip()
        upper = upper.strip()
        key = int(code, 16)
        for (map_, values) in [(to_lower, lower), (to_upper, upper), (to_title, title)]:
            if values != code:
                values = [int(i, 16) for i in values.split()]
                for _ in range(len(values), 3):
                    values.append(0)
                assert len(values) == 3
                map_[key] = values

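# group_cats/group_cat collapse sorted codepoint lists into lists of inclusive
# (start, end) ranges; ungroup_cat is the inverse expansion.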
def group_cats(cats):
    cats_out = {}
    for cat in cats:
        cats_out[cat] = group_cat(cats[cat])
    return cats_out

def group_cat(cat):
    cat_out = []
    letters = sorted(set(cat))
    cur_start = letters.pop(0)
    cur_end = cur_start
    for letter in letters:
        assert letter > cur_end, \
            "cur_end: %s, letter: %s" % (hex(cur_end), hex(letter))
        if letter == cur_end + 1:
            cur_end = letter
        else:
            cat_out.append((cur_start, cur_end))
            cur_start = cur_end = letter
    cat_out.append((cur_start, cur_end))
    return cat_out

def ungroup_cat(cat):
    cat_out = []
    for (lo, hi) in cat:
        while lo <= hi:
            cat_out.append(lo)
            lo += 1
    return cat_out

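# Every codepoint outside the surrogate block that is not in the Assigned set
# falls into Cn (Not_Assigned).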
def gen_unassigned(assigned):
    assigned = set(assigned)
    return ([i for i in range(0, 0xd800) if i not in assigned] +
            [i for i in range(0xe000, 0x110000) if i not in assigned])

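# Flatten {combining_class: [(lo, hi), ...]} into a single list of
# (lo, hi, class) triples sorted by range start.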
def to_combines(combs):
    combs_out = []
    for comb in combs:
        for (lo, hi) in combs[comb]:
            combs_out.append((lo, hi, comb))
    combs_out.sort(key=lambda comb: comb[0])
    return combs_out

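# Write `content` (a comma-separated list of table entries) to `f`, re-wrapping
# it so that the emitted lines stay under roughly 100 columns.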
def format_table_content(f, content, indent):
    line = " "*indent
    first = True
    for chunk in content.split(","):
        if len(line) + len(chunk) < 98:
            if first:
                line += chunk
            else:
                line += ", " + chunk
            first = False
        else:
            f.write(line + ",\n")
            line = " "*indent + chunk
    f.write(line)

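# Parse a Unicode property file (e.g. PropList.txt), whose data lines have the
# form "XXXX ; Prop_Name" or "XXXX..YYYY ; Prop_Name". Only properties named in
# `interestingprops` are kept (all of them if the list is empty), and the
# collected ranges are re-grouped into minimal (lo, hi) ranges.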
def load_properties(f, interestingprops):
    fetch(f)
    props = {}
    re1 = re.compile("^ *([0-9A-F]+) *; *(\w+)")
    re2 = re.compile("^ *([0-9A-F]+)\.\.([0-9A-F]+) *; *(\w+)")

    for line in fileinput.input(os.path.basename(f)):
        prop = None
        d_lo = 0
        d_hi = 0
        m = re1.match(line)
        if m:
            d_lo = m.group(1)
            d_hi = m.group(1)
            prop = m.group(2)
        else:
            m = re2.match(line)
            if m:
                d_lo = m.group(1)
                d_hi = m.group(2)
                prop = m.group(3)
            else:
                continue
        if interestingprops and prop not in interestingprops:
            continue
        d_lo = int(d_lo, 16)
        d_hi = int(d_hi, 16)
        if prop not in props:
            props[prop] = []
        props[prop].append((d_lo, d_hi))

    # optimize if possible
    for prop in props:
        props[prop] = group_cat(ungroup_cat(props[prop]))

    return props

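# Render a codepoint as a Rust char literal; 0 is the in-band "no character"
# padding value, so it is printed as '\0'.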
def escape_char(c):
    return "'\\u{%x}'" % c if c != 0 else "'\\0'"

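# Emit the shared Rust helper that binary-searches a sorted slice of inclusive
# (lo, hi) char ranges for membership.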
def emit_bsearch_range_table(f):
    f.write("""
fn bsearch_range_table(c: char, r: &'static [(char, char)]) -> bool {
    use core::cmp::Ordering::{Equal, Less, Greater};
    r.binary_search_by(|&(lo, hi)| {
        if c < lo {
            Greater
        } else if hi < c {
            Less
        } else {
            Equal
        }
    })
    .is_ok()
}\n
""")

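# Emit a single Rust `const` slice named `name`, containing the entries of
# `t_data` formatted by `pfun`. With the default `pfun`, the output has roughly
# this shape (illustrative only):
#
#     pub const White_Space_table: &'static [(char, char)] = &[
#         ('\u{9}', '\u{d}'), ('\u{20}', '\u{20}'), ...
#     ];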
def emit_table(f, name, t_data, t_type = "&'static [(char, char)]", is_pub=True,
        pfun=lambda x: "(%s,%s)" % (escape_char(x[0]), escape_char(x[1]))):
    pub_string = ""
    if is_pub:
        pub_string = "pub "
    f.write("    %sconst %s: %s = &[\n" % (pub_string, name, t_type))
    data = ""
    first = True
    for dat in t_data:
        if not first:
            data += ","
        first = False
        data += pfun(dat)
    format_table_content(f, data, 8)
    f.write("\n    ];\n\n")

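# Emit `pub mod <mod>` containing, for every category listed in `emit`, its
# range table plus a `pub fn <cat>(c: char) -> bool` wrapper that defers to
# bsearch_range_table.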
def emit_property_module(f, mod, tbl, emit):
    f.write("pub mod %s {\n" % mod)
    for cat in sorted(emit):
        emit_table(f, "%s_table" % cat, tbl[cat])
        f.write("    pub fn %s(c: char) -> bool {\n" % cat)
        f.write("        super::bsearch_range_table(c, %s_table)\n" % cat)
        f.write("    }\n\n")
    f.write("}\n\n")

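# Emit `pub mod conversions` with to_lower/to_upper functions that look a char
# up in (char, [char; 3]) tables sorted by key; characters without a mapping
# are returned unchanged, padded with NULs.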
def emit_conversions_module(f, to_upper, to_lower, to_title):
    f.write("pub mod conversions {")
    f.write("""
    use core::option::Option;
    use core::option::Option::{Some, None};

    pub fn to_lower(c: char) -> [char; 3] {
        match bsearch_case_table(c, to_lowercase_table) {
            None => [c, '\\0', '\\0'],
            Some(index) => to_lowercase_table[index].1,
        }
    }

    pub fn to_upper(c: char) -> [char; 3] {
        match bsearch_case_table(c, to_uppercase_table) {
            None => [c, '\\0', '\\0'],
            Some(index) => to_uppercase_table[index].1,
        }
    }

    fn bsearch_case_table(c: char, table: &'static [(char, [char; 3])]) -> Option<usize> {
        table.binary_search_by(|&(key, _)| key.cmp(&c)).ok()
    }

""")
    t_type = "&'static [(char, [char; 3])]"
    pfun = lambda x: "(%s,[%s,%s,%s])" % (
        escape_char(x[0]), escape_char(x[1][0]), escape_char(x[1][1]), escape_char(x[1][2]))
    emit_table(f, "to_lowercase_table",
        sorted(to_lower.iteritems(), key=operator.itemgetter(0)),
        is_pub=False, t_type = t_type, pfun=pfun)
    emit_table(f, "to_uppercase_table",
        sorted(to_upper.iteritems(), key=operator.itemgetter(0)),
        is_pub=False, t_type = t_type, pfun=pfun)
    f.write("}\n\n")

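# Build the canonical composition map from the canonical decompositions,
# skipping anything in Full_Composition_Exclusion. Note that this version only
# computes the data; nothing is written to `f`.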
def emit_norm_module(f, canon, compat, combine, norm_props):
    canon_keys = canon.keys()
    canon_keys.sort()

    compat_keys = compat.keys()
    compat_keys.sort()

    canon_comp = {}
    comp_exclusions = norm_props["Full_Composition_Exclusion"]
    for char in canon_keys:
        if True in map(lambda (lo, hi): lo <= char <= hi, comp_exclusions):
            continue
        decomp = canon[char]
        if len(decomp) == 2:
            if not canon_comp.has_key(decomp[0]):
                canon_comp[decomp[0]] = []
            canon_comp[decomp[0]].append( (decomp[1], char) )
    canon_comp_keys = canon_comp.keys()
    canon_comp_keys.sort()

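# Driver: delete any stale tables.rs, then regenerate it in the current
# directory from the downloaded Unicode data files.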
if __name__ == "__main__":
    r = "tables.rs"
    if os.path.exists(r):
        os.remove(r)
    with open(r, "w") as rf:
        # write the file's preamble
        rf.write(preamble)

        # download and parse all the data
        fetch("ReadMe.txt")
        with open("ReadMe.txt") as readme:
            pattern = "for Version (\d+)\.(\d+)\.(\d+) of the Unicode"
            unicode_version = re.search(pattern, readme.read()).groups()
        rf.write("""
/// The version of [Unicode](http://www.unicode.org/)
/// that the unicode parts of `CharExt` and `UnicodeStrPrelude` traits are based on.
pub const UNICODE_VERSION: (u64, u64, u64) = (%s, %s, %s);
""" % unicode_version)
        (canon_decomp, compat_decomp, gencats, combines,
            to_upper, to_lower, to_title) = load_unicode_data("UnicodeData.txt")
        load_special_casing("SpecialCasing.txt", to_upper, to_lower, to_title)
        want_derived = ["XID_Start", "XID_Continue", "Alphabetic", "Lowercase", "Uppercase",
                        "Cased", "Case_Ignorable"]
        derived = load_properties("DerivedCoreProperties.txt", want_derived)
        scripts = load_properties("Scripts.txt", [])
        props = load_properties("PropList.txt",
                ["White_Space", "Join_Control", "Noncharacter_Code_Point", "Pattern_White_Space"])
        norm_props = load_properties("DerivedNormalizationProps.txt",
                ["Full_Composition_Exclusion"])

        # bsearch_range_table is used in all the property modules below
        emit_bsearch_range_table(rf)

        # category tables
        for (name, cat, pfuns) in ("general_category", gencats, ["N", "Cc"]), \
                                  ("derived_property", derived, want_derived), \
                                  ("property", props, ["White_Space", "Pattern_White_Space"]):
            emit_property_module(rf, name, cat, pfuns)

        # normalizations and conversions module
        emit_norm_module(rf, canon_decomp, compat_decomp, combines, norm_props)
        emit_conversions_module(rf, to_upper, to_lower, to_title)