author     Thorsten Glaser <tg@mirbsd.de>  2017-07-14 14:02:37 +0200
committer  Mike FABIAN <mfabian@redhat.com>  2017-08-17 11:06:08 +0200
commit     580be3035d2e0f479c4ac955bf719b0bf936f5cf (patch)
tree       9cd75f6c3c3dd06ba0df188550202a38f42a64cd /localedata
parent     038d1cafafb3094a9fbebd35f4aa8d0ebae0e55b (diff)
UnicodeData has precedence over EastAsianWidth
[BZ #19852]
[BZ #21750]
* unicode-gen/utf8_gen.py: Process EastAsianWidth lines before
UnicodeData lines so the latter have precedence; remove hack to
group output by EastAsianWidth ranges.
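For illustration only, a minimal sketch of the ordering idea the patch relies on; the code points and widths below are made up for the example and are not quoted from the Unicode data files. Whatever writes into width_dict last wins, so filling it from EastAsianWidth.txt first and from UnicodeData.txt second gives the latter precedence.

# Sketch of precedence by dict overwrite order; sample values are hypothetical.
width_dict = {}

# EastAsianWidth.txt pass: pretend U+302A..U+302D are listed as wide.
for key in range(0x302A, 0x302E):
    width_dict[key] = '<U%04X>\t2' % key   # stand-in for unicode_utils.ucs_symbol()

# UnicodeData.txt pass: pretend U+302A is a non-spacing mark (NSM).
# Running second, it overwrites the width-2 entry left by the first pass.
width_dict[0x302A] = '<U%04X>\t0' % 0x302A

for key in sorted(width_dict):
    print(width_dict[key])
# Prints <U302A> with width 0, and <U302B>..<U302D> with width 2.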
Diffstat (limited to 'localedata')
-rwxr-xr-x  localedata/unicode-gen/utf8_gen.py  26
1 file changed, 9 insertions(+), 17 deletions(-)
diff --git a/localedata/unicode-gen/utf8_gen.py b/localedata/unicode-gen/utf8_gen.py
index ab03e75..12f01d8 100755
--- a/localedata/unicode-gen/utf8_gen.py
+++ b/localedata/unicode-gen/utf8_gen.py
@@ -221,28 +221,20 @@ def process_width(outfile, ulines, elines):
 
     '''
     width_dict = {}
-    for line in ulines:
-        fields = line.split(";")
-        if fields[4] == "NSM" or fields[2] == "Cf":
-            width_dict[int(fields[0], 16)] = unicode_utils.ucs_symbol(
-                int(fields[0], 16)) + '\t0'
-
     for line in elines:
-        # If an entry in EastAsianWidth.txt is found, it overrides entries in
-        # UnicodeData.txt:
         fields = line.split(";")
         if not '..' in fields[0]:
-            width_dict[int(fields[0], 16)] = unicode_utils.ucs_symbol(
-                int(fields[0], 16)) + '\t2'
+            code_points = (fields[0], fields[0])
         else:
             code_points = fields[0].split("..")
-            for key in range(int(code_points[0], 16),
-                             int(code_points[1], 16)+1):
-                if key in width_dict:
-                    del width_dict[key]
-            width_dict[int(code_points[0], 16)] = '{:s}...{:s}\t2'.format(
-                unicode_utils.ucs_symbol(int(code_points[0], 16)),
-                unicode_utils.ucs_symbol(int(code_points[1], 16)))
+        for key in range(int(code_points[0], 16),
+                         int(code_points[1], 16)+1):
+            width_dict[key] = unicode_utils.ucs_symbol(key) + '\t2'
+    for line in ulines:
+        fields = line.split(";")
+        if fields[4] == "NSM" or fields[2] == "Cf":
+            width_dict[int(fields[0], 16)] = unicode_utils.ucs_symbol(
+                int(fields[0], 16)) + '\t0'
 
     for key in sorted(width_dict):
         outfile.write(width_dict[key]+'\n')
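A hypothetical way to exercise the patched function, assuming utf8_gen.py and unicode_utils.py are importable from the current directory and that the sample lines only approximate the field layout of the real UnicodeData.txt and EastAsianWidth.txt:

import io
import utf8_gen

# One UnicodeData.txt-style line (U+302A, bidi category NSM) and one
# EastAsianWidth.txt-style range line covering U+302A..U+302D.
ulines = ['302A;IDEOGRAPHIC LEVEL TONE MARK;Mn;218;NSM;;;;;N;;;;;\n']
elines = ['302A..302D;W\n']

out = io.StringIO()
utf8_gen.process_width(out, ulines, elines)
print(out.getvalue(), end='')
# Expected with this patch: U+302A comes out with width 0 (UnicodeData.txt
# wins), while U+302B..U+302D keep width 2 from the EastAsianWidth.txt range.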