3 # This file is part of DisOrder.
4 # Copyright (C) 2007 Richard Kettlewell
6 # This program is free software: you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation, either version 3 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program. If not, see <http://www.gnu.org/licenses/>.
20 # Generate Unicode support tables
22 # This script will download data from unicode.org if the required files
23 # aren't in the current directory.
25 # After modifying this script you should run:
26 # make -C lib rebuild-unicode check
28 # Things not supported yet:
29 # - SpecialCasing.txt data for case mapping
30 # - Title case offsets
31 # - Some kind of hinting for composition
34 # NB the generated files DO NOT offer a stable ABI and so are not immediately
35 # suitable for use in a general-purpose library. Things that would need to
37 # - Hide unidata.h from applications; it will never be ABI- or even API-stable.
38 # Stabilized General_Category values
39 # - Extend the unicode.h API to general utility rather than just what
47 print @_ or die "$!\n";
54 return join("-", map($d->{$_}, sort keys %$d));
59 # This can be varied to trade off the number of subtables against their size.
60 # 16 gave the smallest results last time I checked (on a Mac with a 32-bit
68 # Where to break the table. There is a huge empty section of the Unicode
69 # code space and we deal with this by simply leaving it out of the table.
70 # This complicates the lookup function a little but should not affect
71 # performance in the cases we care about.
72 our $break_start = 0x30000;
73 our $break_end = 0xE0000;
75 # Similarly we simply omit the very top of the table and sort it out in the
77 our $break_top = 0xE0200;
79 my %cats = (); # known general categories
80 my %data = (); # mapping of codepoints to information
81 my $max = 0; # maximum codepoint
82 my $maxccc = 0; # maximum combining class
84 my $minud = 0; # max/min upper case offset
86 my $minld = 0; # max/min lower case offset
88 # Make sure we have our desired input files. We explicitly specify a
89 # Unicode standard version to make sure that a given version of DisOrder
90 # supports a given version of Unicode.
93 my $lpath = basename
($path);
95 system("wget http://www.unicode.org/Public/6.0.0/ucd/$path");
96 chmod(0444, $lpath) or die "$lpath: $!\n";
98 open(STDIN
, "<$lpath") or die "$lpath: $!\n";
99 print STDERR
"Reading $lpath...\n";
103 # Read the main data file
104 input
("UnicodeData.txt");
108 my $hangul_syllable_decomps = 0;
109 my $hangul_choseong_decomps = 0;
111 my @f = split(/;/, $_);
112 my $c = hex($f[0]); # codepoint
114 die "$f[0] $name is in the break\n"
115 if $c >= $break_start && $c < $break_end;
116 my $gc = $f[2]; # General_Category
117 # Various GCs we don't expect to see in UnicodeData.txt
118 $cats{$gc} = 1; # always record all GCs
119 if($name =~ /first>/i) {
122 } elsif($name =~ /last>/i) {
127 die "unexpected Cn" if $gc eq 'Cn';
128 my $ccc = $f[3]; # Canonical_Combining_Class
129 my $dm = $f[5]; # Decomposition_Type + Decomposition_Mapping
130 my $sum = hex($f[12]) || $c; # Simple_Uppercase_Mapping
131 my $slm = hex($f[13]) || $c; # Simple_Lowercase_Mapping
132 # recalculate the upper/lower case mappings as offsets
135 # update bounds on various values
136 $maxccc = $ccc if $ccc > $maxccc; # assumed never to be -ve
137 $minud = $ud if $ud < $minud;
138 $maxud = $ud if $ud > $maxud;
139 $minld = $ld if $ld < $minld;
140 $maxld = $ld if $ld > $maxld;
142 printf STDERR
"> range %04X-%04X is %s\n", $start, $end, $gc;
144 for($c = $start; $c <= $end; ++$c) {
154 # This is a compatibility decomposition
157 $maxref = \
$maxcompat;
159 $maxref = \
$maxcanon;
161 $d->{decomp
} = [map(hex($_), split(/\s+/, $dm))];
162 my $len = scalar @
{$d->{decomp
}};
163 $$maxref = $len if $len > $$maxref;
165 if(${$d->{decomp
}}[0] >= 0xAC00 && ${$d->{decomp
}}[0] <= 0xD7A3) {
166 ++$hangul_syllable_decomps;
168 if(${$d->{decomp
}}[0] >= 0x1100 && ${$d->{decomp
}}[0] <= 0x115F) {
169 ++$hangul_choseong_decomps;
176 $max = $end if $end > $max;
179 sub read_prop_with_ranges
{
187 my ($range, $propval) = split(/\s*;\s*/, $_);
188 if($range =~ /(.*)\.\.(.*)/) {
189 for my $c (hex($1) .. hex($2)) {
190 $data{$c}->{$propkey} = $propval;
194 $data{$c}->{$propkey} = $propval;
200 read_prop_with_ranges
("auxiliary/GraphemeBreakProperty.txt", "gbreak");
201 read_prop_with_ranges
("auxiliary/WordBreakProperty.txt", "wbreak");
202 read_prop_with_ranges
("auxiliary/SentenceBreakProperty.txt", "sbreak");
204 # Compute the full list and fill in the Extend category properly
208 for my $c (keys %data) {
209 if(!exists $data{$c}->{gbreak
}) {
210 $data{$c}->{gbreak
} = 'Other';
212 $gbreak{$data{$c}->{gbreak
}} = 1;
214 if(!exists $data{$c}->{wbreak
}) {
215 if($data{$c}->{gbreak
} eq 'Extend') {
216 $data{$c}->{wbreak
} = 'Extend';
218 $data{$c}->{wbreak
} = 'Other';
221 $wbreak{$data{$c}->{wbreak
}} = 1;
223 if(!exists $data{$c}->{sbreak
}) {
224 if($data{$c}->{gbreak
} eq 'Extend') {
225 $data{$c}->{sbreak
} = 'Extend';
227 $data{$c}->{sbreak
} = 'Other';
230 $sbreak{$data{$c}->{sbreak
}} = 1;
233 # Various derived properties
234 input
("DerivedNormalizationProps.txt");
239 my @f = split(/\s*;\s*/, $_);
243 my ($range, $propkey, $propval) = @f;
244 if($range =~ /(.*)\.\.(.*)/) {
245 for my $c (hex($1) .. hex($2)) {
246 $data{$c}->{$propkey} = $propval
250 $data{$c}->{$propkey} = $propval
254 # Round up the maximum value to a whole number of subtables
255 $max += ($modulus - 1) - ($max % $modulus);
257 # Private use characters
258 # We only fill in values below $max, utf32__unidata()
265 for(my $c = 0xE000; $c <= 0xF8FF && $c <= $max; ++$c) {
268 for(my $c = 0xF0000; $c <= 0xFFFFD && $c <= $max; ++$c) {
271 for(my $c = 0x100000; $c <= 0x10FFFD && $c <= $max; ++$c) {
275 # Anything left is not assigned
277 "gc" => "Cn", # not assigned
282 for(my $c = 0; $c <= $max; ++$c) {
283 if(!exists $data{$c}) {
286 if(!exists $data{$c}->{wbreak
}) {
287 $data{$c}->{wbreak
} = 'Other';
289 if(!exists $data{$c}->{gbreak
}) {
290 $data{$c}->{gbreak
} = 'Other';
292 if(!exists $data{$c}->{sbreak
}) {
293 $data{$c}->{sbreak
} = 'Other';
298 # Read the casefolding data too
299 input
("CaseFolding.txt");
302 next if /^\#/ or $_ eq '';
303 my @f = split(/\s*;\s*/, $_);
304 # Full case folding means use status C and F.
305 # We discard status T; Turkish users may wish to change this.
306 if($f[1] eq 'C' or $f[1] eq 'F') {
308 $data{$c}->{casefold
} = $f[2];
309 # We are particularly interested in combining characters that
310 # case-fold to non-combining characters, or characters that
311 # case-fold to sequences with combining characters in non-initial
312 # positions, as these require decomposition before case-folding
313 my @d = map(hex($_), split(/\s+/, $data{$c}->{casefold
}));
314 if($data{$c}->{ccc
} != 0) {
315 # This is a combining character
316 if($data{$d[0]}->{ccc
} == 0) {
317 # The first character of its case-folded form is NOT
318 # a combining character. The field name is the example
319 # explicitly mentioned in the spec.
320 $data{$c}->{ypogegrammeni
} = 1;
323 # This is a non-combining character; inspect the non-initial
324 # code points of the case-folded sequence
326 if(grep($data{$_}->{ccc
} != 0, @d)) {
327 # Some non-initial code point in the case-folded form IS a
328 # combining character.
329 $data{$c}->{ypogegrammeni
} = 1;
335 # Generate the header file
336 print STDERR
"Generating unidata.h...\n";
337 open(STDOUT
, ">unidata.h") or die "unidata.h: $!\n";
339 out
("/** \@file lib/unidata.h\n",
340 " * \@brief Unicode tables\n",
342 " * Automatically generated file, see scripts/make-unidata\n",
346 "#ifndef UNIDATA_H\n",
347 "#define UNIDATA_H\n");
349 # TODO choose stable values for General_Category
350 out
("enum unicode_General_Category {\n",
352 map(" unicode_General_Category_$_", sort keys %cats)), "\n};\n");
354 out
("enum unicode_Grapheme_Break {\n",
356 map(" unicode_Grapheme_Break_$_", sort keys %gbreak)),
358 out
("extern const char *const unicode_Grapheme_Break_names[];\n");
360 out
("enum unicode_Word_Break {\n",
362 map(" unicode_Word_Break_$_", sort keys %wbreak)),
364 out
("extern const char *const unicode_Word_Break_names[];\n");
366 out
("enum unicode_Sentence_Break {\n",
368 map(" unicode_Sentence_Break_$_", sort keys %sbreak)),
370 out
("extern const char *const unicode_Sentence_Break_names[];\n");
372 out
("enum unicode_flags {\n",
373 " unicode_normalize_before_casefold = 1,\n",
374 " unicode_compatibility_decomposition = 2\n",
378 # Choose the narrowest type that will fit the required values
380 my ($min, $max) = @_;
382 return "char" if $max <= 127;
383 return "unsigned char" if $max <= 255;
384 return "int16_t" if $max < 32767;
385 return "uint16_t" if $max < 65535;
388 return "char" if $min >= -127 && $max <= 127;
389 return "int16_t" if $min >= -32767 && $max <= 32767;
394 out
("struct unidata {\n",
395 # decomposition (canonical or compatibility;
396 # unicode_compatibility_decomposition distinguishes) or NULL
397 " const uint32_t *decomp;\n",
399 # case-folded string or NULL
400 " const uint32_t *casefold;\n",
402 # composed characters that start with this code point. This only
403 # includes primary composites, i.e. the decomposition mapping is
404 # canonical and this code point is not in the exclusion table.
405 " const uint32_t *composed;\n",
407 # " ".choosetype($minud, $maxud)." upper_offset;\n",
408 # " ".choosetype($minld, $maxld)." lower_offset;\n",
410 # canonical combining class
411 " ".choosetype
(0, $maxccc)." ccc;\n",
412 " char general_category;\n",
414 # see unicode_flags enum
416 " char grapheme_break;\n",
417 " char word_break;\n",
418 " char sentence_break;\n",
420 # decomp and casefold do have non-BMP characters, so we
421 # can't use a simple 16-bit table. We could use UTF-8 or UTF-16
422 # though, saving a bit of space (probably not that much...) at the
423 # cost of marginally reduced performance and additional complexity
425 out
("extern const struct unidata *const unidata[];\n");
427 out
("extern const struct unicode_utf8_row {\n",
429 " uint8_t min2, max2;\n",
430 "} unicode_utf8_valid[];\n");
432 out
("#define UNICODE_NCHARS ", ($max + 1), "\n");
433 out
("#define UNICODE_MODULUS $modulus\n");
434 out
("#define UNICODE_BREAK_START $break_start\n");
435 out
("#define UNICODE_BREAK_END $break_end\n");
436 out
("#define UNICODE_BREAK_TOP $break_top\n");
440 close STDOUT
or die "unidata.h: $!\n";
442 print STDERR
"Generating unidata.c...\n";
443 open(STDOUT
, ">unidata.c") or die "unidata.c: $!\n";
445 out
("/** \@file lib/unidata.c\n",
446 " * \@brief Unicode tables\n",
448 " * Automatically generated file, see scripts/make-unidata\n",
452 "#include \"common.h\"\n",
453 "#include \"unidata.h\"\n");
455 # Short aliases to keep .c file small
457 out
(map(sprintf("#define %s unicode_General_Category_%s\n", $_, $_),
459 out
(map(sprintf("#define GB%s unicode_Grapheme_Break_%s\n", $_, $_),
461 out
(map(sprintf("#define WB%s unicode_Word_Break_%s\n", $_, $_),
463 out
(map(sprintf("#define SB%s unicode_Sentence_Break_%s\n", $_, $_),
465 out
("#define NBC unicode_normalize_before_casefold\n");
466 out
("#define CD unicode_compatibility_decomposition\n");
468 # Names for *_Break properties
469 out
("const char *const unicode_Grapheme_Break_names[] = {\n",
471 map(" \"$_\"", sort keys %gbreak)),
473 out
("const char *const unicode_Word_Break_names[] = {\n",
475 map(" \"$_\"", sort keys %wbreak)),
477 out
("const char *const unicode_Sentence_Break_names[] = {\n",
479 map(" \"$_\"", sort keys %sbreak)),
486 out
("static const uint32_t ");
488 my $s = join(",", @_);
489 if(!exists $ddnums{$s}) {
495 out
("dd$ddnum\[]={$s}");
496 $ddnums{$s} = $ddnum++;
500 return "dd$ddnums{$s}";
503 # Generate the decomposition mapping tables.
504 print STDERR
"> decomposition mappings\n";
505 for(my $c = 0; $c <= $max; ++$c) {
506 if(exists $data{$c} && exists $data{$c}->{decomp
}) {
507 $data{$c}->{decompsym
} = dedupe
(@
{$data{$c}->{decomp
}}, 0);
511 print STDERR
"> composition mappings\n";
512 # First we must generate the mapping of each code point to possible
514 for(my $c = 0; $c <= $max; ++$c) {
516 && exists $data{$c}->{decomp
}
517 && !exists $data{$c}->{compat
}
518 && !$data{$c}->{Full_Composition_Exclusion
}) {
519 # $c has a non-excluded canonical decomposition, i.e. it is
520 # a primary composite. Find the first code point of the decomposition
521 my $first = ${$data{$c}->{decomp
}}[0];
522 if(!exists $data{$first}->{compose
}) {
523 $data{$first}->{compose
} = [$c];
525 push(@
{$data{$first}->{compose
}}, $c);
529 # Then we can generate the tables.
530 for(my $c = 0; $c <= $max; ++$c) {
531 if(exists $data{$c} && exists $data{$c}->{compose
}) {
532 $data{$c}->{compsym
} = dedupe
(@
{$data{$c}->{compose
}}, 0);
536 # The case folding table.
537 print STDERR
"> case-fold mappings\n";
538 for(my $c = 0; $c <= $max; ++$c) {
539 if(exists $data{$c} && exists $data{$c}->{casefold
}) {
540 $data{$c}->{cfsym
} = dedupe
(map(hex($_), split(/\s+/,
541 $data{$c}->{casefold
})),
546 # End of de-dupable arrays
549 # Visit all the $modulus-character blocks in turn and generate the
550 # required subtables. As above we spot duplicates to save space. In
551 # Unicode 5.0.0 with $modulus=128 and current table data this saves
552 # 1372 subtables or at least three and a half megabytes on 32-bit
554 print STDERR
"> subtables\n";
555 my %subtable = (); # base->subtable number
556 my %subtableno = (); # subtable number -> content
557 my $subtablecounter = 0; # counter for subtable numbers
558 my $subtablessaved = 0; # number of tables saved
559 for(my $base = 0; $base <= $max; $base += $modulus) {
560 next if $base >= $break_start && $base < $break_end;
561 next if $base >= $break_top;
563 for(my $c = $base; $c < $base + $modulus; ++$c) {
565 my $decompsym = ($d->{decompsym
} or "0");
566 my $cfsym = ($d->{cfsym
} or "0");
567 my $compsym = ($d->{compsym
} or "0");
568 my $ccc = ($d->{ccc
} or "0");
569 my $gc = ($d->{gc
} or "Cn");
571 if($data{$c}->{ypogegrammeni
}) {
574 if($data{$c}->{compat
}) {
577 my $flags = @flags ?
join("|", @flags) : 0;
593 my $t = join(",\n", @t);
594 if(!exists $subtable{$t}) {
595 out
(sprintf("/* %04X-%04X */\n", $base, $base + $modulus - 1));
596 out
("static const struct unidata st$subtablecounter\[] = {\n",
599 $subtable{$t} = $subtablecounter++;
603 $subtableno{$base} = $subtable{$t};
606 print STDERR
"> main table\n";
607 out
("const struct unidata *const unidata[]={\n");
608 for(my $base = 0; $base <= $max; $base += $modulus) {
609 next if $base >= $break_start && $base < $break_end;
610 next if $base >= $break_top;
611 #out("st$subtableno{$base} /* ".sprintf("%04x", $base)." */,\n");
612 out
("st$subtableno{$base},\n");
616 print STDERR
"> UTF-8 table\n";
617 out
("const struct unicode_utf8_row unicode_utf8_valid[] = {\n");
618 for(my $c = 0; $c <= 0x7F; ++$c) {
619 out
(" { 1, 0, 0 }, /* $c */\n");
621 for(my $c = 0x80; $c < 0xC2; ++$c) {
622 out
(" { 0, 0, 0 }, /* $c */\n");
624 for(my $c = 0xC2; $c <= 0xDF; ++$c) {
625 out
(" { 2, 0x80, 0xBF }, /* $c */\n");
627 for(my $c = 0xE0; $c <= 0xE0; ++$c) {
628 out
(" { 3, 0xA0, 0xBF }, /* $c */\n");
630 for(my $c = 0xE1; $c <= 0xEC; ++$c) {
631 out
(" { 3, 0x80, 0xBF }, /* $c */\n");
633 for(my $c = 0xED; $c <= 0xED; ++$c) {
634 out
(" { 3, 0x80, 0x9F }, /* $c */\n");
636 for(my $c = 0xEE; $c <= 0xEF; ++$c) {
637 out
(" { 3, 0x80, 0xBF }, /* $c */\n");
639 for(my $c = 0xF0; $c <= 0xF0; ++$c) {
640 out
(" { 4, 0x90, 0xBF }, /* $c */\n");
642 for(my $c = 0xF1; $c <= 0xF3; ++$c) {
643 out
(" { 4, 0x80, 0xBF }, /* $c */\n");
645 for(my $c = 0xF4; $c <= 0xF4; ++$c) {
646 out
(" { 4, 0x80, 0x8F }, /* $c */\n");
648 for(my $c = 0xF5; $c <= 0xFF; ++$c) {
649 out
(" { 0, 0, 0 }, /* $c */\n");
653 close STDOUT
or die "unidata.c: $!\n";
655 print STDERR
"Done.\n\n";
656 printf STDERR
"modulus=%d\n", $modulus;
657 printf STDERR
"max=%04X\n", $max;
658 print STDERR
"subtables=$subtablecounter, subtablessaved=$subtablessaved\n";
659 print STDERR
"ddsaved=$ddsaved\n";
660 print STDERR
"maxcompat=$maxcompat maxcanon=$maxcanon\n";
661 print STDERR
"$hangul_syllable_decomps canonical decompositions to Hangul syllables\n";
662 print STDERR
"$hangul_choseong_decomps canonical decompositions to Hangul Choseong\n";
664 die "We assumed that canonical decompositions were never more than 2 long!\n"
667 die "We assumed no canonical decompositions to Hangul syllables/Choseong!\n"
668 if $hangul_syllable_decomps || $hangul_choseong_decomps;