scripts/make-unidata
#! /usr/bin/perl -w
#
# This file is part of DisOrder.
# Copyright (C) 2007 Richard Kettlewell
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
#
# Generate Unicode support tables
#
# This script will download data from unicode.org if the required files
# aren't in the current directory.
#
# After modifying this script you should run:
#   make -C lib rebuild-unicode check
#
# Things not supported yet:
#  - SpecialCasing.txt data for case mapping
#  - Title case offsets
#  - Some kind of hinting for composition
#  - ...
#
# NB the generated files DO NOT offer a stable ABI and so are not immediately
# suitable for use in a general-purpose library.  Things that would need to
# be done:
#  - Hide unidata.h from applications; it will never be ABI- or even API-stable.
#  - Stabilized General_Category values
#  - Extend the unicode.h API to general utility rather than just what
#    DisOrder needs.
#  - ...
#
use strict;
use File::Basename;

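# Write to the current output file (STDOUT is reopened onto each generated
# file below); die on error rather than silently emitting a truncated table.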
sub out {
  print @_ or die "$!\n";
}

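# Build a single string key from a property hash by joining its values in
# sorted-key order, so records with identical contents compare equal.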
sub key {
  my $d = shift;
  local $_;

  return join("-", map($d->{$_}, sort keys %$d));
}

# Size of a subtable
#
# This can be varied to trade off the number of subtables against their size.
# 16 gave the smallest results last time I checked (on a Mac with a 32-bit
# build).
our $modulus = 16;

if(@ARGV) {
  $modulus = shift;
}

# Where to break the table.  There is a huge empty section of the Unicode
# code space and we deal with this by simply leaving it out of the table.
# This complicates the lookup function a little but should not affect
# performance in the cases we care about.
our $break_start = 0x30000;
our $break_end = 0xE0000;

# Similarly we simply omit the very top of the table and sort it out in the
# lookup function.
our $break_top = 0xE0200;

my %cats = ();                  # known general categories
my %data = ();                  # mapping of codepoints to information
my $max = 0;                    # maximum codepoint
my $maxccc = 0;                 # maximum combining class
my $maxud = 0;
my $minud = 0;                  # max/min upper case offset
my $maxld = 0;
my $minld = 0;                  # max/min lower case offset

# Make sure we have our desired input files.  We explicitly specify a
# Unicode standard version to make sure that a given version of DisOrder
# supports a given version of Unicode.
sub input {
  my $path = shift;
  my $lpath = basename($path);
  if(!-e $lpath) {
    system("wget http://www.unicode.org/Public/5.0.0/ucd/$path");
    chmod(0444, $lpath) or die "$lpath: $!\n";
  }
  open(STDIN, "<$lpath") or die "$lpath: $!\n";
  print STDERR "Reading $lpath...\n";
}

# Read the main data file
input("UnicodeData.txt");
my ($start, $end);
my $maxcompat = 0;
my $maxcanon = 0;
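# Counters for canonical decompositions whose first element is a Hangul
# syllable or Hangul Choseong (leading consonant); the sanity checks at the
# end of the script assume there are none.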
my $hangul_syllable_decomps = 0;
my $hangul_choseong_decomps = 0;
while(<>) {
  my @f = split(/;/, $_);
  my $c = hex($f[0]);           # codepoint
  my $name = $f[1];
  die "$f[0] $name is in the break\n"
    if $c >= $break_start && $c < $break_end;
  my $gc = $f[2];               # General_Category
  # Various GCs we don't expect to see in UnicodeData.txt
  $cats{$gc} = 1;               # always record all GCs
  if($name =~ /first>/i) {
    $start = $c;
    next;
  } elsif($name =~ /last>/i) {
    $end = $c;
  } else {
    $start = $end = $c;
  }
  die "unexpected Cn" if $gc eq 'Cn';
  my $ccc = $f[3];              # Canonical_Combining_Class
  my $dm = $f[5];               # Decomposition_Type + Decomposition_Mapping
  my $sum = hex($f[12]) || $c;  # Simple_Uppercase_Mapping
  my $slm = hex($f[13]) || $c;  # Simple_Lowercase_Mapping
  # recalculate the upper/lower case mappings as offsets
  my $ud = $sum - $c;
  my $ld = $slm - $c;
  # update bounds on various values
  $maxccc = $ccc if $ccc > $maxccc; # assumed never to be -ve
  $minud = $ud if $ud < $minud;
  $maxud = $ud if $ud > $maxud;
  $minld = $ld if $ld < $minld;
  $maxld = $ld if $ld > $maxld;
  if($start != $end) {
    printf STDERR "> range %04X-%04X is %s\n", $start, $end, $gc;
  }
  for($c = $start; $c <= $end; ++$c) {
    my $d = {
      "gc" => $gc,
      "ccc" => $ccc,
      "ud" => $ud,
      "ld" => $ld,
    };
    if($dm ne '') {
      my $maxref;
      if($dm =~ /</) {
        # This is a compatibility decomposition
        $dm =~ s/^<.*>\s*//;
        $d->{compat} = 1;
        $maxref = \$maxcompat;
      } else {
        $maxref = \$maxcanon;
      }
      $d->{decomp} = [map(hex($_), split(/\s+/, $dm))];
      my $len = scalar @{$d->{decomp}};
      $$maxref = $len if $len > $$maxref;
      if(!$d->{compat}) {
        if(${$d->{decomp}}[0] >= 0xAC00 && ${$d->{decomp}}[0] <= 0xD7A3) {
          ++$hangul_syllable_decomps;
        }
        if(${$d->{decomp}}[0] >= 0x1100 && ${$d->{decomp}}[0] <= 0x115F) {
          ++$hangul_choseong_decomps;
        }
      }
    }
    $data{$c} = $d;
  }
  $cats{$gc} = 1;
  $max = $end if $end > $max;
}

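# Read a property file in which each line maps a codepoint or codepoint range
# to a property value, and record that value under $propkey for every
# codepoint covered.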
sub read_prop_with_ranges {
  my $path = shift;
  my $propkey = shift;
  input($path);
  while(<>) {
    chomp;
    s/\s*\#.*//;
    next if $_ eq '';
    my ($range, $propval) = split(/\s*;\s*/, $_);
    if($range =~ /(.*)\.\.(.*)/) {
      for my $c (hex($1) .. hex($2)) {
        $data{$c}->{$propkey} = $propval;
      }
    } else {
      my $c = hex($range);
      $data{$c}->{$propkey} = $propval;
    }
  }
}

# Grapheme_Break etc
read_prop_with_ranges("auxiliary/GraphemeBreakProperty.txt", "gbreak");
read_prop_with_ranges("auxiliary/WordBreakProperty.txt", "wbreak");
read_prop_with_ranges("auxiliary/SentenceBreakProperty.txt", "sbreak");

# Compute the full list and fill in the Extend category properly
my %gbreak = ();
my %wbreak = ();
my %sbreak = ();
for my $c (keys %data) {
  if(!exists $data{$c}->{gbreak}) {
    $data{$c}->{gbreak} = 'Other';
  }
  $gbreak{$data{$c}->{gbreak}} = 1;

  if(!exists $data{$c}->{wbreak}) {
    if($data{$c}->{gbreak} eq 'Extend') {
      $data{$c}->{wbreak} = 'Extend';
    } else {
      $data{$c}->{wbreak} = 'Other';
    }
  }
  $wbreak{$data{$c}->{wbreak}} = 1;

  if(!exists $data{$c}->{sbreak}) {
    if($data{$c}->{gbreak} eq 'Extend') {
      $data{$c}->{sbreak} = 'Extend';
    } else {
      $data{$c}->{sbreak} = 'Other';
    }
  }
  $sbreak{$data{$c}->{sbreak}} = 1;
}

# Various derived properties
input("DerivedNormalizationProps.txt");
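# Entries with no explicit value (for instance Full_Composition_Exclusion)
# are boolean properties; record them with the value 1.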
while(<>) {
  chomp;
  s/\s*\#.*//;
  next if $_ eq '';
  my @f = split(/\s*;\s*/, $_);
  if(@f == 2) {
    push(@f, 1);
  }
  my ($range, $propkey, $propval) = @f;
  if($range =~ /(.*)\.\.(.*)/) {
    for my $c (hex($1) .. hex($2)) {
      $data{$c}->{$propkey} = $propval;
    }
  } else {
    my $c = hex($range);
    $data{$c}->{$propkey} = $propval;
  }
}

# Round up the maximum value to a whole number of subtables
$max += ($modulus - 1) - ($max % $modulus);

# Private use characters
# We only fill in values below $max; utf32__unidata() copes with the rest.
my $Co = {
  "gc" => "Co",
  "ccc" => 0,
  "ud" => 0,
  "ld" => 0
};
for(my $c = 0xE000; $c <= 0xF8FF && $c <= $max; ++$c) {
  $data{$c} = $Co;
}
for(my $c = 0xF0000; $c <= 0xFFFFD && $c <= $max; ++$c) {
  $data{$c} = $Co;
}
for(my $c = 0x100000; $c <= 0x10FFFD && $c <= $max; ++$c) {
  $data{$c} = $Co;
}

# Anything left is not assigned
my $Cn = {
  "gc" => "Cn",                 # not assigned
  "ccc" => 0,
  "ud" => 0,
  "ld" => 0
};
for(my $c = 0; $c <= $max; ++$c) {
  if(!exists $data{$c}) {
    $data{$c} = $Cn;
  }
  if(!exists $data{$c}->{wbreak}) {
    $data{$c}->{wbreak} = 'Other';
  }
  if(!exists $data{$c}->{gbreak}) {
    $data{$c}->{gbreak} = 'Other';
  }
  if(!exists $data{$c}->{sbreak}) {
    $data{$c}->{sbreak} = 'Other';
  }
}
$cats{'Cn'} = 1;

# Read the casefolding data too
input("CaseFolding.txt");
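# Status letters in CaseFolding.txt: C = common, F = full, S = simple-only,
# T = Turkic-specific.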
while(<>) {
  chomp;
  next if /^\#/ or $_ eq '';
  my @f = split(/\s*;\s*/, $_);
  # Full case folding means use status C and F.
  # We discard status T; Turkish users may wish to change this.
  if($f[1] eq 'C' or $f[1] eq 'F') {
    my $c = hex($f[0]);
    $data{$c}->{casefold} = $f[2];
    # We are particularly interested in combining characters that
    # case-fold to non-combining characters, or characters that
    # case-fold to sequences with combining characters in non-initial
    # positions, as these require decomposition before case-folding
    my @d = map(hex($_), split(/\s+/, $data{$c}->{casefold}));
    if($data{$c}->{ccc} != 0) {
      # This is a combining character
      if($data{$d[0]}->{ccc} == 0) {
        # The first character of its case-folded form is NOT
        # a combining character.  The field name is the example
        # explicitly mentioned in the spec.
        $data{$c}->{ypogegrammeni} = 1;
      }
    } else {
      # This is a non-combining character; inspect the non-initial
      # code points of the case-folded sequence
      shift(@d);
      if(grep($data{$_}->{ccc} != 0, @d)) {
        # Some non-initial code point in the case-folded form IS
        # a combining character.
        $data{$c}->{ypogegrammeni} = 1;
      }
    }
  }
}

# Generate the header file
print STDERR "Generating unidata.h...\n";
open(STDOUT, ">unidata.h") or die "unidata.h: $!\n";

out("/* Automatically generated file, see scripts/make-unidata */\n",
    "#ifndef UNIDATA_H\n",
    "#define UNIDATA_H\n");

# TODO choose stable values for General_Category
out("enum unicode_General_Category {\n",
    join(",\n",
         map("  unicode_General_Category_$_", sort keys %cats)), "\n};\n");

out("enum unicode_Grapheme_Break {\n",
    join(",\n",
         map("  unicode_Grapheme_Break_$_", sort keys %gbreak)),
    "\n};\n");
out("extern const char *const unicode_Grapheme_Break_names[];\n");

out("enum unicode_Word_Break {\n",
    join(",\n",
         map("  unicode_Word_Break_$_", sort keys %wbreak)),
    "\n};\n");
out("extern const char *const unicode_Word_Break_names[];\n");

out("enum unicode_Sentence_Break {\n",
    join(",\n",
         map("  unicode_Sentence_Break_$_", sort keys %sbreak)),
    "\n};\n");
out("extern const char *const unicode_Sentence_Break_names[];\n");

out("enum unicode_flags {\n",
    "  unicode_normalize_before_casefold = 1,\n",
    "  unicode_compatibility_decomposition = 2\n",
    "};\n",
    "\n");

# Choose the narrowest type that will fit the required values
sub choosetype {
  my ($min, $max) = @_;
  if($min >= 0) {
    return "char" if $max <= 127;
    return "unsigned char" if $max <= 255;
    return "int16_t" if $max < 32767;
    return "uint16_t" if $max < 65535;
    return "int32_t";
  } else {
    return "char" if $min >= -127 && $max <= 127;
    return "int16_t" if $min >= -32767 && $max <= 32767;
    return "int32_t";
  }
}

out("struct unidata {\n",
    # decomposition (canonical or compatibility;
    # unicode_compatibility_decomposition distinguishes) or NULL
    "  const uint32_t *decomp;\n",

    # case-folded string or NULL
    "  const uint32_t *casefold;\n",

    # composed characters that start with this code point.  This only
    # includes primary composites, i.e. the decomposition mapping is
    # canonical and this code point is not in the exclusion table.
    "  const uint32_t *composed;\n",

#    "  ".choosetype($minud, $maxud)." upper_offset;\n",
#    "  ".choosetype($minld, $maxld)." lower_offset;\n",

    # canonical combining class
    "  ".choosetype(0, $maxccc)." ccc;\n",
    "  char general_category;\n",

    # see unicode_flags enum
    "  uint8_t flags;\n",
    "  char grapheme_break;\n",
    "  char word_break;\n",
    "  char sentence_break;\n",
    "};\n");
# decomp and casefold do have non-BMP characters, so we
# can't use a simple 16-bit table.  We could use UTF-8 or UTF-16
# though, saving a bit of space (probably not that much...) at the
# cost of marginally reduced performance and additional complexity

out("extern const struct unidata *const unidata[];\n");

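# For each possible lead byte: count is the total length of a UTF-8 sequence
# starting with that byte (0 for bytes that cannot start a sequence) and
# min2/max2 give the permitted range of the second byte.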
423 out("extern const struct unicode_utf8_row {\n",
424 " uint8_t count;\n",
425 " uint8_t min2, max2;\n",
426 "} unicode_utf8_valid[];\n");
427
428 out("#define UNICODE_NCHARS ", ($max + 1), "\n");
429 out("#define UNICODE_MODULUS $modulus\n");
430 out("#define UNICODE_BREAK_START $break_start\n");
431 out("#define UNICODE_BREAK_END $break_end\n");
432 out("#define UNICODE_BREAK_TOP $break_top\n");
433
434 out("#endif\n");
435
436 close STDOUT or die "unidata.h: $!\n";
437
438 print STDERR "Generating unidata.c...\n";
439 open(STDOUT, ">unidata.c") or die "unidata.c: $!\n";
440
441 out("/* Automatically generated file, see scripts/make-unidata */\n",
442 "#include \"common.h\"\n",
443 "#include \"unidata.h\"\n");
444
445 # Short aliases to keep .c file small
446
447 out(map(sprintf("#define %s unicode_General_Category_%s\n", $_, $_),
448 sort keys %cats));
449 out(map(sprintf("#define GB%s unicode_Grapheme_Break_%s\n", $_, $_),
450 sort keys %gbreak));
451 out(map(sprintf("#define WB%s unicode_Word_Break_%s\n", $_, $_),
452 sort keys %wbreak));
453 out(map(sprintf("#define SB%s unicode_Sentence_Break_%s\n", $_, $_),
454 sort keys %sbreak));
455 out("#define NBC unicode_normalize_before_casefold\n");
456 out("#define CD unicode_compatibility_decomposition\n");
457
458 # Names for *_Break properties
459 out("const char *const unicode_Grapheme_Break_names[] = {\n",
460 join(",\n",
461 map(" \"$_\"", sort keys %gbreak)),
462 "\n};\n");
463 out("const char *const unicode_Word_Break_names[] = {\n",
464 join(",\n",
465 map(" \"$_\"", sort keys %wbreak)),
466 "\n};\n");
467 out("const char *const unicode_Sentence_Break_names[] = {\n",
468 join(",\n",
469 map(" \"$_\"", sort keys %sbreak)),
470 "\n};\n");
471
472 our $ddnum = 0;
473 our $ddsaved = 0;
474 our %ddnums = ();
475 my $ddfirst = 1;
476 out("static const uint32_t ");
sub dedupe {
  my $s = join(",", @_);
  if(!exists $ddnums{$s}) {
    if($ddfirst) {
      $ddfirst = 0;
    } else {
      out(",\n");
    }
    out("dd$ddnum\[]={$s}");
    $ddnums{$s} = $ddnum++;
  } else {
    ++$ddsaved;
  }
  return "dd$ddnums{$s}";
}

# Generate the decomposition mapping tables.
print STDERR "> decomposition mappings\n";
for(my $c = 0; $c <= $max; ++$c) {
  if(exists $data{$c} && exists $data{$c}->{decomp}) {
    $data{$c}->{decompsym} = dedupe(@{$data{$c}->{decomp}}, 0);
  }
}

print STDERR "> composition mappings\n";
# First we must generate the mapping of each code point to possible
# compositions.
for(my $c = 0; $c <= $max; ++$c) {
  if(exists $data{$c}
     && exists $data{$c}->{decomp}
     && !exists $data{$c}->{compat}
     && !$data{$c}->{Full_Composition_Exclusion}) {
    # $c has a non-excluded canonical decomposition, i.e. it is
    # a primary composite.  Find the first code point of the decomposition
    my $first = ${$data{$c}->{decomp}}[0];
    if(!exists $data{$first}->{compose}) {
      $data{$first}->{compose} = [$c];
    } else {
      push(@{$data{$first}->{compose}}, $c);
    }
  }
}
# Then we can generate the tables.
for(my $c = 0; $c <= $max; ++$c) {
  if(exists $data{$c} && exists $data{$c}->{compose}) {
    $data{$c}->{compsym} = dedupe(@{$data{$c}->{compose}}, 0);
  }
}

# The case folding table.
print STDERR "> case-fold mappings\n";
for(my $c = 0; $c <= $max; ++$c) {
  if(exists $data{$c} && exists $data{$c}->{casefold}) {
    $data{$c}->{cfsym} = dedupe(map(hex($_), split(/\s+/,
                                                   $data{$c}->{casefold})),
                                0);
  }
}

# End of de-dupable arrays
out(";\n");

# Visit all the $modulus-character blocks in turn and generate the
# required subtables.  As above we spot duplicates to save space.  In
# Unicode 5.0.0 with $modulus=128 and current table data this saves
# 1372 subtables or at least three and a half megabytes on 32-bit
# platforms.
print STDERR "> subtables\n";
my %subtable = ();              # subtable content (C text) -> subtable number
my %subtableno = ();            # base codepoint -> subtable number
my $subtablecounter = 0;        # counter for subtable numbers
my $subtablessaved = 0;         # number of tables saved
for(my $base = 0; $base <= $max; $base += $modulus) {
  next if $base >= $break_start && $base < $break_end;
  next if $base >= $break_top;
  my @t;
  for(my $c = $base; $c < $base + $modulus; ++$c) {
    my $d = $data{$c};
    my $decompsym = ($data{$c}->{decompsym} or "0");
    my $cfsym = ($data{$c}->{cfsym} or "0");
    my $compsym = ($data{$c}->{compsym} or "0");
    my @flags = ();
    if($data{$c}->{ypogegrammeni}) {
      push(@flags, "NBC");
    }
    if($data{$c}->{compat}) {
      push(@flags, "CD");
    }
    my $flags = @flags ? join("|", @flags) : 0;
    push(@t, "{".
         join(",",
              $decompsym,
              $cfsym,
              $compsym,
#             $d->{ud},
#             $d->{ld},
              $d->{ccc},
              $d->{gc},
              $flags,
              "GB$d->{gbreak}",
              "WB$d->{wbreak}",
              "SB$d->{sbreak}",
              )."}");
  }
  my $t = join(",\n", @t);
  if(!exists $subtable{$t}) {
    out(sprintf("/* %04X-%04X */\n", $base, $base + $modulus - 1));
    out("static const struct unidata st$subtablecounter\[] = {\n",
        "$t\n",
        "};\n");
    $subtable{$t} = $subtablecounter++;
  } else {
    ++$subtablessaved;
  }
  $subtableno{$base} = $subtable{$t};
}

print STDERR "> main table\n";
out("const struct unidata *const unidata[]={\n");
for(my $base = 0; $base <= $max; $base += $modulus) {
  next if $base >= $break_start && $base < $break_end;
  next if $base >= $break_top;
  #out("st$subtableno{$base} /* ".sprintf("%04x", $base)." */,\n");
  out("st$subtableno{$base},\n");
}
out("};\n");

print STDERR "> UTF-8 table\n";
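# One row per possible lead byte.  The restricted second-byte ranges for
# E0, ED, F0 and F4 exclude overlong encodings, UTF-16 surrogates and
# codepoints above U+10FFFF; bytes 80-C1 and F5-FF can never start a
# sequence.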
605 out("const struct unicode_utf8_row unicode_utf8_valid[] = {\n");
606 for(my $c = 0; $c <= 0x7F; ++$c) {
607 out(" { 1, 0, 0 }, /* $c */\n");
608 }
609 for(my $c = 0x80; $c < 0xC2; ++$c) {
610 out(" { 0, 0, 0 }, /* $c */\n");
611 }
612 for(my $c = 0xC2; $c <= 0xDF; ++$c) {
613 out(" { 2, 0x80, 0xBF }, /* $c */\n");
614 }
615 for(my $c = 0xE0; $c <= 0xE0; ++$c) {
616 out(" { 3, 0xA0, 0xBF }, /* $c */\n");
617 }
618 for(my $c = 0xE1; $c <= 0xEC; ++$c) {
619 out(" { 3, 0x80, 0xBF }, /* $c */\n");
620 }
621 for(my $c = 0xED; $c <= 0xED; ++$c) {
622 out(" { 3, 0x80, 0x9F }, /* $c */\n");
623 }
624 for(my $c = 0xEE; $c <= 0xEF; ++$c) {
625 out(" { 3, 0x80, 0xBF }, /* $c */\n");
626 }
627 for(my $c = 0xF0; $c <= 0xF0; ++$c) {
628 out(" { 4, 0x90, 0xBF }, /* $c */\n");
629 }
630 for(my $c = 0xF1; $c <= 0xF3; ++$c) {
631 out(" { 4, 0x80, 0xBF }, /* $c */\n");
632 }
633 for(my $c = 0xF4; $c <= 0xF4; ++$c) {
634 out(" { 4, 0x80, 0x8F }, /* $c */\n");
635 }
636 for(my $c = 0xF5; $c <= 0xFF; ++$c) {
637 out(" { 0, 0, 0 }, /* $c */\n");
638 }
639 out("};\n");

close STDOUT or die "unidata.c: $!\n";

print STDERR "Done.\n\n";
printf STDERR "modulus=%d\n", $modulus;
printf STDERR "max=%04X\n", $max;
print STDERR "subtables=$subtablecounter, subtablessaved=$subtablessaved\n";
print STDERR "ddsaved=$ddsaved\n";
print STDERR "maxcompat=$maxcompat maxcanon=$maxcanon\n";
print STDERR "$hangul_syllable_decomps canonical decompositions to Hangul syllables\n";
print STDERR "$hangul_choseong_decomps canonical decompositions to Hangul Choseong\n";

die "We assumed that canonical decompositions were never more than 2 long!\n"
  if $maxcanon > 2;

die "We assumed no canonical decompositions to Hangul syllables/Choseong!\n"
  if $hangul_syllable_decomps || $hangul_choseong_decomps;