PostgreSQL Source Code git master
All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Pages
generate_unaccent_rules.py
Go to the documentation of this file.
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This script builds unaccent.rules on standard output when given the
# contents of UnicodeData.txt [1] and Latin-ASCII.xml [2] given as
# arguments. Optionally includes ligature expansion and the Unicode CLDR
# Latin-ASCII transliterator, enabled by default; this can be disabled
# with the "--no-ligatures-expansion" command line option.
#
# The approach is to use the Unicode decomposition data to identify
# precomposed codepoints that are equivalent to a ligature of several
# letters, or a base letter with any number of diacritical marks.
#
# This approach handles most letters with diacritical marks and some
# ligatures. However, several characters (notably a majority of
# ligatures) don't have decomposition. To handle all these cases, one can
# use a standard Unicode transliterator available in the Common Locale Data
# Repository (CLDR): Latin-ASCII. This transliterator associates Unicode
# characters to ASCII-range equivalents. Unless the "--no-ligatures-expansion"
# option is enabled, the XML file of this transliterator [2] -- given as a
# command line argument -- will be parsed and used.
#
# Ideally you should use the latest release for each data set. This
# script is compatible with at least CLDR release 29.
#
# [1] https://www.unicode.org/Public/${UNICODE_VERSION}/ucd/UnicodeData.txt
# [2] https://raw.githubusercontent.com/unicode-org/cldr/${TAG}/common/transforms/Latin-ASCII.xml
28
import argparse
import codecs
import re
import sys
import xml.etree.ElementTree as ET

# Re-wrap stdout so the rules are always emitted as UTF-8, regardless of
# the environment's locale settings.
sys.stdout = codecs.getwriter('utf8')(sys.stdout.buffer)

# The ranges of Unicode characters that we consider to be "plain letters".
# For now we are being conservative by including only Latin and Greek. This
# could be extended in future based on feedback from people with relevant
# language knowledge.
PLAIN_LETTER_RANGES = ((ord('a'), ord('z')),  # Latin lower case
                       (ord('A'), ord('Z')),  # Latin upper case
                       (0x03b1, 0x03c9),      # GREEK SMALL LETTER ALPHA, GREEK SMALL LETTER OMEGA
                       (0x0391, 0x03a9))      # GREEK CAPITAL LETTER ALPHA, GREEK CAPITAL LETTER OMEGA

# Combining marks follow a "base" character, and result in a composite
# character. Example: "U&'A\0300'" produces "À". There are three types of
# combining marks: enclosing (Me), non-spacing combining (Mn), spacing
# combining (Mc). We identify the ranges of marks we feel safe removing.
# References:
#   https://en.wikipedia.org/wiki/Combining_character
#   https://www.unicode.org/charts/PDF/U0300.pdf
#   https://www.unicode.org/charts/PDF/U20D0.pdf
COMBINING_MARK_RANGES = ((0x0300, 0x0362),   # Mn: Accents, IPA
                         (0x20dd, 0x20e0),   # Me: Symbols
                         (0x20e2, 0x20e4),)  # Me: Screen, keycap, triangle
57
58
def print_record(codepoint, letter):
    """Print one unaccent.rules line on stdout.

    codepoint is the integer source codepoint; letter is its replacement
    string, or None/empty when the codepoint is simply removed (a
    combining mark), in which case only the source character is printed.
    """
    if letter:
        # If the letter has whitespace or double quotes, escape double
        # quotes and apply more quotes around it.
        if (' ' in letter) or ('"' in letter):
            letter = '"' + letter.replace('"', '""') + '"'
        output = chr(codepoint) + "\t" + letter
    else:
        output = chr(codepoint)

    print(output)
70
71
class Codepoint(object):
    """One codepoint parsed from UnicodeData.txt."""

    def __init__(self, id, general_category, combining_ids):
        # id: integer codepoint value
        # general_category: Unicode general category string (e.g. "Lu", "Mn")
        # combining_ids: list of codepoints from the decomposition field
        self.id = id
        self.general_category = general_category
        self.combining_ids = combining_ids
77
78
def is_mark_to_remove(codepoint):
    """Return true if this is a combining mark to remove."""
    if not is_mark(codepoint):
        return False

    # Only strip marks within the vetted COMBINING_MARK_RANGES.
    for begin, end in COMBINING_MARK_RANGES:
        if begin <= codepoint.id <= end:
            return True
    return False
88
89
def is_plain_letter(codepoint):
    """Return true if codepoint represents a "plain letter"."""
    for begin, end in PLAIN_LETTER_RANGES:
        if begin <= codepoint.id <= end:
            return True
    return False
96
97
def is_mark(codepoint):
    """Returns true for diacritical marks (combining codepoints)."""
    # Mn: non-spacing, Me: enclosing, Mc: spacing combining.
    return codepoint.general_category in ("Mn", "Me", "Mc")
101
102
def is_letter_with_marks(codepoint, table):
    """Returns true for letters combined with one or more marks."""
    # See https://www.unicode.org/reports/tr44/tr44-14.html#General_Category_Values

    # Some codepoints redirect directly to another, instead of doing any
    # "combining"... but sometimes they redirect to a codepoint that doesn't
    # exist, so ignore those.
    if len(codepoint.combining_ids) == 1 and codepoint.combining_ids[0] in table:
        return is_letter_with_marks(table[codepoint.combining_ids[0]], table)

    # A letter without diacritical marks has none of them.
    if not any(is_mark(table[i]) for i in codepoint.combining_ids[1:]):
        return False

    # Check if the base letter of this letter has marks.
    codepoint_base = codepoint.combining_ids[0]
    if not is_plain_letter(table[codepoint_base]) and \
       not is_letter_with_marks(table[codepoint_base], table):
        return False

    return True
124
125
def is_letter(codepoint, table):
    """Return true for letter with or without diacritical marks."""
    return is_plain_letter(codepoint) or is_letter_with_marks(codepoint, table)
129
130
def get_plain_letter(codepoint, table):
    """Return the base codepoint without marks. If this codepoint has more
    than one combining character, do a recursive lookup on the table to
    find out its plain base letter."""
    if is_letter_with_marks(codepoint, table):
        if len(table[codepoint.combining_ids[0]].combining_ids) > 1:
            return get_plain_letter(table[codepoint.combining_ids[0]], table)
        elif is_plain_letter(table[codepoint.combining_ids[0]]):
            return table[codepoint.combining_ids[0]]

        # Should not come here
        assert False, 'Codepoint U+%0.2X' % codepoint.id
    elif is_plain_letter(codepoint):
        return codepoint

    # Should not come here
    assert False, 'Codepoint U+%0.2X' % codepoint.id
148
149
def is_ligature(codepoint, table):
    """Return true for letters combined with letters."""
    return all(i in table and is_letter(table[i], table)
               for i in codepoint.combining_ids)
153
def get_plain_letters(codepoint, table):
    """Return a list of plain letters from a ligature."""
    assert is_ligature(codepoint, table)
    # "cid" rather than "id" to avoid shadowing the builtin.
    return [get_plain_letter(table[cid], table)
            for cid in codepoint.combining_ids]
158
159
161 """Parse the XML file and return a set of tuples (src, trg), where "src"
162 is the original character and "trg" the substitute."""
163 charactersSet = set()
164
165 # RegEx to parse rules
166 rulePattern = re.compile(r'^(?:(.)|(\\u[0-9a-fA-F]{4})) \u2192 (?:\'(.+)\'|(.+)) ;')
167
168 # construct tree from XML
169 transliterationTree = ET.parse(latinAsciiFilePath)
170 transliterationTreeRoot = transliterationTree.getroot()
171
172 # Fetch all the transliteration rules. Since release 29 of Latin-ASCII.xml
173 # all the transliteration rules are located in a single tRule block with
174 # all rules separated into separate lines.
175 blockRules = transliterationTreeRoot.findall("./transforms/transform/tRule")
176 assert(len(blockRules) == 1)
177
178 # Split the block of rules into one element per line.
179 rules = blockRules[0].text.splitlines()
180
181 # And finish the processing of each individual rule.
182 for rule in rules:
183 matches = rulePattern.search(rule)
184
185 # The regular expression capture four groups corresponding
186 # to the characters.
187 #
188 # Group 1: plain "src" char. Empty if group 2 is not.
189 # Group 2: unicode-escaped "src" char (e.g. "\u0110"). Empty if group 1 is not.
190 #
191 # Group 3: plain "trg" char. Empty if group 4 is not.
192 # Group 4: plain "trg" char between quotes. Empty if group 3 is not.
193 if matches is not None:
194 src = matches.group(1) if matches.group(1) is not None else bytes(matches.group(2), 'UTF-8').decode('unicode-escape')
195 trg = matches.group(3) if matches.group(3) is not None else matches.group(4)
196
197 # "'" and """ are escaped
198 trg = trg.replace("\\'", "'").replace('\\"', '"')
199
200 # the parser of unaccent only accepts non-whitespace characters
201 # for "src" and "trg" (see unaccent.c)
202 if not src.isspace() and not trg.isspace():
203 if src == "\u210c":
204 # This mapping seems to be in error, and causes a collision
205 # by disagreeing with the main Unicode database file:
206 # https://unicode-org.atlassian.net/browse/CLDR-17656
207 continue
208 charactersSet.add((ord(src), trg))
209
210 return charactersSet
211
212
214 """Returns the special cases which are not handled by other methods"""
215 charactersSet = set()
216
217 # Cyrillic
218 charactersSet.add((0x0401, "\u0415")) # CYRILLIC CAPITAL LETTER IO
219 charactersSet.add((0x0451, "\u0435")) # CYRILLIC SMALL LETTER IO
220
221 # Symbols of "Letterlike Symbols" Unicode Block (U+2100 to U+214F)
222 charactersSet.add((0x2103, "\xb0C")) # DEGREE CELSIUS
223 charactersSet.add((0x2109, "\xb0F")) # DEGREE FAHRENHEIT
224
225 return charactersSet
226
227
def main(args):
    """Read UnicodeData.txt (and, unless disabled, Latin-ASCII.xml) per
    the parsed command-line args, and write unaccent.rules on stdout."""
    # https://www.unicode.org/reports/tr44/tr44-14.html#Character_Decomposition_Mappings
    decomposition_type_pattern = re.compile(" *<[^>]*> *")

    table = {}
    # Renamed from "all" to avoid shadowing the builtin.
    codepoints = []

    # unordered set to ensure uniqueness
    charactersSet = set()

    # read file UnicodeData.txt
    with codecs.open(
        args.unicodeDataFilePath, mode='r', encoding='UTF-8',
    ) as unicodeDataFile:
        # read everything we need into memory
        for line in unicodeDataFile:
            fields = line.split(";")
            if len(fields) > 5:
                # https://www.unicode.org/reports/tr44/tr44-14.html#UnicodeData.txt
                general_category = fields[2]
                decomposition = fields[5]
                # Strip the <type> tag (e.g. "<compat>") off the decomposition.
                decomposition = decomposition_type_pattern.sub(' ', decomposition)
                codepoint_id = int(fields[0], 16)
                combining_ids = [int(s, 16) for s in decomposition.split(" ") if s != ""]
                codepoint = Codepoint(codepoint_id, general_category, combining_ids)
                table[codepoint_id] = codepoint
                codepoints.append(codepoint)

    # walk through all the codepoints looking for interesting mappings
    for codepoint in codepoints:
        if codepoint.general_category.startswith('L') and \
           len(codepoint.combining_ids) > 0:
            if is_letter_with_marks(codepoint, table):
                charactersSet.add((codepoint.id,
                                   chr(get_plain_letter(codepoint, table).id)))
            elif args.noLigaturesExpansion is False and is_ligature(codepoint, table):
                charactersSet.add((codepoint.id,
                                   "".join(chr(combining_codepoint.id)
                                           for combining_codepoint
                                           in get_plain_letters(codepoint, table))))
            elif is_mark_to_remove(codepoint):
                charactersSet.add((codepoint.id, None))

    # add CLDR Latin-ASCII characters
    if not args.noLigaturesExpansion:
        charactersSet |= parse_cldr_latin_ascii_transliterator(args.latinAsciiFilePath)
        charactersSet |= special_cases()

    # sort by source codepoint for more convenient display
    charactersList = sorted(charactersSet, key=lambda characterPair: characterPair[0])

    for characterPair in charactersList:
        print_record(characterPair[0], characterPair[1])
281
282
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='This script builds unaccent.rules on standard output when given the contents of UnicodeData.txt and Latin-ASCII.xml given as arguments.')
    parser.add_argument("--unicode-data-file", help="Path to formatted text file corresponding to UnicodeData.txt.", type=str, required=True, dest='unicodeDataFilePath')
    parser.add_argument("--latin-ascii-file", help="Path to XML file from Unicode Common Locale Data Repository (CLDR) corresponding to Latin-ASCII transliterator (Latin-ASCII.xml).", type=str, dest='latinAsciiFilePath')
    parser.add_argument("--no-ligatures-expansion", help="Do not expand ligatures and do not use Unicode CLDR Latin-ASCII transliterator. By default, this option is not enabled and \"--latin-ascii-file\" argument is required. If this option is enabled, \"--latin-ascii-file\" argument is optional and ignored.", action="store_true", dest='noLigaturesExpansion')
    args = parser.parse_args()

    # Latin-ASCII.xml is mandatory unless ligature expansion was disabled.
    if args.noLigaturesExpansion is False and args.latinAsciiFilePath is None:
        sys.stderr.write('You must specify the path to Latin-ASCII transliterator file with \"--latin-ascii-file\" option or use \"--no-ligatures-expansion\" option. Use \"-h\" option for help.')
        sys.exit(1)

    main(args)
void print(const void *obj)
Definition: print.c:36
def __init__(self, id, general_category, combining_ids)
def get_plain_letters(codepoint, table)
def print_record(codepoint, letter)
def get_plain_letter(codepoint, table)
def is_letter_with_marks(codepoint, table)
def is_ligature(codepoint, table)
def is_letter(codepoint, table)
def parse_cldr_latin_ascii_transliterator(latinAsciiFilePath)
const void size_t len
#define assert(x)
Definition: regcustom.h:56