PostgreSQL Source Code  git master
generate_unaccent_rules.py
Go to the documentation of this file.
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3 #
4 # This script builds unaccent.rules on standard output when given the
5 # contents of UnicodeData.txt [1] and Latin-ASCII.xml [2] given as
6 # arguments. Optionally includes ligature expansion and Unicode CLDR
7 # Latin-ASCII transliterator, enabled by default, this can be disabled
8 # with "--no-ligatures-expansion" command line option.
9 #
10 # The approach is to use the Unicode decomposition data to identify
11 # precomposed codepoints that are equivalent to a ligature of several
12 # letters, or a base letter with any number of diacritical marks.
13 #
14 # This approach handles most letters with diacritical marks and some
15 # ligatures. However, several characters (notably a majority of
16 # ligatures) don't have decomposition. To handle all these cases, one can
17 # use a standard Unicode transliterator available in Common Locale Data
18 # Repository (CLDR): Latin-ASCII. This transliterator associates Unicode
19 # characters to ASCII-range equivalent. Unless "--no-ligatures-expansion"
20 # option is enabled, the XML file of this transliterator [2] -- given as a
21 # command line argument -- will be parsed and used.
22 #
23 # Ideally you should use the latest release for each data set. For
24 # Latin-ASCII.xml, the latest data sets released can be browsed directly
25 # via [3]. Note that this script is compatible with at least release 29.
26 #
27 # [1] https://www.unicode.org/Public/8.0.0/ucd/UnicodeData.txt
28 # [2] https://raw.githubusercontent.com/unicode-org/cldr/release-34/common/transforms/Latin-ASCII.xml
29 # [3] https://github.com/unicode-org/cldr/tags
30 
31 # BEGIN: Python 2/3 compatibility - remove when Python 2 compatibility dropped
32 # The approach is to be Python3 compatible with Python2 "backports".
33 from __future__ import print_function
34 from __future__ import unicode_literals
35 # END: Python 2/3 compatibility - remove when Python 2 compatibility dropped
36 
37 import argparse
38 import codecs
39 import re
40 import sys
41 import xml.etree.ElementTree as ET
42 
# BEGIN: Python 2/3 compatibility - remove when Python 2 compatibility dropped
if sys.version_info[0] < 3:
    # Re-wrap stdout so unicode strings printed to it are UTF-8 encoded.
    sys.stdout = codecs.getwriter('utf8')(sys.stdout)

    # Python 2 spells codepoint-to-character conversion "unichr".
    chr = unichr

    # Shim matching Python 3's bytes(source, encoding, errors) call form.
    def bytes(source, encoding='ascii', errors='strict'):
        return source.encode(encoding=encoding, errors=errors)
else:
# END: Python 2/3 compatibility - remove when Python 2 compatibility dropped
    # Python 3: wrap the underlying binary buffer so we can print UTF-8.
    sys.stdout = codecs.getwriter('utf8')(sys.stdout.buffer)
57 
# Unicode ranges we consider to be "plain letters". Conservative for now:
# only Latin and Greek; could be extended later based on feedback from
# people with relevant language knowledge.
PLAIN_LETTER_RANGES = (
    (ord('a'), ord('z')),  # Latin lower case
    (ord('A'), ord('Z')),  # Latin upper case
    (0x03b1, 0x03c9),      # GREEK SMALL LETTER ALPHA .. OMEGA
    (0x0391, 0x03a9),      # GREEK CAPITAL LETTER ALPHA .. OMEGA
)

# Combining marks follow a "base" character and produce a composite
# character; for example "U&'A\0300'" produces "À". There are three mark
# categories: enclosing (Me), non-spacing combining (Mn), and spacing
# combining (Mc). These are the mark ranges we feel safe removing.
# References:
# https://en.wikipedia.org/wiki/Combining_character
# https://www.unicode.org/charts/PDF/U0300.pdf
# https://www.unicode.org/charts/PDF/U20D0.pdf
COMBINING_MARK_RANGES = (
    (0x0300, 0x0362),  # Mn: Accents, IPA
    (0x20dd, 0x20E0),  # Me: Symbols
    (0x20e2, 0x20e4),  # Me: Screen, keycap, triangle
)
78 
def print_record(codepoint, letter):
    """Emit one unaccent.rules line on stdout: the source character,
    followed by a tab and the replacement when there is one, or the
    character alone when the mark is simply removed (letter is falsy)."""
    if letter:
        print(chr(codepoint) + "\t" + letter)
    else:
        print(chr(codepoint))
86 
class Codepoint:
    """One UnicodeData.txt entry, reduced to the fields this script uses."""

    def __init__(self, id, general_category, combining_ids):
        # Integer codepoint value (UnicodeData.txt column 0, parsed as hex).
        self.id = id
        # General_Category string such as "Lu" or "Mn" (column 2).
        self.general_category = general_category
        # Codepoints of the character decomposition (column 5), if any.
        self.combining_ids = combining_ids
92 
def is_mark_to_remove(codepoint):
    """Return True if this is a combining mark we feel safe removing,
    i.e. a mark whose codepoint falls inside COMBINING_MARK_RANGES."""
    if not is_mark(codepoint):
        return False
    return any(lo <= codepoint.id <= hi for lo, hi in COMBINING_MARK_RANGES)
102 
def is_plain_letter(codepoint):
    """Return True if codepoint is a "plain letter": an unadorned Latin
    or Greek letter per PLAIN_LETTER_RANGES."""
    return any(lo <= codepoint.id <= hi for lo, hi in PLAIN_LETTER_RANGES)
109 
def is_mark(codepoint):
    """Return True for diacritical marks: codepoints whose general
    category is non-spacing (Mn), enclosing (Me), or spacing combining (Mc)."""
    return codepoint.general_category in {"Mn", "Me", "Mc"}
113 
def is_letter_with_marks(codepoint, table):
    """Return True for letters combined with one or more marks.

    A codepoint qualifies when its decomposition has a base element that
    is itself a letter (plain, or recursively a letter with marks) and at
    least one trailing element that is a diacritical mark.
    See https://www.unicode.org/reports/tr44/tr44-14.html#General_Category_Values
    """
    # A single-element decomposition carries no combining marks.
    if len(codepoint.combining_ids) == 1:
        return False

    # At least one trailing decomposition element must be a mark.
    # (Idiomatic "not any(...)" instead of comparing "... is False".)
    if not any(is_mark(table[i]) for i in codepoint.combining_ids[1:]):
        return False

    # The leading element must itself be a letter: either plain, or a
    # letter with marks (checked recursively through the table).
    base = table[codepoint.combining_ids[0]]
    if not (is_plain_letter(base) or is_letter_with_marks(base, table)):
        return False

    return True
134 
def is_letter(codepoint, table):
    """Return True for a letter, with or without diacritical marks."""
    if is_plain_letter(codepoint):
        return True
    return is_letter_with_marks(codepoint, table)
138 
def get_plain_letter(codepoint, table):
    """Return the base codepoint stripped of all marks.

    When the base of a letter-with-marks itself decomposes further, look
    it up recursively through the table until a plain letter is reached.
    """
    if is_letter_with_marks(codepoint, table):
        base = table[codepoint.combining_ids[0]]
        if len(base.combining_ids) > 1:
            # The base still decomposes: keep peeling marks off.
            return get_plain_letter(base, table)
        if is_plain_letter(base):
            return base

        # Should not come here
        assert False
    if is_plain_letter(codepoint):
        return codepoint

    # Should not come here
    assert False
156 
def is_ligature(codepoint, table):
    """Return True when every decomposition element is a letter,
    i.e. the codepoint is a ligature of letters."""
    for element_id in codepoint.combining_ids:
        if not is_letter(table[element_id], table):
            return False
    return True
160 
def get_plain_letters(codepoint, table):
    """Return the list of plain base letters composing a ligature.

    The codepoint must satisfy is_ligature(); each element of its
    decomposition is resolved to its mark-free base via get_plain_letter().
    """
    assert is_ligature(codepoint, table)
    # 'cid' rather than 'id': avoid shadowing the builtin id().
    return [get_plain_letter(table[cid], table) for cid in codepoint.combining_ids]
165 
def parse_cldr_latin_ascii_transliterator(latinAsciiFilePath):
    """Parse the CLDR Latin-ASCII.xml transform file and return a set of
    (src, trg) tuples, where "src" is the original character's codepoint
    and "trg" the substitute string."""
    charactersSet = set()

    # One rule per line. The "src" side is either a literal character
    # (group 1) or a \uXXXX escape (group 2); the "trg" side is either
    # quoted (group 3) or bare (group 4). Exactly one of each pair matches.
    rulePattern = re.compile(r'^(?:(.)|(\\u[0-9a-fA-F]{4})) \u2192 (?:\'(.+)\'|(.+)) ;')

    # Since release 29 of Latin-ASCII.xml, all transliteration rules live
    # in a single tRule block, one rule per line.
    transliterationTreeRoot = ET.parse(latinAsciiFilePath).getroot()
    blockRules = transliterationTreeRoot.findall("./transforms/transform/tRule")
    assert len(blockRules) == 1

    # Process each individual rule line.
    for rule in blockRules[0].text.splitlines():
        matches = rulePattern.search(rule)
        if matches is None:
            continue

        if matches.group(1) is not None:
            src = matches.group(1)
        else:
            # Unicode-escaped form, e.g. "\u0110": decode it to the char.
            src = bytes(matches.group(2), 'UTF-8').decode('unicode-escape')

        if matches.group(3) is not None:
            trg = matches.group(3)
        else:
            trg = matches.group(4)

        # "'" and '"' arrive backslash-escaped inside the rules.
        trg = trg.replace("\\'", "'").replace('\\"', '"')

        # unaccent's rule parser only accepts non-whitespace characters
        # for "src" and "trg" (see unaccent.c).
        if not src.isspace() and not trg.isspace():
            charactersSet.add((ord(src), trg))

    return charactersSet
212 
def special_cases():
    """Return the hand-maintained (codepoint, replacement) pairs that no
    other rule source covers."""
    return {
        # Cyrillic
        (0x0401, u"\u0415"),  # CYRILLIC CAPITAL LETTER IO
        (0x0451, u"\u0435"),  # CYRILLIC SMALL LETTER IO

        # Symbols of "Letterlike Symbols" Unicode Block (U+2100 to U+214F)
        (0x2103, u"\xb0C"),   # DEGREE CELSIUS
        (0x2109, u"\xb0F"),   # DEGREE FAHRENHEIT
        (0x2117, "(P)"),      # SOUND RECORDING COPYRIGHT
    }
227 
def main(args):
    """Build unaccent.rules on stdout from UnicodeData.txt plus, unless
    disabled via --no-ligatures-expansion, the CLDR Latin-ASCII
    transliterator and the hand-maintained special cases."""
    # Strips decomposition type tags such as "<compat>".
    # https://www.unicode.org/reports/tr44/tr44-14.html#Character_Decomposition_Mappings
    decomposition_type_pattern = re.compile(" *<[^>]*> *")

    table = {}        # codepoint id -> Codepoint
    codepoints = []   # Codepoints in file order ('all' would shadow the builtin)

    # Unordered set to ensure uniqueness of (codepoint, replacement) pairs.
    charactersSet = set()

    # Read everything we need from UnicodeData.txt into memory.
    with codecs.open(
            args.unicodeDataFilePath, mode='r', encoding='UTF-8',
    ) as unicodeDataFile:
        for line in unicodeDataFile:
            fields = line.split(";")
            if len(fields) > 5:
                # https://www.unicode.org/reports/tr44/tr44-14.html#UnicodeData.txt
                general_category = fields[2]
                # Use the precompiled pattern's sub(); 'cp_id' avoids
                # shadowing the builtin id().
                decomposition = decomposition_type_pattern.sub(' ', fields[5])
                cp_id = int(fields[0], 16)
                combining_ids = [int(s, 16) for s in decomposition.split(" ") if s != ""]
                codepoint = Codepoint(cp_id, general_category, combining_ids)
                table[cp_id] = codepoint
                codepoints.append(codepoint)

    # Walk through all the codepoints looking for interesting mappings.
    for codepoint in codepoints:
        if codepoint.general_category.startswith('L') and \
           len(codepoint.combining_ids) > 1:
            if is_letter_with_marks(codepoint, table):
                charactersSet.add((codepoint.id,
                                   chr(get_plain_letter(codepoint, table).id)))
            elif args.noLigaturesExpansion is False and is_ligature(codepoint, table):
                charactersSet.add((codepoint.id,
                                   "".join(chr(combining_codepoint.id)
                                           for combining_codepoint
                                           in get_plain_letters(codepoint, table))))
        elif is_mark_to_remove(codepoint):
            # A bare mark maps to nothing: it is simply removed.
            charactersSet.add((codepoint.id, None))

    # Add CLDR Latin-ASCII characters and the hand-maintained extras.
    if not args.noLigaturesExpansion:
        charactersSet |= parse_cldr_latin_ascii_transliterator(args.latinAsciiFilePath)
        charactersSet |= special_cases()

    # Sort by source codepoint for more convenient display.
    charactersList = sorted(charactersSet, key=lambda characterPair: characterPair[0])

    for characterPair in charactersList:
        print_record(characterPair[0], characterPair[1])
281 
if __name__ == "__main__":
    # Command-line front end; see the module header for the data sources.
    parser = argparse.ArgumentParser(
        description='This script builds unaccent.rules on standard output when given the contents of UnicodeData.txt and Latin-ASCII.xml given as arguments.')
    parser.add_argument(
        "--unicode-data-file",
        help="Path to formatted text file corresponding to UnicodeData.txt.",
        type=str, required=True, dest='unicodeDataFilePath')
    parser.add_argument(
        "--latin-ascii-file",
        help="Path to XML file from Unicode Common Locale Data Repository (CLDR) corresponding to Latin-ASCII transliterator (Latin-ASCII.xml).",
        type=str, dest='latinAsciiFilePath')
    parser.add_argument(
        "--no-ligatures-expansion",
        help='Do not expand ligatures and do not use Unicode CLDR Latin-ASCII transliterator. By default, this option is not enabled and "--latin-ascii-file" argument is required. If this option is enabled, "--latin-ascii-file" argument is optional and ignored.',
        action="store_true", dest='noLigaturesExpansion')
    args = parser.parse_args()

    # Unless ligature expansion is disabled, the transliterator file is
    # mandatory.
    if not args.noLigaturesExpansion and args.latinAsciiFilePath is None:
        sys.stderr.write('You must specify the path to Latin-ASCII transliterator file with "--latin-ascii-file" option or use "--no-ligatures-expansion" option. Use "-h" option for help.')
        sys.exit(1)

    main(args)
void print(const void *obj)
Definition: print.c:36
def bytes(source, encoding='ascii', errors='strict')
def __init__(self, id, general_category, combining_ids)
def is_ligature(codepoint, table)
def get_plain_letters(codepoint, table)
#define assert(TEST)
Definition: imath.c:73
def parse_cldr_latin_ascii_transliterator(latinAsciiFilePath)
def print_record(codepoint, letter)
def get_plain_letter(codepoint, table)
def is_letter_with_marks(codepoint, table)
def is_letter(codepoint, table)