PostgreSQL Source Code  git master
generate_unaccent_rules Namespace Reference

Data Structures

class  Codepoint
 

Functions

def print_record (codepoint, letter)
 
def is_mark_to_remove (codepoint)
 
def is_plain_letter (codepoint)
 
def is_mark (codepoint)
 
def is_letter_with_marks (codepoint, table)
 
def is_letter (codepoint, table)
 
def get_plain_letter (codepoint, table)
 
def is_ligature (codepoint, table)
 
def get_plain_letters (codepoint, table)
 
def parse_cldr_latin_ascii_transliterator (latinAsciiFilePath)
 
def special_cases ()
 
def main (args)
 

Variables

 stdout
 
tuple PLAIN_LETTER_RANGES
 
tuple COMBINING_MARK_RANGES
 
 parser = argparse.ArgumentParser(description='This script builds unaccent.rules on standard output when given the contents of UnicodeData.txt and Latin-ASCII.xml as arguments.')
 
 help
 
 type
 
 str
 
 required
 
 True
 
 dest
 
 action
 
 args = parser.parse_args()
 

Function Documentation

◆ get_plain_letter()

def generate_unaccent_rules.get_plain_letter(codepoint, table)
Return the base codepoint without marks. If this codepoint has more
than one combining character, do a recursive lookup on the table to
find out its plain base letter.

Definition at line 130 of file generate_unaccent_rules.py.

def get_plain_letter(codepoint, table):
    """Return the base codepoint without marks. If this codepoint has more
    than one combining character, do a recursive lookup on the table to
    find out its plain base letter."""
    if is_letter_with_marks(codepoint, table):
        if len(table[codepoint.combining_ids[0]].combining_ids) > 1:
            return get_plain_letter(table[codepoint.combining_ids[0]], table)
        elif is_plain_letter(table[codepoint.combining_ids[0]]):
            return table[codepoint.combining_ids[0]]

        # Should not come here
        assert False, 'Codepoint U+%0.2X' % codepoint.id
    elif is_plain_letter(codepoint):
        return codepoint

    # Should not come here
    assert False, 'Codepoint U+%0.2X' % codepoint.id

References is_letter_with_marks() and is_plain_letter().

Referenced by get_plain_letters(), and main().
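A minimal usage sketch (not part of the script itself), assuming the Codepoint(id, general_category, combining_ids) constructor shown in main() and the functions above are available in the current session. The decomposition used here is the UnicodeData.txt mapping for U+00E9 (LATIN SMALL LETTER E WITH ACUTE):

# Hypothetical three-entry table: U+00E9 decomposes to U+0065 ('e')
# followed by U+0301 (COMBINING ACUTE ACCENT) in UnicodeData.txt.
table = {
    0x0065: Codepoint(0x0065, "Ll", []),                # LATIN SMALL LETTER E
    0x0301: Codepoint(0x0301, "Mn", []),                # COMBINING ACUTE ACCENT
    0x00E9: Codepoint(0x00E9, "Ll", [0x0065, 0x0301]),  # LATIN SMALL LETTER E WITH ACUTE
}

base = get_plain_letter(table[0x00E9], table)
print(chr(base.id))  # prints: e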

◆ get_plain_letters()

def generate_unaccent_rules.get_plain_letters(codepoint, table)
Return a list of plain letters from a ligature.

Definition at line 154 of file generate_unaccent_rules.py.

def get_plain_letters(codepoint, table):
    """Return a list of plain letters from a ligature."""
    assert(is_ligature(codepoint, table))
    return [get_plain_letter(table[id], table) for id in codepoint.combining_ids]

References get_plain_letter() and is_ligature().

Referenced by main().
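For illustration, a hedged sketch that builds a three-entry table by hand (again assuming the Codepoint constructor from main()). U+0132 LATIN CAPITAL LIGATURE IJ carries the compatibility decomposition I + J; main() strips the decomposition type tag, leaving only the two letter codepoints in combining_ids:

# Hypothetical table entries after the <compat> tag has been stripped by main().
table = {
    0x0049: Codepoint(0x0049, "Lu", []),                # LATIN CAPITAL LETTER I
    0x004A: Codepoint(0x004A, "Lu", []),                # LATIN CAPITAL LETTER J
    0x0132: Codepoint(0x0132, "Lu", [0x0049, 0x004A]),  # LATIN CAPITAL LIGATURE IJ
}

print([chr(cp.id) for cp in get_plain_letters(table[0x0132], table)])  # prints: ['I', 'J']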

◆ is_letter()

def generate_unaccent_rules.is_letter(codepoint, table)
Return true for letter with or without diacritical marks.

Definition at line 125 of file generate_unaccent_rules.py.

def is_letter(codepoint, table):
    """Return true for letter with or without diacritical marks."""
    return is_plain_letter(codepoint) or is_letter_with_marks(codepoint, table)

References is_letter_with_marks(), and is_plain_letter().

Referenced by is_ligature().

◆ is_letter_with_marks()

def generate_unaccent_rules.is_letter_with_marks(codepoint, table)
Returns true for letters combined with one or more marks.

Definition at line 103 of file generate_unaccent_rules.py.

def is_letter_with_marks(codepoint, table):
    """Returns true for letters combined with one or more marks."""
    # See https://www.unicode.org/reports/tr44/tr44-14.html#General_Category_Values

    # Letter may have no combining characters, in which case it has
    # no marks.
    if len(codepoint.combining_ids) == 1:
        return False

    # A letter without diacritical marks has none of them.
    if any(is_mark(table[i]) for i in codepoint.combining_ids[1:]) is False:
        return False

    # Check if the base letter of this letter has marks.
    codepoint_base = codepoint.combining_ids[0]
    if is_plain_letter(table[codepoint_base]) is False and \
       is_letter_with_marks(table[codepoint_base], table) is False:
        return False

    return True

References is_mark() and is_plain_letter().

Referenced by get_plain_letter(), is_letter(), and main().
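The decomposition list is interpreted with combining_ids[0] as the base letter and the remaining entries as candidate marks; singleton decompositions are rejected by the first check. A small illustrative sketch under the same Codepoint assumption as above:

# Hypothetical table entries (Codepoint constructor from main()).
table = {
    0x0041: Codepoint(0x0041, "Lu", []),                # plain 'A', no decomposition
    0x0300: Codepoint(0x0300, "Mn", []),                # COMBINING GRAVE ACCENT
    0x00C0: Codepoint(0x00C0, "Lu", [0x0041, 0x0300]),  # 'A' + grave accent
    0x03A9: Codepoint(0x03A9, "Lu", []),                # GREEK CAPITAL LETTER OMEGA
    0x2126: Codepoint(0x2126, "Lu", [0x03A9]),          # OHM SIGN, singleton decomposition
}

print(is_letter_with_marks(table[0x00C0], table))  # True: base letter plus one mark
print(is_letter_with_marks(table[0x2126], table))  # False: singleton decomposition, no marks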

◆ is_ligature()

def generate_unaccent_rules.is_ligature(codepoint, table)
Return true for letters combined with letters.

Definition at line 149 of file generate_unaccent_rules.py.

def is_ligature(codepoint, table):
    """Return true for letters combined with letters."""
    return all(is_letter(table[i], table) for i in codepoint.combining_ids)

References is_letter().

Referenced by get_plain_letters(), and main().

◆ is_mark()

def generate_unaccent_rules.is_mark(codepoint)
Returns true for diacritical marks (combining codepoints).

Definition at line 98 of file generate_unaccent_rules.py.

def is_mark(codepoint):
    """Returns true for diacritical marks (combining codepoints)."""
    return codepoint.general_category in ("Mn", "Me", "Mc")

Referenced by is_letter_with_marks(), and is_mark_to_remove().

◆ is_mark_to_remove()

def generate_unaccent_rules.is_mark_to_remove(codepoint)
Return true if this is a combining mark to remove.

Definition at line 79 of file generate_unaccent_rules.py.

def is_mark_to_remove(codepoint):
    """Return true if this is a combining mark to remove."""
    if not is_mark(codepoint):
        return False

    for begin, end in COMBINING_MARK_RANGES:
        if codepoint.id >= begin and codepoint.id <= end:
            return True
    return False

References is_mark().

Referenced by main().
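Only marks that fall inside COMBINING_MARK_RANGES (see Variable Documentation below) are scheduled for removal; other combining codepoints are left alone. An illustrative sketch, assuming the Codepoint constructor from main():

# Hypothetical Codepoint objects (constructor from main()).
acute = Codepoint(0x0301, "Mn", [])      # COMBINING ACUTE ACCENT, inside 0x0300-0x0362
small_a = Codepoint(0x0363, "Mn", [])    # COMBINING LATIN SMALL LETTER A, outside the ranges
capital_a = Codepoint(0x0041, "Lu", [])  # plain letter, not a mark at all

print(is_mark_to_remove(acute))      # True
print(is_mark_to_remove(small_a))    # False: a mark, but not in COMBINING_MARK_RANGES
print(is_mark_to_remove(capital_a))  # False: not a mark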

◆ is_plain_letter()

def generate_unaccent_rules.is_plain_letter(codepoint)
Return true if codepoint represents a "plain letter".

Definition at line 90 of file generate_unaccent_rules.py.

def is_plain_letter(codepoint):
    """Return true if codepoint represents a "plain letter"."""
    for begin, end in PLAIN_LETTER_RANGES:
        if codepoint.id >= begin and codepoint.id <= end:
            return True
    return False

Referenced by get_plain_letter(), is_letter(), and is_letter_with_marks().

◆ main()

def generate_unaccent_rules.main(args)

Definition at line 223 of file generate_unaccent_rules.py.

def main(args):
    # https://www.unicode.org/reports/tr44/tr44-14.html#Character_Decomposition_Mappings
    decomposition_type_pattern = re.compile(" *<[^>]*> *")

    table = {}
    all = []

    # unordered set to ensure uniqueness
    charactersSet = set()

    # read file UnicodeData.txt
    with codecs.open(
            args.unicodeDataFilePath, mode='r', encoding='UTF-8',
            ) as unicodeDataFile:
        # read everything we need into memory
        for line in unicodeDataFile:
            fields = line.split(";")
            if len(fields) > 5:
                # https://www.unicode.org/reports/tr44/tr44-14.html#UnicodeData.txt
                general_category = fields[2]
                decomposition = fields[5]
                decomposition = re.sub(decomposition_type_pattern, ' ', decomposition)
                id = int(fields[0], 16)
                combining_ids = [int(s, 16) for s in decomposition.split(" ") if s != ""]
                codepoint = Codepoint(id, general_category, combining_ids)
                table[id] = codepoint
                all.append(codepoint)

    # walk through all the codepoints looking for interesting mappings
    for codepoint in all:
        if codepoint.general_category.startswith('L') and \
           len(codepoint.combining_ids) > 1:
            if is_letter_with_marks(codepoint, table):
                charactersSet.add((codepoint.id,
                                   chr(get_plain_letter(codepoint, table).id)))
            elif args.noLigaturesExpansion is False and is_ligature(codepoint, table):
                charactersSet.add((codepoint.id,
                                   "".join(chr(combining_codepoint.id)
                                           for combining_codepoint
                                           in get_plain_letters(codepoint, table))))
        elif is_mark_to_remove(codepoint):
            charactersSet.add((codepoint.id, None))

    # add CLDR Latin-ASCII characters
    if not args.noLigaturesExpansion:
        charactersSet |= parse_cldr_latin_ascii_transliterator(args.latinAsciiFilePath)
        charactersSet |= special_cases()

    # sort for more convenient display
    charactersList = sorted(charactersSet, key=lambda characterPair: characterPair[0])

    for characterPair in charactersList:
        print_record(characterPair[0], characterPair[1])

References get_plain_letter(), get_plain_letters(), is_letter_with_marks(), is_ligature(), is_mark_to_remove(), parse_cldr_latin_ascii_transliterator(), print_record(), and special_cases().
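To make the parsing loop concrete, the following standalone sketch applies the same field handling to one representative UnicodeData.txt record (U+00C0); only fields[2] (general category) and fields[5] (decomposition mapping) matter here:

import re

# One representative UnicodeData.txt record; fields are ';'-separated,
# fields[2] is the general category and fields[5] the decomposition mapping.
line = "00C0;LATIN CAPITAL LETTER A WITH GRAVE;Lu;0;L;0041 0300;;;;N;LATIN CAPITAL LETTER A GRAVE;;;00E0;"

decomposition_type_pattern = re.compile(" *<[^>]*> *")
fields = line.split(";")
general_category = fields[2]                      # "Lu"
decomposition = re.sub(decomposition_type_pattern, ' ', fields[5])
combining_ids = [int(s, 16) for s in decomposition.split(" ") if s != ""]
print(hex(int(fields[0], 16)), general_category, [hex(i) for i in combining_ids])
# prints: 0xc0 Lu ['0x41', '0x300']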

◆ parse_cldr_latin_ascii_transliterator()

def generate_unaccent_rules.parse_cldr_latin_ascii_transliterator(latinAsciiFilePath)
Parse the XML file and return a set of tuples (src, trg), where "src"
is the original character and "trg" the substitute.

Definition at line 160 of file generate_unaccent_rules.py.

def parse_cldr_latin_ascii_transliterator(latinAsciiFilePath):
    """Parse the XML file and return a set of tuples (src, trg), where "src"
    is the original character and "trg" the substitute."""
    charactersSet = set()

    # RegEx to parse rules
    rulePattern = re.compile(r'^(?:(.)|(\\u[0-9a-fA-F]{4})) \u2192 (?:\'(.+)\'|(.+)) ;')

    # construct tree from XML
    transliterationTree = ET.parse(latinAsciiFilePath)
    transliterationTreeRoot = transliterationTree.getroot()

    # Fetch all the transliteration rules. Since release 29 of Latin-ASCII.xml
    # all the transliteration rules are located in a single tRule block with
    # all rules separated into separate lines.
    blockRules = transliterationTreeRoot.findall("./transforms/transform/tRule")
    assert(len(blockRules) == 1)

    # Split the block of rules into one element per line.
    rules = blockRules[0].text.splitlines()

    # And finish the processing of each individual rule.
    for rule in rules:
        matches = rulePattern.search(rule)

        # The regular expression captures four groups corresponding
        # to the characters.
        #
        # Group 1: plain "src" char. Empty if group 2 is not.
        # Group 2: unicode-escaped "src" char (e.g. "\u0110"). Empty if group 1 is not.
        #
        # Group 3: "trg" char between quotes. Empty if group 4 is not.
        # Group 4: plain "trg" char. Empty if group 3 is not.
        if matches is not None:
            src = matches.group(1) if matches.group(1) is not None else bytes(matches.group(2), 'UTF-8').decode('unicode-escape')
            trg = matches.group(3) if matches.group(3) is not None else matches.group(4)

            # "'" and """ are escaped
            trg = trg.replace("\\'", "'").replace('\\"', '"')

            # the parser of unaccent only accepts non-whitespace characters
            # for "src" and "trg" (see unaccent.c)
            if not src.isspace() and not trg.isspace():
                charactersSet.add((ord(src), trg))

    return charactersSet


Referenced by main().
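The following standalone sketch shows what rulePattern extracts from two hypothetical rule lines written in the shape the function expects (the real lines come from the single tRule block of Latin-ASCII.xml); the second line uses the backslash-u escaped source form handled by group 2:

import re

rulePattern = re.compile(r'^(?:(.)|(\\u[0-9a-fA-F]{4})) \u2192 (?:\'(.+)\'|(.+)) ;')

# Two hypothetical rule lines in the shape the regex expects.
for rule in ("Æ \u2192 AE ;", "\\u00D0 \u2192 D ;"):
    m = rulePattern.search(rule)
    src = m.group(1) if m.group(1) is not None else bytes(m.group(2), 'UTF-8').decode('unicode-escape')
    trg = m.group(3) if m.group(3) is not None else m.group(4)
    print(ord(src), repr(trg))
# prints: 198 'AE'
#         208 'D'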

◆ print_record()

def generate_unaccent_rules.print_record(codepoint, letter)

Definition at line 59 of file generate_unaccent_rules.py.

def print_record(codepoint, letter):
    if letter:
        # If the letter has whitespace or double quotes, escape double
        # quotes and apply more quotes around it.
        if (' ' in letter) or ('"' in letter):
            letter = '"' + letter.replace('"', '""') + '"'
        output = chr(codepoint) + "\t" + letter
    else:
        output = chr(codepoint)

    print(output)

References print().

Referenced by main().
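Each call emits one unaccent.rules line: the source character, then a tab and the replacement when there is one; replacements containing spaces or double quotes are wrapped in double quotes. A few illustrative calls, assuming the definition above is loaded:

print_record(0x00C0, "A")    # prints: À, a tab, then A
print_record(0x0132, "IJ")   # prints: Ĳ, a tab, then IJ
print_record(0x0301, None)   # prints the bare combining acute accent, no replacement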

◆ special_cases()

def generate_unaccent_rules.special_cases()
Returns the special cases which are not handled by other methods

Definition at line 208 of file generate_unaccent_rules.py.

def special_cases():
    """Returns the special cases which are not handled by other methods"""
    charactersSet = set()

    # Cyrillic
    charactersSet.add((0x0401, "\u0415"))  # CYRILLIC CAPITAL LETTER IO
    charactersSet.add((0x0451, "\u0435"))  # CYRILLIC SMALL LETTER IO

    # Symbols of "Letterlike Symbols" Unicode Block (U+2100 to U+214F)
    charactersSet.add((0x2103, "\xb0C"))  # DEGREE CELSIUS
    charactersSet.add((0x2109, "\xb0F"))  # DEGREE FAHRENHEIT

    return charactersSet

Referenced by main().

Variable Documentation

◆ action

generate_unaccent_rules.action

Definition at line 282 of file generate_unaccent_rules.py.


◆ args

generate_unaccent_rules.args = parser.parse_args()

Definition at line 283 of file generate_unaccent_rules.py.


◆ COMBINING_MARK_RANGES

tuple generate_unaccent_rules.COMBINING_MARK_RANGES
Initial value:
= ((0x0300, 0x0362),  # Mn: Accents, IPA
   (0x20dd, 0x20E0),  # Me: Symbols
   (0x20e2, 0x20e4),)

Definition at line 54 of file generate_unaccent_rules.py.

◆ dest

generate_unaccent_rules.dest

Definition at line 280 of file generate_unaccent_rules.py.


◆ help

generate_unaccent_rules.help

Definition at line 280 of file generate_unaccent_rules.py.

◆ parser

generate_unaccent_rules.parser = argparse.ArgumentParser(description='This script builds unaccent.rules on standard output when given the contents of UnicodeData.txt and Latin-ASCII.xml as arguments.')

Definition at line 279 of file generate_unaccent_rules.py.

◆ PLAIN_LETTER_RANGES

tuple generate_unaccent_rules.PLAIN_LETTER_RANGES
Initial value:
= ((ord('a'), ord('z')),  # Latin lower case
   (ord('A'), ord('Z')),  # Latin upper case
   (0x03b1, 0x03c9),      # GREEK SMALL LETTER ALPHA, GREEK SMALL LETTER OMEGA
   (0x0391, 0x03a9))

Definition at line 41 of file generate_unaccent_rules.py.

◆ required

◆ stdout

◆ str

generate_unaccent_rules.str

Definition at line 280 of file generate_unaccent_rules.py.

◆ True

generate_unaccent_rules.True

Definition at line 280 of file generate_unaccent_rules.py.

◆ type

generate_unaccent_rules.type

Definition at line 280 of file generate_unaccent_rules.py.