1 /* ----------
2  * pg_lzcompress.c -
3  *
4  * This is an implementation of LZ compression for PostgreSQL.
5  * It uses a simple history table and generates 2-3 byte tags
6  * capable of backward copy information for 3-273 bytes with
7  * a max offset of 4095.
8  *
9  * Entry routines:
10  *
11  * int32
12  * pglz_compress(const char *source, int32 slen, char *dest,
13  * const PGLZ_Strategy *strategy);
14  *
15  * source is the input data to be compressed.
16  *
17  * slen is the length of the input data.
18  *
19  * dest is the output area for the compressed result.
20  * It must be at least as big as PGLZ_MAX_OUTPUT(slen).
21  *
22  * strategy is a pointer to some information controlling
23  * the compression algorithm. If NULL, the compiled
24  * in default strategy is used.
25  *
26  * The return value is the number of bytes written in the
27  * buffer dest, or -1 if compression fails; in the latter
28  * case the contents of dest are undefined.
29  *
30  * int32
31  * pglz_decompress(const char *source, int32 slen, char *dest,
32  * int32 rawsize)
33  *
34  * source is the compressed input.
35  *
36  * slen is the length of the compressed input.
37  *
38  * dest is the area where the uncompressed data will be
39  * written to. It is the caller's responsibility to
40  * provide enough space.
41  *
42  * The data is written to dest exactly as it was handed
43  * to pglz_compress(). No terminating zero byte is added.
44  *
45  * rawsize is the length of the uncompressed data.
46  *
47  * The return value is the number of bytes written in the
48  * buffer dest, or -1 if decompression fails.
49  *
50  * The decompression algorithm and internal data format:
51  *
52  * The format consists of nothing but the compressed data itself.
53  *
54  * The data representation is easiest explained by describing
55  * the process of decompression.
56  *
57  * If compressed_size == rawsize, then the data
58  * is stored uncompressed as plain bytes. Thus, the decompressor
59  * simply copies rawsize bytes to the destination.
60  *
61  * Otherwise the first byte tells what to do the next 8 times.
62  * We call this the control byte.
63  *
64  * An unset bit in the control byte means that one uncompressed
65  * byte follows, which is copied from input to output.
66  *
67  * A set bit in the control byte means that a tag of 2-3 bytes
68  * follows. A tag contains the information needed to copy some bytes
69  * that are already in the output buffer to the current location
70  * in the output. Let's call the three tag bytes T1, T2 and T3. The
71  * position of the data to copy is coded as an offset from the
72  * current output position.
73  *
74  * The offset is in the upper nibble of T1 and in T2.
75  * The length is in the lower nibble of T1.
76  *
77  * So the 16 bits of a 2 byte tag are coded as
78  *
79  * 7---T1--0 7---T2--0
80  * OOOO LLLL OOOO OOOO
81  *
82  * This limits the offset to 1-4095 (12 bits) and the length
83  * to 3-18 (4 bits), because 3 is always added to the stored value. A
84  * 2 byte tag for a match of only 2 bytes would save just one control
85  * bit, while costing one count of the maximum length a tag can code.
86  *
87  * In the actual implementation, the 2 byte tag's length is
88  * limited to 3-17, because the value 0xF in the length nibble
89  * has a special meaning: the following byte (T3) has to be
90  * added to a base length value of 18. That makes the final
91  * limits 1-4095 for the offset and 3-273 for the length.
92  *
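 * As a worked example (an editor's illustration, not from the original
 * comment): a match of 20 bytes at offset 100 is emitted as the three
 * tag bytes 0x0f 0x64 0x02 - offset high nibble 0, length nibble 0xf
 * ("extension byte follows"), offset low byte 0x64 = 100, and an
 * extension byte of 20 - 18 = 2. A 5 byte match at offset 100 needs
 * only the two bytes 0x02 0x64.
 *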
93  * Now that we have successfully decoded a tag, we simply copy
94  * the output that occurred <offset> bytes back to the current
95  * output location for the specified <length>. Thus, a
96  * sequence of 200 spaces (think about bpchar fields) can be
97  * coded in 4 bytes: one literal space and a three byte tag to
98  * copy 199 bytes with a -1 offset. Wow - that's a compression
99  * rate of 98%! Well, the implementation needs to save the
100  * original data size too, so we need another 4 bytes for it
101  * and end up with a total compression rate of 96%, which is still
102  * worth a wow.
103  *
104  * The compression algorithm
105  *
106  * The following description uses the numbers of the default strategy.
107  *
108  * The compressor works best for attributes of a size between
109  * 1K and 1M. For smaller items there's not that much chance of
110  * redundancy in the character sequence (except for large areas
111  * of identical bytes like trailing spaces) and for bigger ones
112  * our 4K maximum look-back distance is too small.
113  *
114  * The compressor creates a table of lists of positions.
115  * For each input position (except the last 3), a hash key is
116  * built from the next 4 input bytes and the position is remembered
117  * in the appropriate list. Thus, the table points to linked
118  * lists of strings that are likely to match in at least their
119  * first 4 characters. This is done on the fly while the input
120  * is compressed into the output area. Table entries are only
121  * kept for the last 4096 input positions, since we cannot use
122  * back-pointers larger than that anyway. The size of the hash
123  * table is chosen based on the size of the input - a larger table
124  * has a larger startup cost, as it needs to be initialized to
125  * zero, but reduces the number of hash collisions on long inputs.
126  *
127  * For each byte in the input, its hash key (built from this
128  * byte and the next 3) is used to find the appropriate list
129  * in the table. The lists remember the positions of all bytes
130  * that had the same hash key in the past, in increasing backward
131  * offset order. Now, for all entries in the list consulted, the
132  * match length is computed by comparing the characters from the
133  * entry's position with the characters from the current input
134  * position.
135  *
136  * The compressor starts with a so-called "good_match" of 128.
137  * It is a "prefer speed against compression ratio" optimizer.
138  * So if the first entry looked at already has 128 or more
139  * matching characters, the lookup stops and that position is
140  * used for the next tag in the output.
141  *
142  * For each subsequent entry in the history list, the "good_match"
143  * is lowered by 10%. So the compressor will be happier with
144  * short matches the farther it has to go back in the history -
145  * another "speed against ratio" preference characteristic of
146  * the algorithm.
147  *
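 * (Editor's note, not in the original comment: with the default values
 * good_match = 128 and good_drop = 10, the acceptance threshold decays
 * as 128, 116, 105, 95, ... while walking down a history list.)
 *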
148  * Thus there are 3 stop conditions for the lookup of matches:
149  *
150  * - a match >= good_match is found
151  * - there are no more history entries to look at
152  * - the next history entry is already too far back
153  * to be coded into a tag.
154  *
155  * Finally the match algorithm checks that a match of at least
156  * 3 bytes has been found, because that is the smallest
157  * amount of copy information to code into a tag. If so, a tag
158  * is emitted and all the input bytes covered by it are just
159  * scanned for the history additions; otherwise a literal character
160  * is emitted and only its history entry is added.
161  *
162  * Acknowledgements:
163  *
164  * Many thanks to Adisak Pochanayon, whose article about SLZ
165  * inspired me to write the PostgreSQL compression this way.
166  *
167  * Jan Wieck
168  *
169  * Copyright (c) 1999-2017, PostgreSQL Global Development Group
170  *
171  * src/common/pg_lzcompress.c
172  * ----------
173  */
174 #ifndef FRONTEND
175 #include "postgres.h"
176 #else
177 #include "postgres_fe.h"
178 #endif
179 
180 #include <limits.h>
181 
182 #include "common/pg_lzcompress.h"
183 
184 
185 /* ----------
186  * Local definitions
187  * ----------
188  */
189 #define PGLZ_MAX_HISTORY_LISTS 8192 /* must be power of 2 */
190 #define PGLZ_HISTORY_SIZE 4096
191 #define PGLZ_MAX_MATCH 273
192 
193 
194 /* ----------
195  * PGLZ_HistEntry -
196  *
197  * Linked list for the backward history lookup
198  *
199  * All the entries sharing a hash key are linked in a doubly linked list.
200  * This makes it easy to remove an entry when it's time to recycle it
201  * (because it's more than 4K positions old).
202  * ----------
203  */
204 typedef struct PGLZ_HistEntry
205 {
206  struct PGLZ_HistEntry *next; /* links for my hash key's list */
207  struct PGLZ_HistEntry *prev;
208  int hindex; /* my current hash key */
209  const char *pos; /* my input position */
210 } PGLZ_HistEntry;
211 
212 
213 /* ----------
214  * The provided standard strategies
215  * ----------
216  */
217 static const PGLZ_Strategy strategy_default_data = {
218  32, /* Data chunks less than 32 bytes are not
219  * compressed */
220  INT_MAX, /* No upper limit on what we'll try to
221  * compress */
222  25, /* Require 25% compression rate, or not worth
223  * it */
224  1024, /* Give up if no compression in the first 1KB */
225  128, /* Stop history lookup if a match of 128 bytes
226  * is found */
227  10 /* Lower good match size by 10% at every loop
228  * iteration */
229 };
230 const PGLZ_Strategy *const PGLZ_strategy_default = &strategy_default_data;
231 
232 
233 static const PGLZ_Strategy strategy_always_data = {
234  0, /* Chunks of any size are compressed */
235  INT_MAX,
236  0, /* It's enough to save one single byte */
237  INT_MAX, /* Never give up early */
238  128, /* Stop history lookup if a match of 128 bytes
239  * is found */
240  6 /* Look harder for a good match */
241 };
242 const PGLZ_Strategy *const PGLZ_strategy_always = &strategy_always_data;
243 
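/*
 * Editor's illustration (not part of the original file): callers are not
 * limited to the two strategies above; any PGLZ_Strategy can be passed to
 * pglz_compress(). The name my_strategy below is hypothetical.
 *
 *	static const PGLZ_Strategy my_strategy = {
 *		256,			min_input_size
 *		1024 * 1024,	max_input_size
 *		10,				min_comp_rate (percent)
 *		4096,			first_success_by
 *		64,				match_size_good
 *		10				match_size_drop (percent)
 *	};
 *
 *	pglz_compress(src, srclen, dst, &my_strategy);
 */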
244 
245 /* ----------
246  * Statically allocated work arrays for history
247  * ----------
248  */
249 static int16 hist_start[PGLZ_MAX_HISTORY_LISTS];
250 static PGLZ_HistEntry hist_entries[PGLZ_HISTORY_SIZE + 1];
251 
252 /*
253  * Element 0 in hist_entries is unused, and means 'invalid'. Likewise,
254  * INVALID_ENTRY_PTR in next/prev pointers mean 'invalid'.
255  */
256 #define INVALID_ENTRY 0
257 #define INVALID_ENTRY_PTR (&hist_entries[INVALID_ENTRY])
258 
259 /* ----------
260  * pglz_hist_idx -
261  *
262  * Computes the history table slot for the lookup by the next 4
263  * characters in the input.
264  *
265  * NB: because we use the next 4 characters, we are not guaranteed to
266  * find 3-character matches; they very possibly will be in the wrong
267  * hash list. This seems an acceptable tradeoff for spreading out the
268  * hash keys more.
269  * ----------
270  */
271 #define pglz_hist_idx(_s,_e, _mask) ( \
272  ((((_e) - (_s)) < 4) ? (int) (_s)[0] : \
273  (((_s)[0] << 6) ^ ((_s)[1] << 4) ^ \
274  ((_s)[2] << 2) ^ (_s)[3])) & (_mask) \
275  )
276 
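/*
 * Editor's illustration (not in the original file): with the largest
 * table (mask = 0x1fff) the four input bytes "abcd" hash to
 * (('a' << 6) ^ ('b' << 4) ^ ('c' << 2) ^ 'd') & 0x1fff = 0x1f88.
 * Inputs with fewer than 4 bytes remaining simply use the first byte.
 */
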
277 
278 /* ----------
279  * pglz_hist_add -
280  *
281  * Adds a new entry to the history table.
282  *
283  * If _recycle is true, then we are recycling a previously used entry,
284  * and must first delink it from its old hashcode's linked list.
285  *
286  * NOTE: beware of multiple evaluations of macro's arguments, and note that
287  * _hn and _recycle are modified in the macro.
288  * ----------
289  */
290 #define pglz_hist_add(_hs,_he,_hn,_recycle,_s,_e, _mask) \
291 do { \
292  int __hindex = pglz_hist_idx((_s),(_e), (_mask)); \
293  int16 *__myhsp = &(_hs)[__hindex]; \
294  PGLZ_HistEntry *__myhe = &(_he)[_hn]; \
295  if (_recycle) { \
296  if (__myhe->prev == NULL) \
297  (_hs)[__myhe->hindex] = __myhe->next - (_he); \
298  else \
299  __myhe->prev->next = __myhe->next; \
300  if (__myhe->next != NULL) \
301  __myhe->next->prev = __myhe->prev; \
302  } \
303  __myhe->next = &(_he)[*__myhsp]; \
304  __myhe->prev = NULL; \
305  __myhe->hindex = __hindex; \
306  __myhe->pos = (_s); \
307  /* If there was an existing entry in this hash slot, link */ \
308  /* this new entry to it. However, the 0th entry in the */ \
309  /* entries table is unused, so we can freely scribble on it. */ \
310  /* So don't bother checking if the slot was used - we'll */ \
311  /* scribble on the unused entry if it was not, but that's */ \
312  /* harmless. Avoiding the branch in this critical path */ \
313  /* speeds this up a little bit. */ \
314  /* if (*__myhsp != INVALID_ENTRY) */ \
315  (_he)[(*__myhsp)].prev = __myhe; \
316  *__myhsp = _hn; \
317  if (++(_hn) >= PGLZ_HISTORY_SIZE + 1) { \
318  (_hn) = 1; \
319  (_recycle) = true; \
320  } \
321 } while (0)
322 
323 
324 /* ----------
325  * pglz_out_ctrl -
326  *
327  * Outputs the last control byte and allocates space for a new one when needed.
328  * ----------
329  */
330 #define pglz_out_ctrl(__ctrlp,__ctrlb,__ctrl,__buf) \
331 do { \
332  if ((__ctrl & 0xff) == 0) \
333  { \
334  *(__ctrlp) = __ctrlb; \
335  __ctrlp = (__buf)++; \
336  __ctrlb = 0; \
337  __ctrl = 1; \
338  } \
339 } while (0)
340 
341 
342 /* ----------
343  * pglz_out_literal -
344  *
345  * Outputs a literal byte to the destination buffer including the
346  * appropriate control bit.
347  * ----------
348  */
349 #define pglz_out_literal(_ctrlp,_ctrlb,_ctrl,_buf,_byte) \
350 do { \
351  pglz_out_ctrl(_ctrlp,_ctrlb,_ctrl,_buf); \
352  *(_buf)++ = (unsigned char)(_byte); \
353  _ctrl <<= 1; \
354 } while (0)
355 
356 
357 /* ----------
358  * pglz_out_tag -
359  *
360  * Outputs a backward reference tag of 2-4 bytes (depending on
361  * offset and length) to the destination buffer including the
362  * appropriate control bit.
363  * ----------
364  */
365 #define pglz_out_tag(_ctrlp,_ctrlb,_ctrl,_buf,_len,_off) \
366 do { \
367  pglz_out_ctrl(_ctrlp,_ctrlb,_ctrl,_buf); \
368  _ctrlb |= _ctrl; \
369  _ctrl <<= 1; \
370  if (_len > 17) \
371  { \
372  (_buf)[0] = (unsigned char)((((_off) & 0xf00) >> 4) | 0x0f); \
373  (_buf)[1] = (unsigned char)(((_off) & 0xff)); \
374  (_buf)[2] = (unsigned char)((_len) - 18); \
375  (_buf) += 3; \
376  } else { \
377  (_buf)[0] = (unsigned char)((((_off) & 0xf00) >> 4) | ((_len) - 3)); \
378  (_buf)[1] = (unsigned char)((_off) & 0xff); \
379  (_buf) += 2; \
380  } \
381 } while (0)
382 
383 
384 /* ----------
385  * pglz_find_match -
386  *
387  * Looks up the history table to see whether the current input
388  * matches another sequence of characters starting somewhere earlier
389  * in the input buffer.
390  * ----------
391  */
392 static inline int
393 pglz_find_match(int16 *hstart, const char *input, const char *end,
394  int *lenp, int *offp, int good_match, int good_drop, int mask)
395 {
396  PGLZ_HistEntry *hent;
397  int16 hentno;
398  int32 len = 0;
399  int32 off = 0;
400 
401  /*
402  * Traverse the linked history list until a good enough match is found.
403  */
404  hentno = hstart[pglz_hist_idx(input, end, mask)];
405  hent = &hist_entries[hentno];
406  while (hent != INVALID_ENTRY_PTR)
407  {
408  const char *ip = input;
409  const char *hp = hent->pos;
410  int32 thisoff;
411  int32 thislen;
412 
413  /*
414  * Stop if the offset does not fit into our tag anymore.
415  */
416  thisoff = ip - hp;
417  if (thisoff >= 0x0fff)
418  break;
419 
420  /*
421  * Determine length of match. A better match must be larger than the
422  * best so far. And if we already have a match of 16 or more bytes,
423  * it's worth the call overhead to use memcmp() to check if this match
424  * is equal for the same size. After that we must fall back to
425  * character-by-character comparison to know the exact position where
426  * the diff occurred.
427  */
428  thislen = 0;
429  if (len >= 16)
430  {
431  if (memcmp(ip, hp, len) == 0)
432  {
433  thislen = len;
434  ip += len;
435  hp += len;
436  while (ip < end && *ip == *hp && thislen < PGLZ_MAX_MATCH)
437  {
438  thislen++;
439  ip++;
440  hp++;
441  }
442  }
443  }
444  else
445  {
446  while (ip < end && *ip == *hp && thislen < PGLZ_MAX_MATCH)
447  {
448  thislen++;
449  ip++;
450  hp++;
451  }
452  }
453 
454  /*
455  * Remember this match as the best (if it is)
456  */
457  if (thislen > len)
458  {
459  len = thislen;
460  off = thisoff;
461  }
462 
463  /*
464  * Advance to the next history entry
465  */
466  hent = hent->next;
467 
468  /*
469  * Accept less good matches the more entries we have visited. But
470  * no point in doing calculation if we're at end of list.
471  */
472  if (hent != INVALID_ENTRY_PTR)
473  {
474  if (len >= good_match)
475  break;
476  good_match -= (good_match * good_drop) / 100;
477  }
478  }
479 
480  * Return match information only if it results in at least a one byte
481  * Return match information only if it results at least in one byte
482  * reduction.
483  */
484  if (len > 2)
485  {
486  *lenp = len;
487  *offp = off;
488  return 1;
489  }
490 
491  return 0;
492 }
493 
494 
495 /* ----------
496  * pglz_compress -
497  *
498  * Compresses source into dest using strategy. Returns the number of
499  * bytes written in buffer dest, or -1 if compression fails.
500  * ----------
501  */
502 int32
503 pglz_compress(const char *source, int32 slen, char *dest,
504  const PGLZ_Strategy *strategy)
505 {
506  unsigned char *bp = (unsigned char *) dest;
507  unsigned char *bstart = bp;
508  int hist_next = 1;
509  bool hist_recycle = false;
510  const char *dp = source;
511  const char *dend = source + slen;
512  unsigned char ctrl_dummy = 0;
513  unsigned char *ctrlp = &ctrl_dummy;
514  unsigned char ctrlb = 0;
515  unsigned char ctrl = 0;
516  bool found_match = false;
517  int32 match_len;
518  int32 match_off;
519  int32 good_match;
520  int32 good_drop;
521  int32 result_size;
522  int32 result_max;
523  int32 need_rate;
524  int hashsz;
525  int mask;
526 
527  /*
528  * Our fallback strategy is the default.
529  */
530  if (strategy == NULL)
531  strategy = PGLZ_strategy_default;
532 
533  /*
534  * If the strategy forbids compression at all, or if the source chunk
535  * size is out of range, fail.
536  */
537  if (strategy->match_size_good <= 0 ||
538  slen < strategy->min_input_size ||
539  slen > strategy->max_input_size)
540  return -1;
541 
542  /*
543  * Limit the match parameters to the supported range.
544  */
545  good_match = strategy->match_size_good;
546  if (good_match > PGLZ_MAX_MATCH)
547  good_match = PGLZ_MAX_MATCH;
548  else if (good_match < 17)
549  good_match = 17;
550 
551  good_drop = strategy->match_size_drop;
552  if (good_drop < 0)
553  good_drop = 0;
554  else if (good_drop > 100)
555  good_drop = 100;
556 
557  need_rate = strategy->min_comp_rate;
558  if (need_rate < 0)
559  need_rate = 0;
560  else if (need_rate > 99)
561  need_rate = 99;
562 
563  /*
564  * Compute the maximum result size allowed by the strategy, namely the
565  * input size minus the minimum wanted compression rate. This had better
566  * be <= slen, else we might overrun the provided output buffer.
567  */
568  if (slen > (INT_MAX / 100))
569  {
570  /* Approximate to avoid overflow */
571  result_max = (slen / 100) * (100 - need_rate);
572  }
573  else
574  result_max = (slen * (100 - need_rate)) / 100;
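	/* editor's example: slen = 1000 and need_rate = 25 give result_max = 750 */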
575 
576  /*
577  * Experiments suggest that these hash sizes work pretty well. A large
578  * hash table minimizes collisions, but has a higher startup cost. For a
579  * small input, the startup cost dominates. The table size must be a power
580  * of two.
581  */
582  if (slen < 128)
583  hashsz = 512;
584  else if (slen < 256)
585  hashsz = 1024;
586  else if (slen < 512)
587  hashsz = 2048;
588  else if (slen < 1024)
589  hashsz = 4096;
590  else
591  hashsz = 8192;
592  mask = hashsz - 1;
593 
594  /*
595  * Initialize the history lists to empty. We do not need to zero the
596  * hist_entries[] array; its entries are initialized as they are used.
597  */
598  memset(hist_start, 0, hashsz * sizeof(int16));
599 
600  /*
601  * Compress the source directly into the output buffer.
602  */
603  while (dp < dend)
604  {
605  /*
606  * If we already exceeded the maximum result size, fail.
607  *
608  * We check once per loop; since the loop body could emit as many as 4
609  * bytes (a control byte and 3-byte tag), PGLZ_MAX_OUTPUT() had better
610  * allow 4 slop bytes.
611  */
612  if (bp - bstart >= result_max)
613  return -1;
614 
615  /*
616  * If we've emitted more than first_success_by bytes without finding
617  * anything compressible at all, fail. This lets us fall out
618  * reasonably quickly when looking at incompressible input (such as
619  * pre-compressed data).
620  */
621  if (!found_match && bp - bstart >= strategy->first_success_by)
622  return -1;
623 
624  /*
625  * Try to find a match in the history
626  */
627  if (pglz_find_match(hist_start, dp, dend, &match_len,
628  &match_off, good_match, good_drop, mask))
629  {
630  /*
631  * Create the tag and add history entries for all matched
632  * characters.
633  */
634  pglz_out_tag(ctrlp, ctrlb, ctrl, bp, match_len, match_off);
635  while (match_len--)
636  {
637  pglz_hist_add(hist_start, hist_entries,
638  hist_next, hist_recycle,
639  dp, dend, mask);
640  dp++; /* Do not do this ++ in the line above! */
641  /* The macro would do it four times - Jan. */
642  }
643  found_match = true;
644  }
645  else
646  {
647  /*
648  * No match found. Copy one literal byte.
649  */
650  pglz_out_literal(ctrlp, ctrlb, ctrl, bp, *dp);
651  pglz_hist_add(hist_start, hist_entries,
652  hist_next, hist_recycle,
653  dp, dend, mask);
654  dp++; /* Do not do this ++ in the line above! */
655  /* The macro would do it four times - Jan. */
656  }
657  }
658 
659  /*
660  * Write out the last control byte and check that we haven't overrun the
661  * output size allowed by the strategy.
662  */
663  *ctrlp = ctrlb;
664  result_size = bp - bstart;
665  if (result_size >= result_max)
666  return -1;
667 
668  /* success */
669  return result_size;
670 }
671 
672 
673 /* ----------
674  * pglz_decompress -
675  *
676  * Decompresses source into dest. Returns the number of bytes
677  * decompressed in the destination buffer, or -1 if decompression
678  * fails.
679  * ----------
680  */
681 int32
682 pglz_decompress(const char *source, int32 slen, char *dest,
683  int32 rawsize)
684 {
685  const unsigned char *sp;
686  const unsigned char *srcend;
687  unsigned char *dp;
688  unsigned char *destend;
689 
690  sp = (const unsigned char *) source;
691  srcend = ((const unsigned char *) source) + slen;
692  dp = (unsigned char *) dest;
693  destend = dp + rawsize;
694 
695  while (sp < srcend && dp < destend)
696  {
697  /*
698  * Read one control byte and process the next 8 items (or as many as
699  * remain in the compressed input).
700  */
701  unsigned char ctrl = *sp++;
702  int ctrlc;
703 
704  for (ctrlc = 0; ctrlc < 8 && sp < srcend; ctrlc++)
705  {
706  if (ctrl & 1)
707  {
708  /*
709  * A set control bit means that a tag follows: the first tag
710  * byte contains the match length minus 3 and the upper 4 bits
711  * of the offset. The next byte contains the lower 8 bits of the
712  * offset. If the length is coded as 18, another extension tag
713  * byte tells how much longer the match really was (0-255).
714  */
715  int32 len;
716  int32 off;
717 
718  len = (sp[0] & 0x0f) + 3;
719  off = ((sp[0] & 0xf0) << 4) | sp[1];
720  sp += 2;
721  if (len == 18)
722  len += *sp++;
723 
724  /*
725  * Check for output buffer overrun, to ensure we don't clobber
726  * memory in case of corrupt input. Note: we must advance dp
727  * here to ensure the error is detected below the loop. We
728  * don't simply report the error inside the loop since that will
729  * probably interfere with optimization.
730  */
731  if (dp + len > destend)
732  {
733  dp += len;
734  break;
735  }
736 
737  /*
738  * Now we copy the bytes specified by the tag from OUTPUT to
739  * OUTPUT. It is dangerous and platform dependent to use
740  * memcpy() here, because the copied areas could overlap
741  * extremely!
742  */
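 /*
  * Editor's note: when off == 1, this loop replicates the single
  * preceding output byte len times, which is how long runs of one
  * byte value are reconstructed.
  */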
743  while (len--)
744  {
745  *dp = dp[-off];
746  dp++;
747  }
748  }
749  else
750  {
751  /*
752  * An unset control bit means LITERAL BYTE. So we just copy
753  * one from INPUT to OUTPUT.
754  */
755  if (dp >= destend) /* check for buffer overrun */
756  break; /* do not clobber memory */
757 
758  *dp++ = *sp++;
759  }
760 
761  /*
762  * Advance the control bit
763  */
764  ctrl >>= 1;
765  }
766  }
767 
768  /*
769  * Check we decompressed the right amount.
770  */
771  if (dp != destend || sp != srcend)
772  return -1;
773 
774  /*
775  * That's it.
776  */
777  return rawsize;
778 }
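
/*
 * Usage sketch (an editor's addition, not part of the original file):
 * the round trip below shows how a caller is expected to use
 * pglz_compress() and pglz_decompress(). The guard macro
 * PGLZ_EXAMPLE_USAGE and the function example_roundtrip() are
 * hypothetical names; the guard simply keeps the sketch out of builds.
 */
#ifdef PGLZ_EXAMPLE_USAGE
#include <stdlib.h>
#include <string.h>

static bool
example_roundtrip(const char *src, int32 srclen)
{
	/* dest must be at least PGLZ_MAX_OUTPUT(srclen) bytes */
	char	   *compressed = malloc(PGLZ_MAX_OUTPUT(srclen));
	char	   *restored = malloc(srclen);
	int32		clen;
	bool		ok = false;

	if (compressed == NULL || restored == NULL)
		goto cleanup;

	/* NULL selects the compiled-in default strategy */
	clen = pglz_compress(src, srclen, compressed, NULL);
	if (clen < 0)
		goto cleanup;			/* not compressible enough; caller stores raw data instead */

	/* the caller must remember the raw size; it is not stored in the compressed stream */
	if (pglz_decompress(compressed, clen, restored, srclen) == srclen &&
		memcmp(src, restored, srclen) == 0)
		ok = true;

cleanup:
	free(compressed);
	free(restored);
	return ok;
}
#endif							/* PGLZ_EXAMPLE_USAGE */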