path: root/src/3rdparty/harfbuzz-ng/src/hb-set-private.hh
/*
 * Copyright © 2012  Google, Inc.
 *
 *  This is part of HarfBuzz, a text shaping library.
 *
 * Permission is hereby granted, without written agreement and without
 * license or royalty fees, to use, copy, modify, and distribute this
 * software and its documentation for any purpose, provided that the
 * above copyright notice and the following two paragraphs appear in
 * all copies of this software.
 *
 * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
 * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
 * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
 * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS FOR A PARTICULAR PURPOSE.  THE SOFTWARE PROVIDED HEREUNDER IS
 * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
 * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
 *
 * Google Author(s): Behdad Esfahbod
 */

#ifndef HB_SET_PRIVATE_HH
#define HB_SET_PRIVATE_HH

#include "hb-private.hh"
#include "hb-object-private.hh"


/*
 * The set digests here implement various "filters" that support
 * "approximate member query".  Conceptually these are like Bloom
 * Filter and Quotient Filter; however, they are much smaller, faster,
 * and designed to fit the requirements of our uses for glyph coverage
 * queries.  As a result, our filters have a much higher false-positive
 * rate, which is acceptable for these approximate membership queries.
 */

template <typename mask_t, unsigned int shift>
struct hb_set_digest_lowest_bits_t
{
  ASSERT_POD ();

  static const unsigned int mask_bytes = sizeof (mask_t);
  static const unsigned int mask_bits = sizeof (mask_t) * 8;
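  /* For power-of-two mask sizes, num_bits == log2 (mask_bits): these are
   * the bits of the (shifted) codepoint that select a bit in the mask. */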
  static const unsigned int num_bits = 0
				     + (mask_bytes >= 1 ? 3 : 0)
				     + (mask_bytes >= 2 ? 1 : 0)
				     + (mask_bytes >= 4 ? 1 : 0)
				     + (mask_bytes >= 8 ? 1 : 0)
				     + (mask_bytes >= 16? 1 : 0)
				     + 0;

  ASSERT_STATIC (shift < sizeof (hb_codepoint_t) * 8);
  ASSERT_STATIC (shift + num_bits <= sizeof (hb_codepoint_t) * 8);

  inline void init (void) {
    mask = 0;
  }

  inline void add (hb_codepoint_t g) {
    mask |= mask_for (g);
  }

  inline void add_range (hb_codepoint_t a, hb_codepoint_t b) {
    if ((b >> shift) - (a >> shift) >= mask_bits - 1)
      mask = (mask_t) -1;
    else {
      mask_t ma = mask_for (a);
      mask_t mb = mask_for (b);
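      /* Set every mask bit from a's bit position up to b's, inclusive;
       * the modular subtraction also covers the case where b's bit
       * position wraps around below a's. */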
      mask |= mb + (mb - ma) - (mb < ma);
    }
  }

  inline bool may_have (hb_codepoint_t g) const {
    return !!(mask & mask_for (g));
  }

  private:

  static inline mask_t mask_for (hb_codepoint_t g) {
    return ((mask_t) 1) << ((g >> shift) & (mask_bits - 1));
  }
  mask_t mask;
};
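
/* Example (illustrative): with mask_t = uint32_t and shift = 0, the digest
 * keeps one bit per value of (g & 31):
 *
 *   hb_set_digest_lowest_bits_t<uint32_t, 0> d;
 *   d.init ();
 *   d.add (65);        // sets bit 65 & 31 == 1
 *   d.add (66);        // sets bit 66 & 31 == 2
 *   d.may_have (66);   // true
 *   d.may_have (97);   // also true: 97 & 31 == 1 (false positive)
 *   d.may_have (68);   // false: bit 4 was never set
 *
 * False positives are possible; false negatives are not. */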

template <typename head_t, typename tail_t>
struct hb_set_digest_combiner_t
{
  ASSERT_POD ();

  inline void init (void) {
    head.init ();
    tail.init ();
  }

  inline void add (hb_codepoint_t g) {
    head.add (g);
    tail.add (g);
  }

  inline void add_range (hb_codepoint_t a, hb_codepoint_t b) {
    head.add_range (a, b);
    tail.add_range (a, b);
  }

  inline bool may_have (hb_codepoint_t g) const {
    return head.may_have (g) && tail.may_have (g);
  }

  private:
  head_t head;
  tail_t tail;
};


/*
 * hb_set_digest_t
 *
 * This is a combination of digests that performs "best".
 * There is not much science to this: it's a result of intuition
 * and testing.
 */
typedef hb_set_digest_combiner_t
<
  hb_set_digest_lowest_bits_t<unsigned long, 4>,
  hb_set_digest_combiner_t
  <
    hb_set_digest_lowest_bits_t<unsigned long, 0>,
    hb_set_digest_lowest_bits_t<unsigned long, 9>
  >
> hb_set_digest_t;
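
/* Typical use (sketch): record the glyphs a lookup may act on in a digest,
 * then consult the digest before doing the real work.  Names below are
 * placeholders, not HarfBuzz API:
 *
 *   hb_set_digest_t digest;
 *   digest.init ();
 *   digest.add_range (first_glyph, last_glyph);   // coverage of the lookup
 *   ...
 *   if (digest.may_have (glyph))
 *     apply_lookup (glyph);                        // hypothetical slow path
 *
 * A false positive only costs an unnecessary trip to the slow path; a glyph
 * that was added is never rejected. */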



/*
 * hb_set_t
 */


/* TODO Make this faster and more memory-efficient. */

struct hb_set_t
{
  hb_object_header_t header;
  ASSERT_POD ();
  bool in_error;

  inline void init (void) {
    header.init ();
    clear ();
  }
  inline void fini (void) {
  }
  inline void clear (void) {
    if (unlikely (hb_object_is_inert (this)))
      return;
    in_error = false;
    memset (elts, 0, sizeof elts);
  }
  inline bool is_empty (void) const {
    for (unsigned int i = 0; i < ARRAY_LENGTH (elts); i++)
      if (elts[i])
        return false;
    return true;
  }
  inline void add (hb_codepoint_t g)
  {
    if (unlikely (in_error)) return;
    if (unlikely (g == INVALID)) return;
    if (unlikely (g > MAX_G)) return;
    elt (g) |= mask (g);
  }
  inline void add_range (hb_codepoint_t a, hb_codepoint_t b)
  {
    if (unlikely (in_error)) return;
    /* TODO Speedup */
    for (unsigned int i = a; i < b + 1; i++)
      add (i);
  }
  inline void del (hb_codepoint_t g)
  {
    if (unlikely (in_error)) return;
    if (unlikely (g > MAX_G)) return;
    elt (g) &= ~mask (g);
  }
  inline void del_range (hb_codepoint_t a, hb_codepoint_t b)
  {
    if (unlikely (in_error)) return;
    /* TODO Speedup */
    for (unsigned int i = a; i < b + 1; i++)
      del (i);
  }
  inline bool has (hb_codepoint_t g) const
  {
    if (unlikely (g > MAX_G)) return false;
    return !!(elt (g) & mask (g));
  }
  inline bool intersects (hb_codepoint_t first,
			  hb_codepoint_t last) const
  {
    if (unlikely (first > MAX_G)) return false;
    if (unlikely (last  > MAX_G)) last = MAX_G;
    unsigned int end = last + 1;
    for (hb_codepoint_t i = first; i < end; i++)
      if (has (i))
        return true;
    return false;
  }
  inline bool is_equal (const hb_set_t *other) const
  {
    for (unsigned int i = 0; i < ELTS; i++)
      if (elts[i] != other->elts[i])
        return false;
    return true;
  }
  inline void set (const hb_set_t *other)
  {
    if (unlikely (in_error)) return;
    for (unsigned int i = 0; i < ELTS; i++)
      elts[i] = other->elts[i];
  }
  inline void union_ (const hb_set_t *other)
  {
    if (unlikely (in_error)) return;
    for (unsigned int i = 0; i < ELTS; i++)
      elts[i] |= other->elts[i];
  }
  inline void intersect (const hb_set_t *other)
  {
    if (unlikely (in_error)) return;
    for (unsigned int i = 0; i < ELTS; i++)
      elts[i] &= other->elts[i];
  }
  inline void subtract (const hb_set_t *other)
  {
    if (unlikely (in_error)) return;
    for (unsigned int i = 0; i < ELTS; i++)
      elts[i] &= ~other->elts[i];
  }
  inline void symmetric_difference (const hb_set_t *other)
  {
    if (unlikely (in_error)) return;
    for (unsigned int i = 0; i < ELTS; i++)
      elts[i] ^= other->elts[i];
  }
  inline void invert (void)
  {
    if (unlikely (in_error)) return;
    for (unsigned int i = 0; i < ELTS; i++)
      elts[i] = ~elts[i];
  }
  inline bool next (hb_codepoint_t *codepoint) const
  {
    if (unlikely (*codepoint == INVALID)) {
      hb_codepoint_t i = get_min ();
      if (i != INVALID) {
        *codepoint = i;
	return true;
      } else {
	*codepoint = INVALID;
        return false;
      }
    }
    for (hb_codepoint_t i = *codepoint + 1; i < MAX_G + 1; i++)
      if (has (i)) {
        *codepoint = i;
	return true;
      }
    *codepoint = INVALID;
    return false;
  }
  inline bool next_range (hb_codepoint_t *first, hb_codepoint_t *last) const
  {
    hb_codepoint_t i;

    i = *last;
    if (!next (&i))
    {
      *last = *first = INVALID;
      return false;
    }

    *last = *first = i;
    while (next (&i) && i == *last + 1)
      (*last)++;

    return true;
  }

  inline unsigned int get_population (void) const
  {
    unsigned int count = 0;
    for (unsigned int i = 0; i < ELTS; i++)
      count += _hb_popcount32 (elts[i]);
    return count;
  }
  inline hb_codepoint_t get_min (void) const
  {
    for (unsigned int i = 0; i < ELTS; i++)
      if (elts[i])
	for (unsigned int j = 0; j < BITS; j++)
	  if (elts[i] & (elt_t (1) << j))
	    return i * BITS + j;
    return INVALID;
  }
  inline hb_codepoint_t get_max (void) const
  {
    for (unsigned int i = ELTS; i; i--)
      if (elts[i - 1])
	for (unsigned int j = BITS; j; j--)
	  if (elts[i - 1] & (elt_t (1) << (j - 1)))
	    return (i - 1) * BITS + (j - 1);
    return INVALID;
  }

  typedef uint32_t elt_t;
  static const unsigned int MAX_G = 65536 - 1; /* XXX Fix this... */
  static const unsigned int SHIFT = 5;
  static const unsigned int BITS = (1 << SHIFT);
  static const unsigned int MASK = BITS - 1;
  static const unsigned int ELTS = (MAX_G + 1 + (BITS - 1)) / BITS;
  static const hb_codepoint_t INVALID = HB_SET_VALUE_INVALID;

  elt_t &elt (hb_codepoint_t g) { return elts[g >> SHIFT]; }
  elt_t elt (hb_codepoint_t g) const { return elts[g >> SHIFT]; }
  elt_t mask (hb_codepoint_t g) const { return elt_t (1) << (g & MASK); }

  elt_t elts[ELTS]; /* XXX 8kb */

  ASSERT_STATIC (sizeof (elt_t) * 8 == BITS);
  ASSERT_STATIC (sizeof (elt_t) * 8 * ELTS > MAX_G);
};
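
/* Iteration sketch (illustrative), using the next()/next_range() protocol
 * above; both walk the set in increasing order, starting from INVALID.
 * `set` is assumed to be an already-initialized, non-inert hb_set_t:
 *
 *   set->add_range (10, 12);
 *   set->add (20);
 *
 *   hb_codepoint_t g = hb_set_t::INVALID;
 *   while (set->next (&g))
 *     ;                                      // visits 10, 11, 12, 20
 *
 *   hb_codepoint_t first = hb_set_t::INVALID, last = hb_set_t::INVALID;
 *   while (set->next_range (&first, &last))
 *     ;                                      // visits [10,12], then [20,20]
 */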



#endif /* HB_SET_PRIVATE_HH */