Diffstat (limited to 'src/3rdparty/harfbuzz-ng/src/hb-ot-var-gvar-table.hh')
-rw-r--r--  src/3rdparty/harfbuzz-ng/src/hb-ot-var-gvar-table.hh | 1025
1 file changed, 599 insertions(+), 426 deletions(-)
diff --git a/src/3rdparty/harfbuzz-ng/src/hb-ot-var-gvar-table.hh b/src/3rdparty/harfbuzz-ng/src/hb-ot-var-gvar-table.hh
index 49b5532d40..1c7a1f6c1e 100644
--- a/src/3rdparty/harfbuzz-ng/src/hb-ot-var-gvar-table.hh
+++ b/src/3rdparty/harfbuzz-ng/src/hb-ot-var-gvar-table.hh
@@ -29,6 +29,7 @@
#define HB_OT_VAR_GVAR_TABLE_HH
#include "hb-open-type.hh"
+#include "hb-ot-var-common.hh"
/*
* gvar -- Glyph Variation Table
@@ -38,348 +39,254 @@
namespace OT {
-struct contour_point_t
+struct GlyphVariationData : TupleVariationData
+{};
+
+struct glyph_variations_t
{
- void init (float x_ = 0.f, float y_ = 0.f, bool is_end_point_ = false)
- { flag = 0; x = x_; y = y_; is_end_point = is_end_point_; }
+ using tuple_variations_t = TupleVariationData::tuple_variations_t;
+ hb_vector_t<tuple_variations_t> glyph_variations;
- void translate (const contour_point_t &p) { x += p.x; y += p.y; }
+ hb_vector_t<char> compiled_shared_tuples;
+ private:
+ unsigned shared_tuples_count = 0;
- uint8_t flag;
- float x, y;
- bool is_end_point;
-};
+ /* shared coords -> index map after instantiation */
+ hb_hashmap_t<const hb_vector_t<char>*, unsigned> shared_tuples_idx_map;
-struct contour_point_vector_t : hb_vector_t<contour_point_t>
-{
- void extend (const hb_array_t<contour_point_t> &a)
- {
- unsigned int old_len = length;
- resize (old_len + a.length);
- for (unsigned int i = 0; i < a.length; i++)
- (*this)[old_len + i] = a[i];
- }
+ public:
+ unsigned compiled_shared_tuples_count () const
+ { return shared_tuples_count; }
- void transform (const float (&matrix)[4])
+ unsigned compiled_byte_size () const
{
- for (unsigned int i = 0; i < length; i++)
- {
- contour_point_t &p = (*this)[i];
- float x_ = p.x * matrix[0] + p.y * matrix[2];
- p.y = p.x * matrix[1] + p.y * matrix[3];
- p.x = x_;
- }
- }
+ unsigned byte_size = 0;
+ for (const auto& _ : glyph_variations)
+ byte_size += _.get_compiled_byte_size ();
- void translate (const contour_point_t& delta)
- {
- for (unsigned int i = 0; i < length; i++)
- (*this)[i].translate (delta);
+ return byte_size;
}
-};
-
-/* https://docs.microsoft.com/en-us/typography/opentype/spec/otvarcommonformats#tuplevariationheader */
-struct TupleVariationHeader
-{
- unsigned get_size (unsigned axis_count) const
- { return min_size + get_all_tuples (axis_count).get_size (); }
-
- unsigned get_data_size () const { return varDataSize; }
- const TupleVariationHeader &get_next (unsigned axis_count) const
- { return StructAtOffset<TupleVariationHeader> (this, get_size (axis_count)); }
-
- float calculate_scalar (const int *coords, unsigned int coord_count,
- const hb_array_t<const F2DOT14> shared_tuples) const
+ bool create_from_glyphs_var_data (unsigned axis_count,
+ const hb_array_t<const F2DOT14> shared_tuples,
+ const hb_subset_plan_t *plan,
+ const hb_hashmap_t<hb_codepoint_t, hb_bytes_t>& new_gid_var_data_map)
{
- hb_array_t<const F2DOT14> peak_tuple;
+ if (unlikely (!glyph_variations.alloc (plan->new_to_old_gid_list.length, true)))
+ return false;
- if (has_peak ())
- peak_tuple = get_peak_tuple (coord_count);
- else
+ auto it = hb_iter (plan->new_to_old_gid_list);
+ for (auto &_ : it)
{
- unsigned int index = get_index ();
- if (unlikely (index * coord_count >= shared_tuples.length))
- return 0.f;
- peak_tuple = shared_tuples.sub_array (coord_count * index, coord_count);
- }
+ hb_codepoint_t new_gid = _.first;
+ contour_point_vector_t *all_contour_points;
+ if (!new_gid_var_data_map.has (new_gid) ||
+ !plan->new_gid_contour_points_map.has (new_gid, &all_contour_points))
+ return false;
+ hb_bytes_t var_data = new_gid_var_data_map.get (new_gid);
+
+ const GlyphVariationData* p = reinterpret_cast<const GlyphVariationData*> (var_data.arrayZ);
+ hb_vector_t<unsigned> shared_indices;
+ GlyphVariationData::tuple_iterator_t iterator;
+ tuple_variations_t tuple_vars;
+
+ /* If the variation data is empty, push an empty struct into the vector
+ * to keep it in sync with new_to_old_gid_list */
+ if (!var_data || !p->has_data () || !all_contour_points->length ||
+ !GlyphVariationData::get_tuple_iterator (var_data, axis_count,
+ var_data.arrayZ,
+ shared_indices, &iterator))
+ {
+ glyph_variations.push (std::move (tuple_vars));
+ continue;
+ }
- hb_array_t<const F2DOT14> start_tuple;
- hb_array_t<const F2DOT14> end_tuple;
- if (has_intermediate ())
- {
- start_tuple = get_start_tuple (coord_count);
- end_tuple = get_end_tuple (coord_count);
+ if (!p->decompile_tuple_variations (all_contour_points->length, true /* is_gvar */,
+ iterator, &(plan->axes_old_index_tag_map),
+ shared_indices, shared_tuples,
+ tuple_vars /* OUT */))
+ return false;
+ glyph_variations.push (std::move (tuple_vars));
}
+ return !glyph_variations.in_error () && glyph_variations.length == plan->new_to_old_gid_list.length;
+ }
- float scalar = 1.f;
- for (unsigned int i = 0; i < coord_count; i++)
+ bool instantiate (const hb_subset_plan_t *plan)
+ {
+ unsigned count = plan->new_to_old_gid_list.length;
+ for (unsigned i = 0; i < count; i++)
{
- int v = coords[i];
- int peak = peak_tuple[i];
- if (!peak || v == peak) continue;
-
- if (has_intermediate ())
- {
- int start = start_tuple[i];
- int end = end_tuple[i];
- if (unlikely (start > peak || peak > end ||
- (start < 0 && end > 0 && peak))) continue;
- if (v < start || v > end) return 0.f;
- if (v < peak)
- { if (peak != start) scalar *= (float) (v - start) / (peak - start); }
- else
- { if (peak != end) scalar *= (float) (end - v) / (end - peak); }
- }
- else if (!v || v < hb_min (0, peak) || v > hb_max (0, peak)) return 0.f;
- else
- scalar *= (float) v / peak;
+ hb_codepoint_t new_gid = plan->new_to_old_gid_list[i].first;
+ contour_point_vector_t *all_points;
+ if (!plan->new_gid_contour_points_map.has (new_gid, &all_points))
+ return false;
+ if (!glyph_variations[i].instantiate (plan->axes_location, plan->axes_triple_distances, all_points))
+ return false;
}
- return scalar;
+ return true;
}
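
The calculate_scalar() machinery removed in this hunk is not dropped; it moves to the shared tuple-variation code, hence the new hb-ot-var-common.hh include. Its per-axis term is the standard OpenType tuple-scalar formula. A minimal standalone sketch, with plain ints standing in for the normalized F2DOT14 coordinates (axis_scalar is a hypothetical helper, not HarfBuzz API):

#include <algorithm>

// One axis's contribution to a tuple's scalar; the tuple's scalar is the
// product of this term over all axes, and any 0 term kills the tuple.
static float axis_scalar (int v, int peak, int start, int end,
                          bool has_intermediate)
{
  if (!peak || v == peak) return 1.f;
  if (has_intermediate)
  {
    if (start > peak || peak > end || (start < 0 && end > 0 && peak))
      return 1.f;                       /* ill-formed region: axis ignored */
    if (v < start || v > end) return 0.f;
    if (v < peak)
      return peak == start ? 1.f : (float) (v - start) / (peak - start);
    return peak == end ? 1.f : (float) (end - v) / (end - peak);
  }
  if (!v || v < std::min (0, peak) || v > std::max (0, peak)) return 0.f;
  return (float) v / peak;
}
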
- bool has_peak () const { return tupleIndex & TuppleIndex::EmbeddedPeakTuple; }
- bool has_intermediate () const { return tupleIndex & TuppleIndex::IntermediateRegion; }
- bool has_private_points () const { return tupleIndex & TuppleIndex::PrivatePointNumbers; }
- unsigned get_index () const { return tupleIndex & TuppleIndex::TupleIndexMask; }
-
- protected:
- struct TuppleIndex : HBUINT16
+ bool compile_bytes (const hb_map_t& axes_index_map,
+ const hb_map_t& axes_old_index_tag_map)
{
- enum Flags {
- EmbeddedPeakTuple = 0x8000u,
- IntermediateRegion = 0x4000u,
- PrivatePointNumbers = 0x2000u,
- TupleIndexMask = 0x0FFFu
- };
-
- DEFINE_SIZE_STATIC (2);
- };
-
- hb_array_t<const F2DOT14> get_all_tuples (unsigned axis_count) const
- { return StructAfter<UnsizedArrayOf<F2DOT14>> (tupleIndex).as_array ((has_peak () + has_intermediate () * 2) * axis_count); }
- hb_array_t<const F2DOT14> get_peak_tuple (unsigned axis_count) const
- { return get_all_tuples (axis_count).sub_array (0, axis_count); }
- hb_array_t<const F2DOT14> get_start_tuple (unsigned axis_count) const
- { return get_all_tuples (axis_count).sub_array (has_peak () * axis_count, axis_count); }
- hb_array_t<const F2DOT14> get_end_tuple (unsigned axis_count) const
- { return get_all_tuples (axis_count).sub_array (has_peak () * axis_count + axis_count, axis_count); }
-
- HBUINT16 varDataSize; /* The size in bytes of the serialized
- * data for this tuple variation table. */
- TuppleIndex tupleIndex; /* A packed field. The high 4 bits are flags (see below).
- The low 12 bits are an index into a shared tuple
- records array. */
- /* UnsizedArrayOf<F2DOT14> peakTuple - optional */
- /* Peak tuple record for this tuple variation table — optional,
- * determined by flags in the tupleIndex value.
- *
- * Note that this must always be included in the 'cvar' table. */
- /* UnsizedArrayOf<F2DOT14> intermediateStartTuple - optional */
- /* Intermediate start tuple record for this tuple variation table — optional,
- determined by flags in the tupleIndex value. */
- /* UnsizedArrayOf<F2DOT14> intermediateEndTuple - optional */
- /* Intermediate end tuple record for this tuple variation table — optional,
- * determined by flags in the tupleIndex value. */
- public:
- DEFINE_SIZE_MIN (4);
-};
+ if (!compile_shared_tuples (axes_index_map, axes_old_index_tag_map))
+ return false;
+ for (tuple_variations_t& vars: glyph_variations)
+ if (!vars.compile_bytes (axes_index_map, axes_old_index_tag_map,
+ true, /* use shared points */
+ &shared_tuples_idx_map))
+ return false;
-struct GlyphVariationData
-{
- const TupleVariationHeader &get_tuple_var_header (void) const
- { return StructAfter<TupleVariationHeader> (data); }
+ return true;
+ }
- struct tuple_iterator_t
+ bool compile_shared_tuples (const hb_map_t& axes_index_map,
+ const hb_map_t& axes_old_index_tag_map)
{
- void init (hb_bytes_t var_data_bytes_, unsigned int axis_count_)
- {
- var_data_bytes = var_data_bytes_;
- var_data = var_data_bytes_.as<GlyphVariationData> ();
- index = 0;
- axis_count = axis_count_;
- current_tuple = &var_data->get_tuple_var_header ();
- data_offset = 0;
- }
+ /* key is a pointer to compiled_peak_coords inside each tuple; the hashing
+ * function always derefs pointers first */
+ hb_hashmap_t<const hb_vector_t<char>*, unsigned> coords_count_map;
- bool get_shared_indices (hb_vector_t<unsigned int> &shared_indices /* OUT */)
+ /* count the num of shared coords */
+ for (tuple_variations_t& vars: glyph_variations)
{
- if (var_data->has_shared_point_numbers ())
+ for (tuple_delta_t& var : vars.tuple_vars)
{
- const HBUINT8 *base = &(var_data+var_data->data);
- const HBUINT8 *p = base;
- if (!unpack_points (p, shared_indices, var_data_bytes)) return false;
- data_offset = p - base;
+ if (!var.compile_peak_coords (axes_index_map, axes_old_index_tag_map))
+ return false;
+ unsigned* count;
+ if (coords_count_map.has (&(var.compiled_peak_coords), &count))
+ coords_count_map.set (&(var.compiled_peak_coords), *count + 1);
+ else
+ coords_count_map.set (&(var.compiled_peak_coords), 1);
}
- return true;
}
- bool is_valid () const
- {
- return (index < var_data->tupleVarCount.get_count ()) &&
- var_data_bytes.check_range (current_tuple, TupleVariationHeader::min_size) &&
- var_data_bytes.check_range (current_tuple, hb_max (current_tuple->get_data_size (), current_tuple->get_size (axis_count))) &&
- current_tuple->get_size (axis_count);
- }
+ if (!coords_count_map || coords_count_map.in_error ())
+ return false;
+
+ /* add only those coords that are used more than once into the vector and sort */
+ hb_vector_t<const hb_vector_t<char>*> shared_coords;
+ if (unlikely (!shared_coords.alloc (coords_count_map.get_population ())))
+ return false;
- bool move_to_next ()
+ for (const auto _ : coords_count_map.iter ())
{
- data_offset += current_tuple->get_data_size ();
- current_tuple = &current_tuple->get_next (axis_count);
- index++;
- return is_valid ();
+ if (_.second == 1) continue;
+ shared_coords.push (_.first);
}
- const HBUINT8 *get_serialized_data () const
- { return &(var_data+var_data->data) + data_offset; }
+ /* no shared tuples: no coords are used more than once */
+ if (!shared_coords) return true;
+ /* sorting based on the coords frequency first (high to low), then compare
+ * the coords bytes */
+ hb_qsort (shared_coords.arrayZ, shared_coords.length, sizeof (hb_vector_t<char>*), _cmp_coords, (void *) (&coords_count_map));
- private:
- const GlyphVariationData *var_data;
- unsigned int index;
- unsigned int axis_count;
- unsigned int data_offset;
-
- public:
- hb_bytes_t var_data_bytes;
- const TupleVariationHeader *current_tuple;
- };
+ /* build shared_coords->idx map and shared tuples byte array */
- static bool get_tuple_iterator (hb_bytes_t var_data_bytes, unsigned axis_count,
- hb_vector_t<unsigned int> &shared_indices /* OUT */,
- tuple_iterator_t *iterator /* OUT */)
- {
- iterator->init (var_data_bytes, axis_count);
- if (!iterator->get_shared_indices (shared_indices))
+ shared_tuples_count = hb_min (0xFFFu + 1, shared_coords.length);
+ unsigned len = shared_tuples_count * (shared_coords[0]->length);
+ if (unlikely (!compiled_shared_tuples.alloc (len)))
return false;
- return iterator->is_valid ();
- }
- bool has_shared_point_numbers () const { return tupleVarCount.has_shared_point_numbers (); }
+ for (unsigned i = 0; i < shared_tuples_count; i++)
+ {
+ shared_tuples_idx_map.set (shared_coords[i], i);
+ /* add a concat() in hb_vector_t? */
+ for (char c : shared_coords[i]->iter ())
+ compiled_shared_tuples.push (c);
+ }
- static bool unpack_points (const HBUINT8 *&p /* IN/OUT */,
- hb_vector_t<unsigned int> &points /* OUT */,
- const hb_bytes_t &bytes)
+ return true;
+ }
+
+ static int _cmp_coords (const void *pa, const void *pb, void *arg)
{
- enum packed_point_flag_t
- {
- POINTS_ARE_WORDS = 0x80,
- POINT_RUN_COUNT_MASK = 0x7F
- };
+ const hb_hashmap_t<const hb_vector_t<char>*, unsigned>* coords_count_map =
+ reinterpret_cast<const hb_hashmap_t<const hb_vector_t<char>*, unsigned>*> (arg);
- if (unlikely (!bytes.check_range (p))) return false;
+ /* shared_coords is hb_vector_t<const hb_vector_t<char>*>, so cast pa/pb
+ * to pointer-to-pointer */
+ const hb_vector_t<char>** a = reinterpret_cast<const hb_vector_t<char>**> (const_cast<void*>(pa));
+ const hb_vector_t<char>** b = reinterpret_cast<const hb_vector_t<char>**> (const_cast<void*>(pb));
- uint16_t count = *p++;
- if (count & POINTS_ARE_WORDS)
- {
- if (unlikely (!bytes.check_range (p))) return false;
- count = ((count & POINT_RUN_COUNT_MASK) << 8) | *p++;
- }
- points.resize (count);
+ bool has_a = coords_count_map->has (*a);
+ bool has_b = coords_count_map->has (*b);
- unsigned int n = 0;
- uint16_t i = 0;
- while (i < count)
+ if (has_a && has_b)
{
- if (unlikely (!bytes.check_range (p))) return false;
- uint16_t j;
- uint8_t control = *p++;
- uint16_t run_count = (control & POINT_RUN_COUNT_MASK) + 1;
- if (control & POINTS_ARE_WORDS)
- {
- for (j = 0; j < run_count && i < count; j++, i++)
- {
- if (unlikely (!bytes.check_range ((const HBUINT16 *) p)))
- return false;
- n += *(const HBUINT16 *)p;
- points[i] = n;
- p += HBUINT16::static_size;
- }
- }
- else
- {
- for (j = 0; j < run_count && i < count; j++, i++)
- {
- if (unlikely (!bytes.check_range (p))) return false;
- n += *p++;
- points[i] = n;
- }
- }
- if (j < run_count) return false;
+ unsigned a_num = coords_count_map->get (*a);
+ unsigned b_num = coords_count_map->get (*b);
+
+ if (a_num != b_num)
+ return b_num - a_num;
+
+ return (*b)->as_array().cmp ((*a)->as_array ());
}
- return true;
+ else if (has_a) return -1;
+ else if (has_b) return 1;
+ else return 0;
}
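
compile_shared_tuples() and the comparator above implement the gvar shared-tuples optimization: peak-coordinate byte strings used by more than one tuple variation are stored once and referenced through a 12-bit index, so at most 4096 can be shared. A plain-STL sketch of the selection and ordering (pick_shared_tuples is hypothetical; HarfBuzz keys its hashmap on pointers whose hash derefs first, which keying on values emulates here):

#include <algorithm>
#include <map>
#include <string>
#include <vector>

// Keep only peak-coord byte strings used by 2+ tuples, order them by
// frequency (high to low), tie-break on bytes, cap at the 12-bit limit.
static std::vector<std::string>
pick_shared_tuples (const std::vector<std::string>& peak_coords)
{
  std::map<std::string, unsigned> freq;
  for (const auto& c : peak_coords) freq[c]++;

  std::vector<std::string> shared;
  for (const auto& kv : freq)
    if (kv.second > 1) shared.push_back (kv.first);

  std::sort (shared.begin (), shared.end (),
             [&] (const std::string& a, const std::string& b) {
               if (freq[a] != freq[b]) return freq[a] > freq[b];
               return a > b;            /* byte-wise, as _cmp_coords does */
             });
  if (shared.size () > 0xFFFu + 1) shared.resize (0xFFFu + 1);
  return shared;
}
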
- static bool unpack_deltas (const HBUINT8 *&p /* IN/OUT */,
- hb_vector_t<int> &deltas /* IN/OUT */,
- const hb_bytes_t &bytes)
+ template<typename Iterator,
+ hb_requires (hb_is_iterator (Iterator))>
+ bool serialize_glyph_var_data (hb_serialize_context_t *c,
+ Iterator it,
+ bool long_offset,
+ unsigned num_glyphs,
+ char* glyph_var_data_offsets /* OUT: glyph var data offsets array */) const
{
- enum packed_delta_flag_t
+ TRACE_SERIALIZE (this);
+
+ if (long_offset)
{
- DELTAS_ARE_ZERO = 0x80,
- DELTAS_ARE_WORDS = 0x40,
- DELTA_RUN_COUNT_MASK = 0x3F
- };
-
- unsigned int i = 0;
- unsigned int count = deltas.length;
- while (i < count)
+ ((HBUINT32 *) glyph_var_data_offsets)[0] = 0;
+ glyph_var_data_offsets += 4;
+ }
+ else
{
- if (unlikely (!bytes.check_range (p))) return false;
- uint8_t control = *p++;
- unsigned int run_count = (control & DELTA_RUN_COUNT_MASK) + 1;
- unsigned int j;
- if (control & DELTAS_ARE_ZERO)
- for (j = 0; j < run_count && i < count; j++, i++)
- deltas[i] = 0;
- else if (control & DELTAS_ARE_WORDS)
- for (j = 0; j < run_count && i < count; j++, i++)
- {
- if (unlikely (!bytes.check_range ((const HBUINT16 *) p)))
- return false;
- deltas[i] = *(const HBINT16 *) p;
- p += HBUINT16::static_size;
- }
- else
- for (j = 0; j < run_count && i < count; j++, i++)
- {
- if (unlikely (!bytes.check_range (p)))
- return false;
- deltas[i] = *(const HBINT8 *) p++;
- }
- if (j < run_count)
- return false;
+ ((HBUINT16 *) glyph_var_data_offsets)[0] = 0;
+ glyph_var_data_offsets += 2;
}
- return true;
- }
+ unsigned glyph_offset = 0;
+ hb_codepoint_t last_gid = 0;
+ unsigned idx = 0;
- bool has_data () const { return tupleVarCount; }
+ TupleVariationData* cur_glyph = c->start_embed<TupleVariationData> ();
+ if (!cur_glyph) return_trace (false);
+ for (auto &_ : it)
+ {
+ hb_codepoint_t gid = _.first;
+ if (long_offset)
+ for (; last_gid < gid; last_gid++)
+ ((HBUINT32 *) glyph_var_data_offsets)[last_gid] = glyph_offset;
+ else
+ for (; last_gid < gid; last_gid++)
+ ((HBUINT16 *) glyph_var_data_offsets)[last_gid] = glyph_offset / 2;
- protected:
- struct TupleVarCount : HBUINT16
- {
- bool has_shared_point_numbers () const { return ((*this) & SharedPointNumbers); }
- unsigned int get_count () const { return (*this) & CountMask; }
+ if (idx >= glyph_variations.length) return_trace (false);
+ if (!cur_glyph->serialize (c, true, glyph_variations[idx])) return_trace (false);
+ TupleVariationData* next_glyph = c->start_embed<TupleVariationData> ();
+ glyph_offset += (char *) next_glyph - (char *) cur_glyph;
- protected:
- enum Flags
- {
- SharedPointNumbers= 0x8000u,
- CountMask = 0x0FFFu
- };
- public:
- DEFINE_SIZE_STATIC (2);
- };
+ if (long_offset)
+ ((HBUINT32 *) glyph_var_data_offsets)[gid] = glyph_offset;
+ else
+ ((HBUINT16 *) glyph_var_data_offsets)[gid] = glyph_offset / 2;
- TupleVarCount tupleVarCount; /* A packed field. The high 4 bits are flags, and the
- * low 12 bits are the number of tuple variation tables
- * for this glyph. The number of tuple variation tables
- * can be any number between 1 and 4095. */
- Offset16To<HBUINT8>
- data; /* Offset from the start of the GlyphVariationData table
- * to the serialized data. */
- /* TupleVariationHeader tupleVariationHeaders[] *//* Array of tuple variation headers. */
- public:
- DEFINE_SIZE_MIN (4);
+ last_gid++;
+ idx++;
+ cur_glyph = next_glyph;
+ }
+
+ if (long_offset)
+ for (; last_gid < num_glyphs; last_gid++)
+ ((HBUINT32 *) glyph_var_data_offsets)[last_gid] = glyph_offset;
+ else
+ for (; last_gid < num_glyphs; last_gid++)
+ ((HBUINT16 *) glyph_var_data_offsets)[last_gid] = glyph_offset / 2;
+ return_trace (true);
+ }
};
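
serialize_glyph_var_data() fills the (num_glyphs + 1)-entry offset array so that glyph g's variation data occupies bytes [offset[g], offset[g+1]); gids absent from new_to_old_gid_list inherit the running offset and thus get zero-length ranges. A small runnable check of that layout under short offsets, which in the gvar format store byte offsets divided by two (standalone, hypothetical data):

#include <cassert>
#include <cstdint>

int main ()
{
  const unsigned num_glyphs = 3;
  const unsigned data_size[num_glyphs] = { 0, 6, 0 };  /* only gid 1 has data */

  uint16_t offsets[num_glyphs + 1];
  unsigned glyph_offset = 0;
  offsets[0] = 0;
  for (unsigned gid = 0; gid < num_glyphs; gid++)
  {
    glyph_offset += data_size[gid];
    offsets[gid + 1] = glyph_offset / 2;        /* short-offset encoding */
  }

  /* gid 1's data spans [offsets[1]*2, offsets[2]*2) = [0, 6);
   * gids 0 and 2 get empty ranges. */
  assert (offsets[1] * 2 == 0 && offsets[2] * 2 == 6);
  assert (offsets[2] == offsets[3]);
  return 0;
}
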
struct gvar
@@ -389,23 +296,116 @@ struct gvar
bool sanitize_shallow (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
- return_trace (c->check_struct (this) && (version.major == 1) &&
- (glyphCount == c->get_num_glyphs ()) &&
+ return_trace (c->check_struct (this) &&
+ hb_barrier () &&
+ (version.major == 1) &&
sharedTuples.sanitize (c, this, axisCount * sharedTupleCount) &&
(is_long_offset () ?
- c->check_array (get_long_offset_array (), glyphCount+1) :
- c->check_array (get_short_offset_array (), glyphCount+1)) &&
- c->check_array (((const HBUINT8*)&(this+dataZ)) + get_offset (0),
- get_offset (glyphCount) - get_offset (0)));
+ c->check_array (get_long_offset_array (), c->get_num_glyphs () + 1) :
+ c->check_array (get_short_offset_array (), c->get_num_glyphs () + 1)));
}
- /* GlyphVariationData not sanitized here; must be checked while accessing each glyph varation data */
+ /* GlyphVariationData not sanitized here; must be checked while accessing each glyph variation data */
bool sanitize (hb_sanitize_context_t *c) const
{ return sanitize_shallow (c); }
+ bool decompile_glyph_variations (hb_subset_context_t *c,
+ glyph_variations_t& glyph_vars /* OUT */) const
+ {
+ hb_hashmap_t<hb_codepoint_t, hb_bytes_t> new_gid_var_data_map;
+ auto it = hb_iter (c->plan->new_to_old_gid_list);
+ if (it->first == 0 && !(c->plan->flags & HB_SUBSET_FLAGS_NOTDEF_OUTLINE))
+ {
+ new_gid_var_data_map.set (0, hb_bytes_t ());
+ it++;
+ }
+
+ for (auto &_ : it)
+ {
+ hb_codepoint_t new_gid = _.first;
+ hb_codepoint_t old_gid = _.second;
+ hb_bytes_t var_data_bytes = get_glyph_var_data_bytes (c->source_blob, glyphCountX, old_gid);
+ new_gid_var_data_map.set (new_gid, var_data_bytes);
+ }
+
+ if (new_gid_var_data_map.in_error ()) return false;
+
+ hb_array_t<const F2DOT14> shared_tuples = (this+sharedTuples).as_array ((unsigned) sharedTupleCount * (unsigned) axisCount);
+ return glyph_vars.create_from_glyphs_var_data (axisCount, shared_tuples, c->plan, new_gid_var_data_map);
+ }
+
+ template<typename Iterator,
+ hb_requires (hb_is_iterator (Iterator))>
+ bool serialize (hb_serialize_context_t *c,
+ const glyph_variations_t& glyph_vars,
+ Iterator it,
+ unsigned axis_count,
+ unsigned num_glyphs) const
+ {
+ TRACE_SERIALIZE (this);
+ gvar *out = c->allocate_min<gvar> ();
+ if (unlikely (!out)) return_trace (false);
+
+ out->version.major = 1;
+ out->version.minor = 0;
+ out->axisCount = axis_count;
+ out->glyphCountX = hb_min (0xFFFFu, num_glyphs);
+
+ unsigned glyph_var_data_size = glyph_vars.compiled_byte_size ();
+ bool long_offset = glyph_var_data_size & ~0xFFFFu;
+ out->flags = long_offset ? 1 : 0;
+
+ HBUINT8 *glyph_var_data_offsets = c->allocate_size<HBUINT8> ((long_offset ? 4 : 2) * (num_glyphs + 1), false);
+ if (!glyph_var_data_offsets) return_trace (false);
+
+ /* shared tuples */
+ unsigned shared_tuple_count = glyph_vars.compiled_shared_tuples_count ();
+ out->sharedTupleCount = shared_tuple_count;
+
+ if (!shared_tuple_count)
+ out->sharedTuples = 0;
+ else
+ {
+ hb_array_t<const char> shared_tuples = glyph_vars.compiled_shared_tuples.as_array ().copy (c);
+ if (!shared_tuples.arrayZ) return_trace (false);
+ out->sharedTuples = shared_tuples.arrayZ - (char *) out;
+ }
+
+ char *glyph_var_data = c->start_embed<char> ();
+ if (!glyph_var_data) return_trace (false);
+ out->dataZ = glyph_var_data - (char *) out;
+
+ return_trace (glyph_vars.serialize_glyph_var_data (c, it, long_offset, num_glyphs,
+ (char *) glyph_var_data_offsets));
+ }
+
+ bool instantiate (hb_subset_context_t *c) const
+ {
+ TRACE_SUBSET (this);
+ glyph_variations_t glyph_vars;
+ if (!decompile_glyph_variations (c, glyph_vars))
+ return_trace (false);
+
+ if (!glyph_vars.instantiate (c->plan)) return_trace (false);
+ if (!glyph_vars.compile_bytes (c->plan->axes_index_map, c->plan->axes_old_index_tag_map))
+ return_trace (false);
+
+ unsigned axis_count = c->plan->axes_index_map.get_population ();
+ unsigned num_glyphs = c->plan->num_output_glyphs ();
+ auto it = hb_iter (c->plan->new_to_old_gid_list);
+ return_trace (serialize (c->serializer, glyph_vars, it, axis_count, num_glyphs));
+ }
+
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
+ if (c->plan->all_axes_pinned)
+ return_trace (false);
+
+ if (c->plan->normalized_coords)
+ return_trace (instantiate (c));
+
+ unsigned glyph_count = version.to_int () ? c->plan->source->get_num_glyphs () : 0;
gvar *out = c->serializer->allocate_min<gvar> ();
if (unlikely (!out)) return_trace (false);
@@ -416,22 +416,25 @@ struct gvar
out->sharedTupleCount = sharedTupleCount;
unsigned int num_glyphs = c->plan->num_output_glyphs ();
- out->glyphCount = num_glyphs;
+ out->glyphCountX = hb_min (0xFFFFu, num_glyphs);
+ auto it = hb_iter (c->plan->new_to_old_gid_list);
+ if (it->first == 0 && !(c->plan->flags & HB_SUBSET_FLAGS_NOTDEF_OUTLINE))
+ it++;
unsigned int subset_data_size = 0;
- for (hb_codepoint_t gid = (c->plan->flags & HB_SUBSET_FLAGS_NOTDEF_OUTLINE) ? 0 : 1;
- gid < num_glyphs;
- gid++)
+ for (auto &_ : it)
{
- hb_codepoint_t old_gid;
- if (!c->plan->old_gid_for_new_gid (gid, &old_gid)) continue;
- subset_data_size += get_glyph_var_data_bytes (c->source_blob, old_gid).length;
+ hb_codepoint_t old_gid = _.second;
+ subset_data_size += get_glyph_var_data_bytes (c->source_blob, glyph_count, old_gid).length;
}
- bool long_offset = subset_data_size & ~0xFFFFu;
+ bool long_offset = (subset_data_size & ~0xFFFFu);
+ #ifdef HB_EXPERIMENTAL_API
+ long_offset = long_offset || (c->plan->flags & HB_SUBSET_FLAGS_IFTB_REQUIREMENTS);
+ #endif
out->flags = long_offset ? 1 : 0;
- HBUINT8 *subset_offsets = c->serializer->allocate_size<HBUINT8> ((long_offset ? 4 : 2) * (num_glyphs + 1));
+ HBUINT8 *subset_offsets = c->serializer->allocate_size<HBUINT8> ((long_offset ? 4 : 2) * (num_glyphs + 1), false);
if (!subset_offsets) return_trace (false);
/* shared tuples */
@@ -443,54 +446,91 @@ struct gvar
F2DOT14 *tuples = c->serializer->allocate_size<F2DOT14> (shared_tuple_size);
if (!tuples) return_trace (false);
out->sharedTuples = (char *) tuples - (char *) out;
- memcpy (tuples, this+sharedTuples, shared_tuple_size);
+ hb_memcpy (tuples, this+sharedTuples, shared_tuple_size);
}
- char *subset_data = c->serializer->allocate_size<char> (subset_data_size);
+ /* This ordering relative to the shared tuples array, which puts the glyphVariationData
+ * last in the table, is required when HB_SUBSET_FLAGS_IFTB_REQUIREMENTS is set */
+ char *subset_data = c->serializer->allocate_size<char> (subset_data_size, false);
if (!subset_data) return_trace (false);
out->dataZ = subset_data - (char *) out;
+
+ if (long_offset)
+ {
+ ((HBUINT32 *) subset_offsets)[0] = 0;
+ subset_offsets += 4;
+ }
+ else
+ {
+ ((HBUINT16 *) subset_offsets)[0] = 0;
+ subset_offsets += 2;
+ }
unsigned int glyph_offset = 0;
- for (hb_codepoint_t gid = (c->plan->flags & HB_SUBSET_FLAGS_NOTDEF_OUTLINE) ? 0 : 1;
- gid < num_glyphs;
- gid++)
+
+ hb_codepoint_t last = 0;
+ it = hb_iter (c->plan->new_to_old_gid_list);
+ if (it->first == 0 && !(c->plan->flags & HB_SUBSET_FLAGS_NOTDEF_OUTLINE))
+ it++;
+ for (auto &_ : it)
{
- hb_codepoint_t old_gid;
- hb_bytes_t var_data_bytes = c->plan->old_gid_for_new_gid (gid, &old_gid)
- ? get_glyph_var_data_bytes (c->source_blob, old_gid)
- : hb_bytes_t ();
+ hb_codepoint_t gid = _.first;
+ hb_codepoint_t old_gid = _.second;
if (long_offset)
- ((HBUINT32 *) subset_offsets)[gid] = glyph_offset;
+ for (; last < gid; last++)
+ ((HBUINT32 *) subset_offsets)[last] = glyph_offset;
else
- ((HBUINT16 *) subset_offsets)[gid] = glyph_offset / 2;
+ for (; last < gid; last++)
+ ((HBUINT16 *) subset_offsets)[last] = glyph_offset / 2;
+
+ hb_bytes_t var_data_bytes = get_glyph_var_data_bytes (c->source_blob,
+ glyph_count,
+ old_gid);
- if (var_data_bytes.length > 0)
- memcpy (subset_data, var_data_bytes.arrayZ, var_data_bytes.length);
+ hb_memcpy (subset_data, var_data_bytes.arrayZ, var_data_bytes.length);
subset_data += var_data_bytes.length;
glyph_offset += var_data_bytes.length;
+
+ if (long_offset)
+ ((HBUINT32 *) subset_offsets)[gid] = glyph_offset;
+ else
+ ((HBUINT16 *) subset_offsets)[gid] = glyph_offset / 2;
+
+ last++; // Skip over gid
}
+
if (long_offset)
- ((HBUINT32 *) subset_offsets)[num_glyphs] = glyph_offset;
+ for (; last < num_glyphs; last++)
+ ((HBUINT32 *) subset_offsets)[last] = glyph_offset;
else
- ((HBUINT16 *) subset_offsets)[num_glyphs] = glyph_offset / 2;
+ for (; last < num_glyphs; last++)
+ ((HBUINT16 *) subset_offsets)[last] = glyph_offset / 2;
return_trace (true);
}
protected:
- const hb_bytes_t get_glyph_var_data_bytes (hb_blob_t *blob, hb_codepoint_t glyph) const
+ const hb_bytes_t get_glyph_var_data_bytes (hb_blob_t *blob,
+ unsigned glyph_count,
+ hb_codepoint_t glyph) const
{
- unsigned start_offset = get_offset (glyph);
- unsigned length = get_offset (glyph+1) - start_offset;
+ unsigned start_offset = get_offset (glyph_count, glyph);
+ unsigned end_offset = get_offset (glyph_count, glyph+1);
+ if (unlikely (end_offset < start_offset)) return hb_bytes_t ();
+ unsigned length = end_offset - start_offset;
hb_bytes_t var_data = blob->as_bytes ().sub_array (((unsigned) dataZ) + start_offset, length);
return likely (var_data.length >= GlyphVariationData::min_size) ? var_data : hb_bytes_t ();
}
bool is_long_offset () const { return flags & 1; }
- unsigned get_offset (unsigned i) const
- { return is_long_offset () ? get_long_offset_array ()[i] : get_short_offset_array ()[i] * 2; }
+ unsigned get_offset (unsigned glyph_count, unsigned i) const
+ {
+ if (unlikely (i > glyph_count)) return 0;
+ _hb_compiler_memory_r_barrier ();
+ return is_long_offset () ? get_long_offset_array ()[i] : get_short_offset_array ()[i] * 2;
+ }
const HBUINT32 * get_long_offset_array () const { return (const HBUINT32 *) &offsetZ; }
const HBUINT16 *get_short_offset_array () const { return (const HBUINT16 *) &offsetZ; }
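
get_offset() is now hardened against untrusted input: the index is range-checked against the caller-supplied glyph count, and a compiler read barrier keeps the array load from being hoisted above the check. The same pattern in miniature (safe_offset is hypothetical; atomic_signal_fence stands in for _hb_compiler_memory_r_barrier()):

#include <atomic>
#include <cstdint>

static unsigned safe_offset (const uint16_t *arr, unsigned len, unsigned i)
{
  if (i > len) return 0;        /* arr holds len + 1 entries */
  std::atomic_signal_fence (std::memory_order_acq_rel);
  return arr[i] * 2u;           /* short offsets store byte offset / 2 */
}
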
@@ -498,24 +538,56 @@ struct gvar
public:
struct accelerator_t
{
- void init (hb_face_t *face)
- { table = hb_sanitize_context_t ().reference_table<gvar> (face); }
- void fini () { table.destroy (); }
+ accelerator_t (hb_face_t *face)
+ {
+ table = hb_sanitize_context_t ().reference_table<gvar> (face);
+ /* If sanitize failed, set glyphCount to 0. */
+ glyphCount = table->version.to_int () ? face->get_num_glyphs () : 0;
+
+ /* For shared tuples that have only one or two axes active, cache the
+ * indices of those axes. This speeds up calculate_scalar() a lot
+ * for fonts with lots of axes and many "monovar" tuples. */
+ hb_array_t<const F2DOT14> shared_tuples = (table+table->sharedTuples).as_array (table->sharedTupleCount * table->axisCount);
+ unsigned count = table->sharedTupleCount;
+ if (unlikely (!shared_tuple_active_idx.resize (count, false))) return;
+ unsigned axis_count = table->axisCount;
+ for (unsigned i = 0; i < count; i++)
+ {
+ hb_array_t<const F2DOT14> tuple = shared_tuples.sub_array (axis_count * i, axis_count);
+ int idx1 = -1, idx2 = -1;
+ for (unsigned j = 0; j < axis_count; j++)
+ {
+ const F2DOT14 &peak = tuple.arrayZ[j];
+ if (peak.to_int () != 0)
+ {
+ if (idx1 == -1)
+ idx1 = j;
+ else if (idx2 == -1)
+ idx2 = j;
+ else
+ {
+ idx1 = idx2 = -1;
+ break;
+ }
+ }
+ }
+ shared_tuple_active_idx.arrayZ[i] = {idx1, idx2};
+ }
+ }
+ ~accelerator_t () { table.destroy (); }
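
The new constructor precomputes, for every shared tuple, which one or two axes have a nonzero peak, storing {-1, -1} when three or more do (or none). calculate_scalar() can then jump straight to those axes instead of scanning the whole coordinate array per tuple. A sketch of that fast path for non-intermediate tuples, where the per-axis term is simply v/peak (hypothetical names; the real cache-aware code lives with the relocated TupleVariationHeader):

#include <algorithm>
#include <utility>

static float axis_term (int v, int peak)
{
  if (!peak || v == peak) return 1.f;
  if (!v || v < std::min (0, peak) || v > std::max (0, peak)) return 0.f;
  return (float) v / peak;
}

static float tuple_scalar (const int *coords, const int *peaks,
                           unsigned axis_count, std::pair<int, int> active)
{
  if (active.first == -1)       /* no cached axes: scan everything */
  {
    float s = 1.f;
    for (unsigned i = 0; i < axis_count; i++)
      s *= axis_term (coords[i], peaks[i]);
    return s;
  }
  float s = axis_term (coords[active.first], peaks[active.first]);
  if (active.second != -1)
    s *= axis_term (coords[active.second], peaks[active.second]);
  return s;
}
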
private:
- struct x_getter { static float get (const contour_point_t &p) { return p.x; } };
- struct y_getter { static float get (const contour_point_t &p) { return p.y; } };
- template <typename T>
static float infer_delta (const hb_array_t<contour_point_t> points,
const hb_array_t<contour_point_t> deltas,
- unsigned int target, unsigned int prev, unsigned int next)
+ unsigned int target, unsigned int prev, unsigned int next,
+ float contour_point_t::*m)
{
- float target_val = T::get (points[target]);
- float prev_val = T::get (points[prev]);
- float next_val = T::get (points[next]);
- float prev_delta = T::get (deltas[prev]);
- float next_delta = T::get (deltas[next]);
+ float target_val = points.arrayZ[target].*m;
+ float prev_val = points.arrayZ[prev].*m;
+ float next_val = points.arrayZ[next].*m;
+ float prev_delta = deltas.arrayZ[prev].*m;
+ float next_delta = deltas.arrayZ[next].*m;
if (prev_val == next_val)
return (prev_delta == next_delta) ? prev_delta : 0.f;
@@ -526,141 +598,238 @@ struct gvar
/* linear interpolation */
float r = (target_val - prev_val) / (next_val - prev_val);
- return (1.f - r) * prev_delta + r * next_delta;
+ return prev_delta + r * (next_delta - prev_delta);
}
static unsigned int next_index (unsigned int i, unsigned int start, unsigned int end)
{ return (i >= end) ? start : (i + 1); }
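
infer_delta() is the spec's interpolation of untouched points (IUP): a point with no explicit delta, sitting between two referenced neighbours on a given coordinate, receives a delta interpolated linearly between theirs; targets outside the [prev, next] range take the nearer neighbour's delta in branches elided between these hunks. The hunk also rewrites the lerp as prev_delta + r * (next_delta - prev_delta), saving a multiplication. A runnable example:

#include <cassert>

// Standalone IUP interpolation for one coordinate, mirroring the
// rewritten lerp above.
static float iup_lerp (float target, float prev, float next,
                       float prev_delta, float next_delta)
{
  if (prev == next) return prev_delta == next_delta ? prev_delta : 0.f;
  float r = (target - prev) / (next - prev);
  return prev_delta + r * (next_delta - prev_delta);
}

int main ()
{
  /* prev at x=10 moved +4, next at x=30 moved +8, untouched point at
   * x=15: r = 0.25, inferred delta = 4 + 0.25 * (8 - 4) = 5 */
  assert (iup_lerp (15.f, 10.f, 30.f, 4.f, 8.f) == 5.f);
  return 0;
}
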
public:
- bool apply_deltas_to_points (hb_codepoint_t glyph, hb_font_t *font,
- const hb_array_t<contour_point_t> points) const
+ bool apply_deltas_to_points (hb_codepoint_t glyph,
+ hb_array_t<int> coords,
+ const hb_array_t<contour_point_t> points,
+ bool phantom_only = false) const
{
- /* num_coords should exactly match gvar's axisCount due to how GlyphVariationData tuples are aligned */
- if (!font->num_coords || font->num_coords != table->axisCount) return true;
+ if (unlikely (glyph >= glyphCount)) return true;
- if (unlikely (glyph >= table->glyphCount)) return true;
-
- hb_bytes_t var_data_bytes = table->get_glyph_var_data_bytes (table.get_blob (), glyph);
+ hb_bytes_t var_data_bytes = table->get_glyph_var_data_bytes (table.get_blob (), glyphCount, glyph);
if (!var_data_bytes.as<GlyphVariationData> ()->has_data ()) return true;
hb_vector_t<unsigned int> shared_indices;
GlyphVariationData::tuple_iterator_t iterator;
if (!GlyphVariationData::get_tuple_iterator (var_data_bytes, table->axisCount,
+ var_data_bytes.arrayZ,
shared_indices, &iterator))
return true; /* so isn't applied at all */
/* Save original points for inferred delta calculation */
- contour_point_vector_t orig_points;
- orig_points.resize (points.length);
- for (unsigned int i = 0; i < orig_points.length; i++)
- orig_points[i] = points[i];
+ contour_point_vector_t orig_points_vec; // Populated lazily
+ auto orig_points = orig_points_vec.as_array ();
- contour_point_vector_t deltas; /* flag is used to indicate referenced point */
- deltas.resize (points.length);
+ /* flag is used to indicate referenced point */
+ contour_point_vector_t deltas_vec; // Populated lazily
+ auto deltas = deltas_vec.as_array ();
- hb_vector_t<unsigned> end_points;
- for (unsigned i = 0; i < points.length; ++i)
- if (points[i].is_end_point)
- end_points.push (i);
+ hb_vector_t<unsigned> end_points; // Populated lazily
- int *coords = font->coords;
- unsigned num_coords = font->num_coords;
- hb_array_t<const F2DOT14> shared_tuples = (table+table->sharedTuples).as_array (table->sharedTupleCount * table->axisCount);
+ unsigned num_coords = table->axisCount;
+ hb_array_t<const F2DOT14> shared_tuples = (table+table->sharedTuples).as_array (table->sharedTupleCount * num_coords);
+
+ hb_vector_t<unsigned int> private_indices;
+ hb_vector_t<int> x_deltas;
+ hb_vector_t<int> y_deltas;
+ unsigned count = points.length;
+ bool flush = false;
do
{
- float scalar = iterator.current_tuple->calculate_scalar (coords, num_coords, shared_tuples);
+ float scalar = iterator.current_tuple->calculate_scalar (coords, num_coords, shared_tuples,
+ &shared_tuple_active_idx);
if (scalar == 0.f) continue;
const HBUINT8 *p = iterator.get_serialized_data ();
unsigned int length = iterator.current_tuple->get_data_size ();
if (unlikely (!iterator.var_data_bytes.check_range (p, length)))
return false;
- hb_bytes_t bytes ((const char *) p, length);
- hb_vector_t<unsigned int> private_indices;
- if (iterator.current_tuple->has_private_points () &&
- !GlyphVariationData::unpack_points (p, private_indices, bytes))
+ if (!deltas)
+ {
+ if (unlikely (!deltas_vec.resize (count, false))) return false;
+ deltas = deltas_vec.as_array ();
+ hb_memset (deltas.arrayZ + (phantom_only ? count - 4 : 0), 0,
+ (phantom_only ? 4 : count) * sizeof (deltas[0]));
+ }
+
+ const HBUINT8 *end = p + length;
+
+ bool has_private_points = iterator.current_tuple->has_private_points ();
+ if (has_private_points &&
+ !GlyphVariationData::unpack_points (p, private_indices, end))
return false;
- const hb_array_t<unsigned int> &indices = private_indices.length ? private_indices : shared_indices;
+ const hb_array_t<unsigned int> &indices = has_private_points ? private_indices : shared_indices;
bool apply_to_all = (indices.length == 0);
unsigned int num_deltas = apply_to_all ? points.length : indices.length;
- hb_vector_t<int> x_deltas;
- x_deltas.resize (num_deltas);
- if (!GlyphVariationData::unpack_deltas (p, x_deltas, bytes))
- return false;
- hb_vector_t<int> y_deltas;
- y_deltas.resize (num_deltas);
- if (!GlyphVariationData::unpack_deltas (p, y_deltas, bytes))
- return false;
+ if (unlikely (!x_deltas.resize (num_deltas, false))) return false;
+ if (unlikely (!GlyphVariationData::unpack_deltas (p, x_deltas, end))) return false;
+ if (unlikely (!y_deltas.resize (num_deltas, false))) return false;
+ if (unlikely (!GlyphVariationData::unpack_deltas (p, y_deltas, end))) return false;
- for (unsigned int i = 0; i < deltas.length; i++)
- deltas[i].init ();
- for (unsigned int i = 0; i < num_deltas; i++)
+ if (!apply_to_all)
{
- unsigned int pt_index = apply_to_all ? i : indices[i];
- deltas[pt_index].flag = 1; /* this point is referenced, i.e., explicit deltas specified */
- deltas[pt_index].x += x_deltas[i] * scalar;
- deltas[pt_index].y += y_deltas[i] * scalar;
+ if (!orig_points && !phantom_only)
+ {
+ orig_points_vec.extend (points);
+ if (unlikely (orig_points_vec.in_error ())) return false;
+ orig_points = orig_points_vec.as_array ();
+ }
+
+ if (flush)
+ {
+ for (unsigned int i = phantom_only ? count - 4 : 0; i < count; i++)
+ points.arrayZ[i].translate (deltas.arrayZ[i]);
+ flush = false;
+
+ }
+ hb_memset (deltas.arrayZ + (phantom_only ? count - 4 : 0), 0,
+ (phantom_only ? 4 : count) * sizeof (deltas[0]));
+ }
+
+ if (HB_OPTIMIZE_SIZE_VAL)
+ {
+ for (unsigned int i = 0; i < num_deltas; i++)
+ {
+ unsigned int pt_index;
+ if (apply_to_all)
+ pt_index = i;
+ else
+ {
+ pt_index = indices[i];
+ if (unlikely (pt_index >= deltas.length)) continue;
+ }
+ if (phantom_only && pt_index < count - 4) continue;
+ auto &delta = deltas.arrayZ[pt_index];
+ delta.flag = 1; /* this point is referenced, i.e., explicit deltas specified */
+ delta.x += x_deltas.arrayZ[i] * scalar;
+ delta.y += y_deltas.arrayZ[i] * scalar;
+ }
+ }
+ else
+ {
+ /* Ouch. Four cases... for optimization. */
+ if (scalar != 1.0f)
+ {
+ if (apply_to_all)
+ for (unsigned int i = phantom_only ? count - 4 : 0; i < count; i++)
+ {
+ unsigned int pt_index = i;
+ auto &delta = deltas.arrayZ[pt_index];
+ delta.x += x_deltas.arrayZ[i] * scalar;
+ delta.y += y_deltas.arrayZ[i] * scalar;
+ }
+ else
+ for (unsigned int i = 0; i < num_deltas; i++)
+ {
+ unsigned int pt_index = indices[i];
+ if (unlikely (pt_index >= deltas.length)) continue;
+ if (phantom_only && pt_index < count - 4) continue;
+ auto &delta = deltas.arrayZ[pt_index];
+ delta.flag = 1; /* this point is referenced, i.e., explicit deltas specified */
+ delta.x += x_deltas.arrayZ[i] * scalar;
+ delta.y += y_deltas.arrayZ[i] * scalar;
+ }
+ }
+ else
+ {
+ if (apply_to_all)
+ for (unsigned int i = phantom_only ? count - 4 : 0; i < count; i++)
+ {
+ unsigned int pt_index = i;
+ auto &delta = deltas.arrayZ[pt_index];
+ delta.x += x_deltas.arrayZ[i];
+ delta.y += y_deltas.arrayZ[i];
+ }
+ else
+ for (unsigned int i = 0; i < num_deltas; i++)
+ {
+ unsigned int pt_index = indices[i];
+ if (unlikely (pt_index >= deltas.length)) continue;
+ if (phantom_only && pt_index < count - 4) continue;
+ auto &delta = deltas.arrayZ[pt_index];
+ delta.flag = 1; /* this point is referenced, i.e., explicit deltas specified */
+ delta.x += x_deltas.arrayZ[i];
+ delta.y += y_deltas.arrayZ[i];
+ }
+ }
}
/* infer deltas for unreferenced points */
- unsigned start_point = 0;
- for (unsigned c = 0; c < end_points.length; c++)
+ if (!apply_to_all && !phantom_only)
{
- unsigned end_point = end_points[c];
+ if (!end_points)
+ {
+ for (unsigned i = 0; i < count; ++i)
+ if (points.arrayZ[i].is_end_point)
+ end_points.push (i);
+ if (unlikely (end_points.in_error ())) return false;
+ }
- /* Check the number of unreferenced points in a contour. If no unref points or no ref points, nothing to do. */
- unsigned unref_count = 0;
- for (unsigned i = start_point; i <= end_point; i++)
- if (!deltas[i].flag) unref_count++;
+ unsigned start_point = 0;
+ for (unsigned end_point : end_points)
+ {
+ /* Check the number of unreferenced points in a contour. If no unref points or no ref points, nothing to do. */
+ unsigned unref_count = 0;
+ for (unsigned i = start_point; i < end_point + 1; i++)
+ unref_count += deltas.arrayZ[i].flag;
+ unref_count = (end_point - start_point + 1) - unref_count;
- unsigned j = start_point;
- if (unref_count == 0 || unref_count > end_point - start_point)
- goto no_more_gaps;
+ unsigned j = start_point;
+ if (unref_count == 0 || unref_count > end_point - start_point)
+ goto no_more_gaps;
- for (;;)
- {
- /* Locate the next gap of unreferenced points between two referenced points prev and next.
- * Note that a gap may wrap around at left (start_point) and/or at right (end_point).
- */
- unsigned int prev, next, i;
for (;;)
{
- i = j;
- j = next_index (i, start_point, end_point);
- if (deltas[i].flag && !deltas[j].flag) break;
- }
- prev = j = i;
- for (;;)
- {
- i = j;
- j = next_index (i, start_point, end_point);
- if (!deltas[i].flag && deltas[j].flag) break;
- }
- next = j;
- /* Infer deltas for all unref points in the gap between prev and next */
- i = prev;
- for (;;)
- {
- i = next_index (i, start_point, end_point);
- if (i == next) break;
- deltas[i].x = infer_delta<x_getter> (orig_points.as_array (), deltas.as_array (), i, prev, next);
- deltas[i].y = infer_delta<y_getter> (orig_points.as_array (), deltas.as_array (), i, prev, next);
- if (--unref_count == 0) goto no_more_gaps;
+ /* Locate the next gap of unreferenced points between two referenced points prev and next.
+ * Note that a gap may wrap around at left (start_point) and/or at right (end_point).
+ */
+ unsigned int prev, next, i;
+ for (;;)
+ {
+ i = j;
+ j = next_index (i, start_point, end_point);
+ if (deltas.arrayZ[i].flag && !deltas.arrayZ[j].flag) break;
+ }
+ prev = j = i;
+ for (;;)
+ {
+ i = j;
+ j = next_index (i, start_point, end_point);
+ if (!deltas.arrayZ[i].flag && deltas.arrayZ[j].flag) break;
+ }
+ next = j;
+ /* Infer deltas for all unref points in the gap between prev and next */
+ i = prev;
+ for (;;)
+ {
+ i = next_index (i, start_point, end_point);
+ if (i == next) break;
+ deltas.arrayZ[i].x = infer_delta (orig_points, deltas, i, prev, next, &contour_point_t::x);
+ deltas.arrayZ[i].y = infer_delta (orig_points, deltas, i, prev, next, &contour_point_t::y);
+ if (--unref_count == 0) goto no_more_gaps;
+ }
}
+ no_more_gaps:
+ start_point = end_point + 1;
}
-no_more_gaps:
- start_point = end_point + 1;
}
- /* apply specified / inferred deltas to points */
- for (unsigned int i = 0; i < points.length; i++)
- {
- points[i].x += deltas[i].x;
- points[i].y += deltas[i].y;
- }
+ flush = true;
+
} while (iterator.move_to_next ());
+ if (flush)
+ {
+ for (unsigned int i = phantom_only ? count - 4 : 0; i < count; i++)
+ points.arrayZ[i].translate (deltas.arrayZ[i]);
+ }
+
return true;
}
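
Two new mechanisms in apply_deltas_to_points() deserve a note. phantom_only restricts all work to the last four points, the phantom points appended after the outline to carry the glyph's metrics, and the flush flag lets consecutive tuples that reference every point accumulate into deltas without a per-tuple translate-and-memset round trip, flushing only when a tuple with a private point subset (or the end of iteration) requires it. A standalone sketch of the phantom-only guard (hypothetical names; assumes at least four points, which holds because the phantom points are always present):

#include <vector>

struct pt { float x, y; };

static void flush_deltas (std::vector<pt>& points,
                          const std::vector<pt>& deltas, bool phantom_only)
{
  unsigned count = points.size ();
  for (unsigned i = phantom_only ? count - 4 : 0; i < count; i++)
  {
    points[i].x += deltas[i].x;         /* mirrors contour_point_t::translate */
    points[i].y += deltas[i].y;
  }
}
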
@@ -668,6 +837,8 @@ no_more_gaps:
private:
hb_blob_ptr_t<gvar> table;
+ unsigned glyphCount;
+ hb_vector_t<hb_pair_t<int, int>> shared_tuple_active_idx;
};
protected:
@@ -683,7 +854,7 @@ no_more_gaps:
NNOffset32To<UnsizedArrayOf<F2DOT14>>
sharedTuples; /* Offset from the start of this table to the shared tuple records.
* Array of tuple records shared across all glyph variation data tables. */
- HBUINT16 glyphCount; /* The number of glyphs in this font. This must match the number of
+ HBUINT16 glyphCountX; /* The number of glyphs in this font. This must match the number of
* glyphs stored elsewhere in the font. */
HBUINT16 flags; /* Bit-field that gives the format of the offset array that follows.
* If bit 0 is clear, the offsets are uint16; if bit 0 is set, the
@@ -695,10 +866,12 @@ no_more_gaps:
offsetZ; /* Offsets from the start of the GlyphVariationData array
* to each GlyphVariationData table. */
public:
- DEFINE_SIZE_MIN (20);
+ DEFINE_SIZE_ARRAY (20, offsetZ);
};
-struct gvar_accelerator_t : gvar::accelerator_t {};
+struct gvar_accelerator_t : gvar::accelerator_t {
+ gvar_accelerator_t (hb_face_t *face) : gvar::accelerator_t (face) {}
+};
} /* namespace OT */