Diffstat (limited to 'src/3rdparty/harfbuzz-ng/src/hb-ot-layout-gsubgpos.hh')
-rw-r--r--  src/3rdparty/harfbuzz-ng/src/hb-ot-layout-gsubgpos.hh | 262
1 file changed, 163 insertions(+), 99 deletions(-)
diff --git a/src/3rdparty/harfbuzz-ng/src/hb-ot-layout-gsubgpos.hh b/src/3rdparty/harfbuzz-ng/src/hb-ot-layout-gsubgpos.hh
index 04ccefd108..10cc105de4 100644
--- a/src/3rdparty/harfbuzz-ng/src/hb-ot-layout-gsubgpos.hh
+++ b/src/3rdparty/harfbuzz-ng/src/hb-ot-layout-gsubgpos.hh
@@ -115,7 +115,7 @@ struct hb_closure_context_t :
return true;
}
- hb_set_clear (done_lookups_glyph_set->get (lookup_index));
+ done_lookups_glyph_set->get (lookup_index)->clear ();
}
hb_set_t *covered_glyph_set = done_lookups_glyph_set->get (lookup_index);
@@ -532,6 +532,30 @@ struct hb_ot_apply_context_t :
may_skip (const hb_glyph_info_t &info) const
{ return matcher.may_skip (c, info); }
+ enum match_t {
+ MATCH,
+ NOT_MATCH,
+ SKIP
+ };
+
+ match_t match (hb_glyph_info_t &info)
+ {
+ matcher_t::may_skip_t skip = matcher.may_skip (c, info);
+ if (unlikely (skip == matcher_t::SKIP_YES))
+ return SKIP;
+
+ matcher_t::may_match_t match = matcher.may_match (info, get_glyph_data ());
+ if (match == matcher_t::MATCH_YES ||
+ (match == matcher_t::MATCH_MAYBE &&
+ skip == matcher_t::SKIP_NO))
+ return MATCH;
+
+ if (skip == matcher_t::SKIP_NO)
+ return NOT_MATCH;
+
+ return SKIP;
+ }
+
bool next (unsigned *unsafe_to = nullptr)
{
assert (num_items > 0);
@@ -543,27 +567,22 @@ struct hb_ot_apply_context_t :
while ((signed) idx < stop)
{
idx++;
- hb_glyph_info_t &info = c->buffer->info[idx];
-
- matcher_t::may_skip_t skip = matcher.may_skip (c, info);
- if (unlikely (skip == matcher_t::SKIP_YES))
- continue;
-
- matcher_t::may_match_t match = matcher.may_match (info, get_glyph_data ());
- if (match == matcher_t::MATCH_YES ||
- (match == matcher_t::MATCH_MAYBE &&
- skip == matcher_t::SKIP_NO))
- {
- num_items--;
- advance_glyph_data ();
- return true;
- }
-
- if (skip == matcher_t::SKIP_NO)
+ switch (match (c->buffer->info[idx]))
{
- if (unsafe_to)
- *unsafe_to = idx + 1;
- return false;
+ case MATCH:
+ {
+ num_items--;
+ advance_glyph_data ();
+ return true;
+ }
+ case NOT_MATCH:
+ {
+ if (unsafe_to)
+ *unsafe_to = idx + 1;
+ return false;
+ }
+ case SKIP:
+ continue;
}
}
if (unsafe_to)
@@ -581,27 +600,22 @@ struct hb_ot_apply_context_t :
while (idx > stop)
{
idx--;
- hb_glyph_info_t &info = c->buffer->out_info[idx];
-
- matcher_t::may_skip_t skip = matcher.may_skip (c, info);
- if (unlikely (skip == matcher_t::SKIP_YES))
- continue;
-
- matcher_t::may_match_t match = matcher.may_match (info, get_glyph_data ());
- if (match == matcher_t::MATCH_YES ||
- (match == matcher_t::MATCH_MAYBE &&
- skip == matcher_t::SKIP_NO))
- {
- num_items--;
- advance_glyph_data ();
- return true;
- }
-
- if (skip == matcher_t::SKIP_NO)
+ switch (match (c->buffer->out_info[idx]))
{
- if (unsafe_from)
- *unsafe_from = hb_max (1u, idx) - 1u;
- return false;
+ case MATCH:
+ {
+ num_items--;
+ advance_glyph_data ();
+ return true;
+ }
+ case NOT_MATCH:
+ {
+ if (unsafe_from)
+ *unsafe_from = hb_max (1u, idx) - 1u;
+ return false;
+ }
+ case SKIP:
+ continue;
}
}
if (unsafe_from)
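
Note: the two hunks above deduplicate next() and prev() — both scan loops now route their skip/match decision through the new tri-state match() helper and act on its result in a switch. A minimal sketch of the pattern, simplified (the real helper also folds MATCH_MAYBE together with SKIP_NO into MATCH) and with all names hypothetical rather than HarfBuzz API:

    #include <vector>

    enum match_t { MATCH, NOT_MATCH, SKIP };

    struct glyph_t { bool skippable; bool matches; };

    // Tri-state classification shared by the forward and backward scans.
    static match_t classify (const glyph_t &g)
    {
      if (g.skippable) return SKIP;          // transparent glyph (e.g. ignorable mark)
      return g.matches ? MATCH : NOT_MATCH;  // non-skippable: match or hard stop
    }

    // Forward scan, as in next(); prev() runs the same switch in reverse.
    static int find_next (const std::vector<glyph_t> &buf, unsigned idx)
    {
      for (unsigned i = idx + 1; i < buf.size (); i++)
        switch (classify (buf[i]))
        {
          case MATCH:     return (int) i;    // caller advances to the match
          case NOT_MATCH: return -1;         // stop; mark unsafe-to-break here
          case SKIP:      continue;          // keep scanning
        }
      return -1;                             // ran off the end of the buffer
    }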
@@ -698,6 +712,9 @@ struct hb_ot_apply_context_t :
uint32_t random_state = 1;
unsigned new_syllables = (unsigned) -1;
+ signed last_base = -1; // GPOS uses
+ unsigned last_base_until = 0; // GPOS uses
+
hb_ot_apply_context_t (unsigned int table_index_,
hb_font_t *font_,
hb_buffer_t *buffer_) :
@@ -736,7 +753,7 @@ struct hb_ot_apply_context_t :
iter_context.init (this, true);
}
- void set_lookup_mask (hb_mask_t mask) { lookup_mask = mask; init_iters (); }
+ void set_lookup_mask (hb_mask_t mask) { lookup_mask = mask; last_base = -1; last_base_until = 0; init_iters (); }
void set_auto_zwj (bool auto_zwj_) { auto_zwj = auto_zwj_; init_iters (); }
void set_auto_zwnj (bool auto_zwnj_) { auto_zwnj = auto_zwnj_; init_iters (); }
void set_per_syllable (bool per_syllable_) { per_syllable = per_syllable_; init_iters (); }
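
Note: the new last_base / last_base_until pair caches the most recent base glyph found while scanning backwards (used by GPOS, per the comment above), so later searches need not rescan positions already covered. The cache is only valid under one lookup mask, which is why set_lookup_mask() now resets it. Roughly, as a hypothetical standalone struct:

    // Hypothetical shape of the cache; field names mirror the patch.
    struct base_cache_t
    {
      signed   last_base       = -1;  // buffer index of the last base found; -1 = none
      unsigned last_base_until = 0;   // scan already covered positions below this

      // A different lookup mask changes which glyphs count, so drop the cache.
      void invalidate () { last_base = -1; last_base_until = 0; }
    };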
@@ -944,8 +961,6 @@ struct hb_accelerate_subtables_context_t :
hb_set_digest_t digest;
};
- typedef hb_vector_t<hb_applicable_t> array_t;
-
#ifndef HB_NO_OT_LAYOUT_LOOKUP_CACHE
template <typename T>
auto cache_cost (const T &obj, hb_priority<1>) HB_AUTO_RETURN ( obj.cache_cost () )
@@ -957,17 +972,15 @@ struct hb_accelerate_subtables_context_t :
template <typename T>
return_t dispatch (const T &obj)
{
- hb_applicable_t entry;
+ hb_applicable_t *entry = &array[i++];
- entry.init (obj,
- apply_to<T>
+ entry->init (obj,
+ apply_to<T>
#ifndef HB_NO_OT_LAYOUT_LOOKUP_CACHE
- , apply_cached_to<T>
- , cache_func_to<T>
+ , apply_cached_to<T>
+ , cache_func_to<T>
#endif
- );
-
- array.push (entry);
+ );
#ifndef HB_NO_OT_LAYOUT_LOOKUP_CACHE
/* Cache handling
@@ -979,9 +992,9 @@ struct hb_accelerate_subtables_context_t :
* and we allocate the cache opportunity to the costliest subtable.
*/
unsigned cost = cache_cost (obj, hb_prioritize);
- if (cost > cache_user_cost && !array.in_error ())
+ if (cost > cache_user_cost)
{
- cache_user_idx = array.length - 1;
+ cache_user_idx = i - 1;
cache_user_cost = cost;
}
#endif
@@ -990,10 +1003,11 @@ struct hb_accelerate_subtables_context_t :
}
static return_t default_return_value () { return hb_empty_t (); }
- hb_accelerate_subtables_context_t (array_t &array_) :
+ hb_accelerate_subtables_context_t (hb_applicable_t *array_) :
array (array_) {}
- array_t &array;
+ hb_applicable_t *array;
+ unsigned i = 0;
#ifndef HB_NO_OT_LAYOUT_LOOKUP_CACHE
unsigned cache_user_idx = (unsigned) -1;
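
Note: with array_t gone, dispatch() fills a caller-preallocated flat array through a running index i instead of push()ing into a growable vector, so it no longer needs the vector's error state; the running-max bookkeeping that awards the single cache slot to the costliest subtable is unchanged. A rough sketch of the shape, with illustrative names:

    struct entry_t { unsigned cost; /* initialized from the subtable */ };

    struct collector_t
    {
      collector_t (entry_t *a) : array (a) {}

      entry_t *array;                          // preallocated by the caller
      unsigned i = 0;                          // next free slot
      unsigned cache_user_idx  = (unsigned) -1;
      unsigned cache_user_cost = 0;

      void add (unsigned cost)
      {
        entry_t *entry = &array[i++];          // no allocation, cannot fail
        entry->cost = cost;
        if (cost > cache_user_cost)            // running max: costliest subtable
        {
          cache_user_idx  = i - 1;             // wins the one cache slot
          cache_user_cost = cost;
        }
      }
    };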
@@ -1538,18 +1552,19 @@ static void context_closure_recurse_lookups (hb_closure_context_t *c,
intersected_glyphs_func_t intersected_glyphs_func,
void *cache)
{
- hb_set_t *covered_seq_indicies = hb_set_create ();
+ hb_set_t covered_seq_indicies;
+ hb_set_t pos_glyphs;
for (unsigned int i = 0; i < lookupCount; i++)
{
unsigned seqIndex = lookupRecord[i].sequenceIndex;
if (seqIndex >= inputCount) continue;
bool has_pos_glyphs = false;
- hb_set_t pos_glyphs;
- if (!hb_set_has (covered_seq_indicies, seqIndex))
+ if (!covered_seq_indicies.has (seqIndex))
{
has_pos_glyphs = true;
+ pos_glyphs.clear ();
if (seqIndex == 0)
{
switch (context_format) {
@@ -1578,7 +1593,7 @@ static void context_closure_recurse_lookups (hb_closure_context_t *c,
}
}
- covered_seq_indicies->add (seqIndex);
+ covered_seq_indicies.add (seqIndex);
if (has_pos_glyphs) {
c->push_cur_active_glyphs () = std::move (pos_glyphs);
} else {
@@ -1589,12 +1604,10 @@ static void context_closure_recurse_lookups (hb_closure_context_t *c,
if (context_format == ContextFormat::CoverageBasedContext)
endIndex += 1;
- c->recurse (lookupRecord[i].lookupListIndex, covered_seq_indicies, seqIndex, endIndex);
+ c->recurse (lookupRecord[i].lookupListIndex, &covered_seq_indicies, seqIndex, endIndex);
c->pop_cur_done_glyphs ();
}
-
- hb_set_destroy (covered_seq_indicies);
}
template <typename context_t>
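
Note: this hunk removes a heap allocation per call — covered_seq_indicies moves from hb_set_create()/hb_set_destroy() to an automatic hb_set_t, and pos_glyphs is hoisted out of the loop and clear()ed per iteration rather than constructed fresh each time. The general pattern, sketched with a standard container standing in for hb_set_t:

    #include <set>

    static void recurse_all (unsigned count)
    {
      std::set<unsigned> covered;     // automatic storage; freed on return
      std::set<unsigned> pos_glyphs;  // one buffer reused across iterations
      for (unsigned i = 0; i < count; i++)
      {
        pos_glyphs.clear ();          // cheaper than destroy + re-create
        // ... fill pos_glyphs, record i in covered, recurse ...
      }
    }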
@@ -1651,7 +1664,7 @@ static inline void apply_lookup (hb_ot_apply_context_t *c,
if (buffer->have_output)
c->buffer->sync_so_far ();
c->buffer->message (c->font,
- "recursing to lookup %u at %d",
+ "recursing to lookup %u at %u",
(unsigned) lookupRecord[i].lookupListIndex,
buffer->idx);
}
@@ -2224,7 +2237,7 @@ struct ContextFormat1_4
if (unlikely (!c->serializer->extend_min (out))) return_trace (false);
out->format = format;
- const hb_map_t *lookup_map = c->table_tag == HB_OT_TAG_GSUB ? c->plan->gsub_lookups : c->plan->gpos_lookups;
+ const hb_map_t *lookup_map = c->table_tag == HB_OT_TAG_GSUB ? &c->plan->gsub_lookups : &c->plan->gpos_lookups;
hb_sorted_vector_t<hb_codepoint_t> new_coverage;
+ hb_zip (this+coverage, ruleSet)
| hb_filter (glyphset, hb_first)
@@ -2461,7 +2474,7 @@ struct ContextFormat2_5
hb_set_t coverage_glyph_classes;
(this+classDef).intersected_classes (&retained_coverage_glyphs, &coverage_glyph_classes);
- const hb_map_t *lookup_map = c->table_tag == HB_OT_TAG_GSUB ? c->plan->gsub_lookups : c->plan->gpos_lookups;
+ const hb_map_t *lookup_map = c->table_tag == HB_OT_TAG_GSUB ? &c->plan->gsub_lookups : &c->plan->gpos_lookups;
bool ret = true;
int non_zero_index = -1, index = 0;
auto snapshot = c->serializer->snapshot();
@@ -2641,7 +2654,7 @@ struct ContextFormat3
}
const auto& lookupRecord = StructAfter<UnsizedArrayOf<LookupRecord>> (coverageZ.as_array (glyphCount));
- const hb_map_t *lookup_map = c->table_tag == HB_OT_TAG_GSUB ? c->plan->gsub_lookups : c->plan->gpos_lookups;
+ const hb_map_t *lookup_map = c->table_tag == HB_OT_TAG_GSUB ? &c->plan->gsub_lookups : &c->plan->gpos_lookups;
unsigned count = serialize_lookuprecord_array (c->serializer, lookupRecord.as_array (lookupCount), lookup_map);
@@ -2681,8 +2694,8 @@ struct Context
template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{
+ if (unlikely (!c->may_dispatch (this, &u.format))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format);
- if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ());
switch (u.format) {
case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...));
case 2: return_trace (c->dispatch (u.format2, std::forward<Ts> (ds)...));
@@ -3305,7 +3318,7 @@ struct ChainContextFormat1_4
if (unlikely (!c->serializer->extend_min (out))) return_trace (false);
out->format = format;
- const hb_map_t *lookup_map = c->table_tag == HB_OT_TAG_GSUB ? c->plan->gsub_lookups : c->plan->gpos_lookups;
+ const hb_map_t *lookup_map = c->table_tag == HB_OT_TAG_GSUB ? &c->plan->gsub_lookups : &c->plan->gpos_lookups;
hb_sorted_vector_t<hb_codepoint_t> new_coverage;
+ hb_zip (this+coverage, ruleSet)
| hb_filter (glyphset, hb_first)
@@ -3541,7 +3554,7 @@ struct ChainContextFormat2_5
*/
struct ChainContextApplyLookupContext lookup_context = {
- {{cached && &backtrack_class_def == &input_class_def ? match_class_cached : match_class,
+ {{cached && &backtrack_class_def == &lookahead_class_def ? match_class_cached : match_class,
cached && &input_class_def == &lookahead_class_def ? match_class_cached : match_class,
cached ? match_class_cached : match_class}},
{&backtrack_class_def,
@@ -3585,7 +3598,7 @@ struct ChainContextFormat2_5
int non_zero_index = -1, index = 0;
bool ret = true;
- const hb_map_t *lookup_map = c->table_tag == HB_OT_TAG_GSUB ? c->plan->gsub_lookups : c->plan->gpos_lookups;
+ const hb_map_t *lookup_map = c->table_tag == HB_OT_TAG_GSUB ? &c->plan->gsub_lookups : &c->plan->gpos_lookups;
auto last_non_zero = c->serializer->snapshot ();
for (const auto& _ : + hb_enumerate (ruleSet)
| hb_filter (input_klass_map, hb_first))
@@ -3827,7 +3840,7 @@ struct ChainContextFormat3
return_trace (false);
const auto &lookup = StructAfter<decltype (lookupX)> (lookahead);
- const hb_map_t *lookup_map = c->table_tag == HB_OT_TAG_GSUB ? c->plan->gsub_lookups : c->plan->gpos_lookups;
+ const hb_map_t *lookup_map = c->table_tag == HB_OT_TAG_GSUB ? &c->plan->gsub_lookups : &c->plan->gpos_lookups;
HBUINT16 *lookupCount = c->serializer->copy<HBUINT16> (lookup.len);
if (!lookupCount) return_trace (false);
@@ -3875,8 +3888,8 @@ struct ChainContext
template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{
+ if (unlikely (!c->may_dispatch (this, &u.format))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format);
- if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ());
switch (u.format) {
case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...));
case 2: return_trace (c->dispatch (u.format2, std::forward<Ts> (ds)...));
@@ -3915,8 +3928,8 @@ struct ExtensionFormat1
template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{
+ if (unlikely (!c->may_dispatch (this, this))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, format);
- if (unlikely (!c->may_dispatch (this, this))) return_trace (c->no_dispatch_return_value ());
return_trace (get_subtable<typename T::SubTable> ().dispatch (c, get_type (), std::forward<Ts> (ds)...));
}
@@ -3994,8 +4007,8 @@ struct Extension
template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{
+ if (unlikely (!c->may_dispatch (this, &u.format))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format);
- if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ());
switch (u.format) {
case 1: return_trace (u.format1.dispatch (c, std::forward<Ts> (ds)...));
default:return_trace (c->default_return_value ());
@@ -4017,35 +4030,50 @@ struct Extension
struct hb_ot_layout_lookup_accelerator_t
{
template <typename TLookup>
- void init (const TLookup &lookup)
+ static hb_ot_layout_lookup_accelerator_t *create (const TLookup &lookup)
{
- subtables.init ();
- hb_accelerate_subtables_context_t c_accelerate_subtables (subtables);
+ unsigned count = lookup.get_subtable_count ();
+
+ unsigned size = sizeof (hb_ot_layout_lookup_accelerator_t) -
+ HB_VAR_ARRAY * sizeof (hb_accelerate_subtables_context_t::hb_applicable_t) +
+ count * sizeof (hb_accelerate_subtables_context_t::hb_applicable_t);
+
+ /* The following is a calloc because when we are collecting subtables,
+ * some of them might be invalid and hence not collect; as a result,
+ * we might not fill in all the count entries of the subtables array.
+ * Zeroing it allows the set digest to gatekeep it without having to
+ * initialize it further. */
+ auto *thiz = (hb_ot_layout_lookup_accelerator_t *) hb_calloc (1, size);
+ if (unlikely (!thiz))
+ return nullptr;
+
+ hb_accelerate_subtables_context_t c_accelerate_subtables (thiz->subtables);
lookup.dispatch (&c_accelerate_subtables);
- digest.init ();
- for (auto& subtable : hb_iter (subtables))
- digest.add (subtable.digest);
+ thiz->digest.init ();
+ for (auto& subtable : hb_iter (thiz->subtables, count))
+ thiz->digest.add (subtable.digest);
#ifndef HB_NO_OT_LAYOUT_LOOKUP_CACHE
- cache_user_idx = c_accelerate_subtables.cache_user_idx;
- for (unsigned i = 0; i < subtables.length; i++)
- if (i != cache_user_idx)
- subtables[i].apply_cached_func = subtables[i].apply_func;
+ thiz->cache_user_idx = c_accelerate_subtables.cache_user_idx;
+ for (unsigned i = 0; i < count; i++)
+ if (i != thiz->cache_user_idx)
+ thiz->subtables[i].apply_cached_func = thiz->subtables[i].apply_func;
#endif
+
+ return thiz;
}
- void fini () { subtables.fini (); }
bool may_have (hb_codepoint_t g) const
{ return digest.may_have (g); }
- bool apply (hb_ot_apply_context_t *c, bool use_cache) const
+ bool apply (hb_ot_apply_context_t *c, unsigned subtables_count, bool use_cache) const
{
#ifndef HB_NO_OT_LAYOUT_LOOKUP_CACHE
if (use_cache)
{
return
- + hb_iter (subtables)
+ + hb_iter (hb_iter (subtables, subtables_count))
| hb_map ([&c] (const hb_accelerate_subtables_context_t::hb_applicable_t &_) { return _.apply_cached (c); })
| hb_any
;
@@ -4054,7 +4082,7 @@ struct hb_ot_layout_lookup_accelerator_t
#endif
{
return
- + hb_iter (subtables)
+ + hb_iter (hb_iter (subtables, subtables_count))
| hb_map ([&c] (const hb_accelerate_subtables_context_t::hb_applicable_t &_) { return _.apply (c); })
| hb_any
;
@@ -4081,10 +4109,10 @@ struct hb_ot_layout_lookup_accelerator_t
hb_set_digest_t digest;
private:
- hb_accelerate_subtables_context_t::array_t subtables;
#ifndef HB_NO_OT_LAYOUT_LOOKUP_CACHE
unsigned cache_user_idx = (unsigned) -1;
#endif
+ hb_accelerate_subtables_context_t::hb_applicable_t subtables[HB_VAR_ARRAY];
};
template <typename Types>
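
Note: create() replaces the old init()/fini() pair — the accelerator and its subtable array now live in one calloc'd block, with subtables declared as a variable-length tail (HB_VAR_ARRAY) whose real element count travels separately (hence apply() growing a subtables_count parameter). A minimal sketch of the sizing arithmetic over a simplified struct; names here are illustrative:

    #include <cstdlib>

    struct accel_t
    {
      unsigned digest;                                      // fixed header fields ...
      struct sub_t { void (*apply) (void); } subtables[1];  // var-length tail
    };

    static accel_t *accel_create (unsigned count)
    {
      // header size, minus the one placeholder element, plus `count` real ones
      size_t size = sizeof (accel_t)
                  - 1 * sizeof (accel_t::sub_t)
                  + count * sizeof (accel_t::sub_t);
      // calloc so entries that never get filled in stay zeroed (per the
      // comment in the patch, the set digest can then gatekeep them safely)
      return (accel_t *) calloc (1, size);
    }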
@@ -4142,8 +4170,11 @@ struct GSUBGPOSVersion1_2
bool subset (hb_subset_layout_context_t *c) const
{
TRACE_SUBSET (this);
- auto *out = c->subset_context->serializer->embed (*this);
- if (unlikely (!out)) return_trace (false);
+
+ auto *out = c->subset_context->serializer->start_embed (this);
+ if (unlikely (!c->subset_context->serializer->extend_min (out))) return_trace (false);
+
+ out->version = version;
typedef LookupOffsetList<TLookup, typename Types::HBUINT> TLookupList;
reinterpret_cast<typename Types::template OffsetTo<TLookupList> &> (out->lookupList)
@@ -4166,9 +4197,23 @@ struct GSUBGPOSVersion1_2
#ifndef HB_NO_VAR
if (version.to_int () >= 0x00010001u)
{
- bool ret = out->featureVars.serialize_subset (c->subset_context, featureVars, this, c);
+ auto snapshot = c->subset_context->serializer->snapshot ();
+ if (!c->subset_context->serializer->extend_min (&out->featureVars))
+ return_trace (false);
+
+ // TODO(qxliu76): the current implementation doesn't correctly handle feature variations
+ // that are dropped by instancing when the associated conditions don't trigger.
+ // Since partial instancing isn't yet supported this isn't an issue yet but will
+ // need to be fixed for partial instancing.
+
+ // if all axes are pinned all feature vars are dropped.
+ bool ret = !c->subset_context->plan->all_axes_pinned
+ && out->featureVars.serialize_subset (c->subset_context, featureVars, this, c);
if (!ret && version.major == 1)
{
+ c->subset_context->serializer->revert (snapshot);
out->version.major = 1;
out->version.minor = 0;
}
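
Note: the version-1.1 path now wraps the optional featureVars subset in a snapshot/revert transaction — extend_min() reserves the offset up front, and if subsetting fails (or all axes are pinned, which drops every feature variation) the serializer is rolled back to the snapshot before the table is downgraded to 1.0, so the reserved offset never leaks into the output. The underlying idiom, sketched over a plain byte buffer rather than hb_serialize_context_t:

    #include <cstdint>
    #include <vector>

    struct serializer_t
    {
      std::vector<uint8_t> buf;
      size_t snapshot () const { return buf.size (); }
      void   revert (size_t s) { buf.resize (s); }   // drop bytes written since
      void   put (uint8_t b)   { buf.push_back (b); }
    };

    // usage (write_feature_vars is a hypothetical caller-side helper):
    //   size_t snap = s.snapshot ();
    //   if (!write_feature_vars (s)) { s.revert (snap); minor_version = 0; }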
@@ -4430,28 +4475,47 @@ struct GSUBGPOS
this->lookup_count = table->get_lookup_count ();
- this->accels = (hb_ot_layout_lookup_accelerator_t *) hb_calloc (this->lookup_count, sizeof (hb_ot_layout_lookup_accelerator_t));
+ this->accels = (hb_atomic_ptr_t<hb_ot_layout_lookup_accelerator_t> *) hb_calloc (this->lookup_count, sizeof (*accels));
if (unlikely (!this->accels))
{
this->lookup_count = 0;
this->table.destroy ();
this->table = hb_blob_get_empty ();
}
-
- for (unsigned int i = 0; i < this->lookup_count; i++)
- this->accels[i].init (table->get_lookup (i));
}
~accelerator_t ()
{
for (unsigned int i = 0; i < this->lookup_count; i++)
- this->accels[i].fini ();
+ hb_free (this->accels[i]);
hb_free (this->accels);
this->table.destroy ();
}
+ hb_ot_layout_lookup_accelerator_t *get_accel (unsigned lookup_index) const
+ {
+ if (unlikely (lookup_index >= lookup_count)) return nullptr;
+
+ retry:
+ auto *accel = accels[lookup_index].get_acquire ();
+ if (unlikely (!accel))
+ {
+ accel = hb_ot_layout_lookup_accelerator_t::create (table->get_lookup (lookup_index));
+ if (unlikely (!accel))
+ return nullptr;
+
+ if (unlikely (!accels[lookup_index].cmpexch (nullptr, accel)))
+ {
+ hb_free (accel);
+ goto retry;
+ }
+ }
+
+ return accel;
+ }
+
hb_blob_ptr_t<T> table;
unsigned int lookup_count;
- hb_ot_layout_lookup_accelerator_t *accels;
+ hb_atomic_ptr_t<hb_ot_layout_lookup_accelerator_t> *accels;
};
protected:
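
Note: the accelerator array becomes an array of atomic pointers, and get_accel() builds each accelerator lazily on first use — acquire-load, create on miss, publish via compare-exchange, and free the duplicate if another thread won the race (the goto retry above). The same idiom, with std::atomic standing in for hb_atomic_ptr_t:

    #include <atomic>

    template <typename T>
    static T *get_or_create (std::atomic<T *> &slot)
    {
      T *p = slot.load (std::memory_order_acquire);
      while (!p)
      {
        T *fresh = new T ();
        if (slot.compare_exchange_strong (p, fresh,
                                          std::memory_order_release,
                                          std::memory_order_acquire))
          return fresh;                // we published our copy
        delete fresh;                  // lost the race; p now holds the winner
      }
      return p;
    }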