Diffstat (limited to 'src/3rdparty/harfbuzz-ng/src/hb-repacker.hh')
-rw-r--r--  src/3rdparty/harfbuzz-ng/src/hb-repacker.hh | 1289
 1 file changed, 217 insertions(+), 1072 deletions(-)
diff --git a/src/3rdparty/harfbuzz-ng/src/hb-repacker.hh b/src/3rdparty/harfbuzz-ng/src/hb-repacker.hh
index 2a9e75c45b..e9cd376ad3 100644
--- a/src/3rdparty/harfbuzz-ng/src/hb-repacker.hh
+++ b/src/3rdparty/harfbuzz-ng/src/hb-repacker.hh
@@ -29,1077 +29,167 @@
#include "hb-open-type.hh"
#include "hb-map.hh"
-#include "hb-priority-queue.hh"
-#include "hb-serialize.hh"
#include "hb-vector.hh"
+#include "graph/graph.hh"
+#include "graph/gsubgpos-graph.hh"
+#include "graph/serialize.hh"
+
+using graph::graph_t;
/*
* For a detailed writeup on the overflow resolution algorithm see:
* docs/repacker.md
*/
-struct graph_t
-{
- struct vertex_t
- {
- hb_serialize_context_t::object_t obj;
- int64_t distance = 0 ;
- int64_t space = 0 ;
- hb_vector_t<unsigned> parents;
- unsigned start = 0;
- unsigned end = 0;
- unsigned priority = 0;
-
- bool is_shared () const
- {
- return parents.length > 1;
- }
-
- unsigned incoming_edges () const
- {
- return parents.length;
- }
-
- void remove_parent (unsigned parent_index)
- {
- for (unsigned i = 0; i < parents.length; i++)
- {
- if (parents[i] != parent_index) continue;
- parents.remove (i);
- break;
- }
- }
-
- void remap_parents (const hb_vector_t<unsigned>& id_map)
- {
- for (unsigned i = 0; i < parents.length; i++)
- parents[i] = id_map[parents[i]];
- }
-
- void remap_parent (unsigned old_index, unsigned new_index)
- {
- for (unsigned i = 0; i < parents.length; i++)
- {
- if (parents[i] == old_index)
- parents[i] = new_index;
- }
- }
-
- bool is_leaf () const
- {
- return !obj.real_links.length && !obj.virtual_links.length;
- }
-
- bool raise_priority ()
- {
- if (has_max_priority ()) return false;
- priority++;
- return true;
- }
-
- bool has_max_priority () const {
- return priority >= 3;
- }
-
- int64_t modified_distance (unsigned order) const
- {
- // TODO(garretrieger): once priority is high enough, should try
- // setting distance = 0 which will force to sort immediately after
- // it's parent where possible.
-
- int64_t modified_distance =
- hb_min (hb_max(distance + distance_modifier (), 0), 0x7FFFFFFFFFF);
- if (has_max_priority ()) {
- modified_distance = 0;
- }
- return (modified_distance << 18) | (0x003FFFF & order);
- }
-
- int64_t distance_modifier () const
- {
- if (!priority) return 0;
- int64_t table_size = obj.tail - obj.head;
-
- if (priority == 1)
- return -table_size / 2;
-
- return -table_size;
- }
- };
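(The sort key produced by modified_distance () in the removed vertex_t above packs the clamped, priority-adjusted distance into the high bits and the link-traversal order into the low 18 bits, so equal distances fall back to encounter order. A minimal standalone sketch of the same packing, with hypothetical values; this is illustration, not part of the commit:)

    #include <cstdint>
    #include <cassert>

    // Pack a clamped distance into the high bits and a tie-breaking order
    // into the low 18 bits, mirroring modified_distance () above.
    static int64_t pack_sort_key (int64_t distance, unsigned order)
    {
      if (distance < 0) distance = 0;
      if (distance > 0x7FFFFFFFFFF) distance = 0x7FFFFFFFFFF;
      return (distance << 18) | (int64_t) (0x3FFFF & order);
    }

    int main ()
    {
      assert (pack_sort_key (100, 1) < pack_sort_key (100, 2));    // order breaks ties
      assert (pack_sort_key (99, 50000) < pack_sort_key (100, 0)); // distance dominates
      return 0;
    }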
-
- struct overflow_record_t
- {
- unsigned parent;
- unsigned child;
- };
-
- /*
- * A topological sorting of an object graph. Ordered
- * in reverse serialization order (first object in the
- * serialization is at the end of the list). This matches
- * the 'packed' object stack used internally in the
- * serializer
- */
- template<typename T>
- graph_t (const T& objects)
- : parents_invalid (true),
- distance_invalid (true),
- positions_invalid (true),
- successful (true)
- {
- num_roots_for_space_.push (1);
- bool removed_nil = false;
- for (unsigned i = 0; i < objects.length; i++)
- {
- // TODO(grieger): check all links point to valid objects.
-
- // If this graph came from a serialization buffer object 0 is the
- // nil object. We don't need it for our purposes here so drop it.
- if (i == 0 && !objects[i])
- {
- removed_nil = true;
- continue;
- }
-
- vertex_t* v = vertices_.push ();
- if (check_success (!vertices_.in_error ()))
- v->obj = *objects[i];
- if (!removed_nil) continue;
- // Fix indices to account for removed nil object.
- for (auto& l : v->obj.all_links_writer ()) {
- l.objidx--;
- }
- }
- }
-
- ~graph_t ()
- {
- vertices_.fini ();
- }
-
- bool in_error () const
- {
- return !successful ||
- vertices_.in_error () ||
- num_roots_for_space_.in_error ();
- }
-
- const vertex_t& root () const
- {
- return vertices_[root_idx ()];
- }
-
- unsigned root_idx () const
- {
- // Object graphs are in reverse order, the first object is at the end
- // of the vector. Since the graph is topologically sorted it's safe to
- // assume the first object has no incoming edges.
- return vertices_.length - 1;
- }
-
- const hb_serialize_context_t::object_t& object(unsigned i) const
- {
- return vertices_[i].obj;
- }
-
- /*
- * serialize graph into the provided serialization buffer.
- */
- hb_blob_t* serialize () const
- {
- hb_vector_t<char> buffer;
- size_t size = serialized_length ();
- if (!buffer.alloc (size)) {
- DEBUG_MSG (SUBSET_REPACK, nullptr, "Unable to allocate output buffer.");
- return nullptr;
- }
- hb_serialize_context_t c((void *) buffer, size);
-
- c.start_serialize<void> ();
- for (unsigned i = 0; i < vertices_.length; i++) {
- c.push ();
-
- size_t size = vertices_[i].obj.tail - vertices_[i].obj.head;
- char* start = c.allocate_size <char> (size);
- if (!start) {
- DEBUG_MSG (SUBSET_REPACK, nullptr, "Buffer out of space.");
- return nullptr;
- }
-
- memcpy (start, vertices_[i].obj.head, size);
-
- // Only real links needs to be serialized.
- for (const auto& link : vertices_[i].obj.real_links)
- serialize_link (link, start, &c);
-
- // All duplications are already encoded in the graph, so don't
- // enable sharing during packing.
- c.pop_pack (false);
- }
- c.end_serialize ();
-
- if (c.in_error ()) {
- DEBUG_MSG (SUBSET_REPACK, nullptr, "Error during serialization. Err flag: %d",
- c.errors);
- return nullptr;
- }
-
- return c.copy_blob ();
- }
-
- /*
- * Generates a new topological sorting of graph using Kahn's
- * algorithm: https://en.wikipedia.org/wiki/Topological_sorting#Algorithms
- */
- void sort_kahn ()
- {
- positions_invalid = true;
-
- if (vertices_.length <= 1) {
- // Graph of 1 or less doesn't need sorting.
- return;
- }
-
- hb_vector_t<unsigned> queue;
- hb_vector_t<vertex_t> sorted_graph;
- if (unlikely (!check_success (sorted_graph.resize (vertices_.length)))) return;
- hb_vector_t<unsigned> id_map;
- if (unlikely (!check_success (id_map.resize (vertices_.length)))) return;
-
- hb_vector_t<unsigned> removed_edges;
- if (unlikely (!check_success (removed_edges.resize (vertices_.length)))) return;
- update_parents ();
-
- queue.push (root_idx ());
- int new_id = vertices_.length - 1;
-
- while (!queue.in_error () && queue.length)
- {
- unsigned next_id = queue[0];
- queue.remove (0);
-
- vertex_t& next = vertices_[next_id];
- sorted_graph[new_id] = next;
- id_map[next_id] = new_id--;
-
- for (const auto& link : next.obj.all_links ()) {
- removed_edges[link.objidx]++;
- if (!(vertices_[link.objidx].incoming_edges () - removed_edges[link.objidx]))
- queue.push (link.objidx);
- }
- }
-
- check_success (!queue.in_error ());
- check_success (!sorted_graph.in_error ());
- if (!check_success (new_id == -1))
- print_orphaned_nodes ();
-
- remap_all_obj_indices (id_map, &sorted_graph);
-
- hb_swap (vertices_, sorted_graph);
- sorted_graph.fini ();
- }
-
- /*
- * Generates a new topological sorting of graph ordered by the shortest
- * distance to each node.
- */
- void sort_shortest_distance ()
- {
- positions_invalid = true;
-
- if (vertices_.length <= 1) {
- // Graph of 1 or less doesn't need sorting.
- return;
- }
-
- update_distances ();
- hb_priority_queue_t queue;
- hb_vector_t<vertex_t> sorted_graph;
- if (unlikely (!check_success (sorted_graph.resize (vertices_.length)))) return;
- hb_vector_t<unsigned> id_map;
- if (unlikely (!check_success (id_map.resize (vertices_.length)))) return;
-
- hb_vector_t<unsigned> removed_edges;
- if (unlikely (!check_success (removed_edges.resize (vertices_.length)))) return;
- update_parents ();
-
- queue.insert (root ().modified_distance (0), root_idx ());
- int new_id = root_idx ();
- unsigned order = 1;
- while (!queue.in_error () && !queue.is_empty ())
- {
- unsigned next_id = queue.pop_minimum().second;
-
- vertex_t& next = vertices_[next_id];
- sorted_graph[new_id] = next;
- id_map[next_id] = new_id--;
-
- for (const auto& link : next.obj.all_links ()) {
- removed_edges[link.objidx]++;
- if (!(vertices_[link.objidx].incoming_edges () - removed_edges[link.objidx]))
- // Add the order that the links were encountered to the priority.
- // This ensures that ties between priorities objects are broken in a consistent
- // way. More specifically this is set up so that if a set of objects have the same
- // distance they'll be added to the topological order in the order that they are
- // referenced from the parent object.
- queue.insert (vertices_[link.objidx].modified_distance (order++),
- link.objidx);
- }
- }
-
- check_success (!queue.in_error ());
- check_success (!sorted_graph.in_error ());
- if (!check_success (new_id == -1))
- print_orphaned_nodes ();
-
- remap_all_obj_indices (id_map, &sorted_graph);
-
- hb_swap (vertices_, sorted_graph);
- sorted_graph.fini ();
- }
-
- /*
- * Assign unique space numbers to each connected subgraph of 32 bit offset(s).
- */
- bool assign_32bit_spaces ()
- {
- unsigned root_index = root_idx ();
- hb_set_t visited;
- hb_set_t roots;
- for (unsigned i = 0; i <= root_index; i++)
- {
- // Only real links can form 32 bit spaces
- for (auto& l : vertices_[i].obj.real_links)
- {
- if (l.width == 4 && !l.is_signed)
- {
- roots.add (l.objidx);
- find_subgraph (l.objidx, visited);
- }
- }
- }
-
- // Mark everything not in the subgraphs of 32 bit roots as visited.
- // This prevents 32 bit subgraphs from being connected via nodes not in the 32 bit subgraphs.
- visited.invert ();
-
- if (!roots) return false;
-
- while (roots)
- {
- unsigned next = HB_SET_VALUE_INVALID;
- if (unlikely (!check_success (!roots.in_error ()))) break;
- if (!roots.next (&next)) break;
-
- hb_set_t connected_roots;
- find_connected_nodes (next, roots, visited, connected_roots);
- if (unlikely (!check_success (!connected_roots.in_error ()))) break;
-
- isolate_subgraph (connected_roots);
- if (unlikely (!check_success (!connected_roots.in_error ()))) break;
-
- unsigned next_space = this->next_space ();
- num_roots_for_space_.push (0);
- for (unsigned root : connected_roots)
- {
- DEBUG_MSG (SUBSET_REPACK, nullptr, "Subgraph %u gets space %u", root, next_space);
- vertices_[root].space = next_space;
- num_roots_for_space_[next_space] = num_roots_for_space_[next_space] + 1;
- distance_invalid = true;
- positions_invalid = true;
- }
-
- // TODO(grieger): special case for GSUB/GPOS use extension promotions to move 16 bit space
- // into the 32 bit space as needed, instead of using isolation.
- }
-
-
-
- return true;
- }
-
- /*
- * Isolates the subgraph of nodes reachable from root. Any links to nodes in the subgraph
- * that originate from outside of the subgraph will be removed by duplicating the linked to
- * object.
- *
- * Indices stored in roots will be updated if any of the roots are duplicated to new indices.
- */
- bool isolate_subgraph (hb_set_t& roots)
- {
- update_parents ();
- hb_hashmap_t<unsigned, unsigned> subgraph;
-
- // incoming edges to root_idx should be all 32 bit in length so we don't need to de-dup these
- // set the subgraph incoming edge count to match all of root_idx's incoming edges
- hb_set_t parents;
- for (unsigned root_idx : roots)
- {
- subgraph.set (root_idx, wide_parents (root_idx, parents));
- find_subgraph (root_idx, subgraph);
- }
-
- unsigned original_root_idx = root_idx ();
- hb_hashmap_t<unsigned, unsigned> index_map;
- bool made_changes = false;
- for (auto entry : subgraph.iter ())
- {
- const auto& node = vertices_[entry.first];
- unsigned subgraph_incoming_edges = entry.second;
-
- if (subgraph_incoming_edges < node.incoming_edges ())
- {
- // Only de-dup objects with incoming links from outside the subgraph.
- made_changes = true;
- duplicate_subgraph (entry.first, index_map);
- }
- }
-
- if (!made_changes)
- return false;
-
- if (original_root_idx != root_idx ()
- && parents.has (original_root_idx))
- {
- // If the root idx has changed since parents was determined, update root idx in parents
- parents.add (root_idx ());
- parents.del (original_root_idx);
- }
-
- auto new_subgraph =
- + subgraph.keys ()
- | hb_map([&] (unsigned node_idx) {
- if (index_map.has (node_idx)) return index_map[node_idx];
- return node_idx;
- })
- ;
-
- remap_obj_indices (index_map, new_subgraph);
- remap_obj_indices (index_map, parents.iter (), true);
-
- // Update roots set with new indices as needed.
- unsigned next = HB_SET_VALUE_INVALID;
- while (roots.next (&next))
- {
- if (index_map.has (next))
- {
- roots.del (next);
- roots.add (index_map[next]);
- }
- }
-
- return true;
- }
-
- void find_subgraph (unsigned node_idx, hb_hashmap_t<unsigned, unsigned>& subgraph)
- {
- for (const auto& link : vertices_[node_idx].obj.all_links ())
- {
- if (subgraph.has (link.objidx))
- {
- subgraph.set (link.objidx, subgraph[link.objidx] + 1);
- continue;
- }
- subgraph.set (link.objidx, 1);
- find_subgraph (link.objidx, subgraph);
- }
- }
+struct lookup_size_t
+{
+ unsigned lookup_index;
+ size_t size;
+ unsigned num_subtables;
- void find_subgraph (unsigned node_idx, hb_set_t& subgraph)
+ static int cmp (const void* a, const void* b)
{
- if (subgraph.has (node_idx)) return;
- subgraph.add (node_idx);
- for (const auto& link : vertices_[node_idx].obj.all_links ())
- find_subgraph (link.objidx, subgraph);
+ return cmp ((const lookup_size_t*) a,
+ (const lookup_size_t*) b);
}
- /*
- * duplicates all nodes in the subgraph reachable from node_idx. Does not re-assign
- * links. index_map is updated with mappings from old id to new id. If a duplication has already
- * been performed for a given index, then it will be skipped.
- */
- void duplicate_subgraph (unsigned node_idx, hb_hashmap_t<unsigned, unsigned>& index_map)
+ static int cmp (const lookup_size_t* a, const lookup_size_t* b)
{
- if (index_map.has (node_idx))
- return;
-
- index_map.set (node_idx, duplicate (node_idx));
- for (const auto& l : object (node_idx).all_links ()) {
- duplicate_subgraph (l.objidx, index_map);
- }
- }
-
- /*
- * Creates a copy of node_idx and returns it's new index.
- */
- unsigned duplicate (unsigned node_idx)
- {
- positions_invalid = true;
- distance_invalid = true;
-
- auto* clone = vertices_.push ();
- auto& child = vertices_[node_idx];
- if (vertices_.in_error ()) {
- return -1;
- }
-
- clone->obj.head = child.obj.head;
- clone->obj.tail = child.obj.tail;
- clone->distance = child.distance;
- clone->space = child.space;
- clone->parents.reset ();
-
- unsigned clone_idx = vertices_.length - 2;
- for (const auto& l : child.obj.real_links)
- {
- clone->obj.real_links.push (l);
- vertices_[l.objidx].parents.push (clone_idx);
+ double subtables_per_byte_a = (double) a->num_subtables / (double) a->size;
+ double subtables_per_byte_b = (double) b->num_subtables / (double) b->size;
+ if (subtables_per_byte_a == subtables_per_byte_b) {
+ return b->lookup_index - a->lookup_index;
}
- for (const auto& l : child.obj.virtual_links)
- {
- clone->obj.virtual_links.push (l);
- vertices_[l.objidx].parents.push (clone_idx);
- }
-
- check_success (!clone->obj.real_links.in_error ());
- check_success (!clone->obj.virtual_links.in_error ());
-
- // The last object is the root of the graph, so swap back the root to the end.
- // The root's obj idx does change, however since it's root nothing else refers to it.
- // all other obj idx's will be unaffected.
- vertex_t root = vertices_[vertices_.length - 2];
- vertices_[clone_idx] = *clone;
- vertices_[vertices_.length - 1] = root;
- // Since the root moved, update the parents arrays of all children on the root.
- for (const auto& l : root.obj.all_links ())
- vertices_[l.objidx].remap_parent (root_idx () - 1, root_idx ());
-
- return clone_idx;
+ double cmp = subtables_per_byte_b - subtables_per_byte_a;
+ if (cmp < 0) return -1;
+ if (cmp > 0) return 1;
+ return 0;
}
+};
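(cmp () above orders lookups by descending subtables-per-byte, densest first, breaking ties toward the higher lookup index; this is the greedy order consumed by the extension-promotion pass below. A small self-contained check of that ordering, using libc qsort in place of hb_vector_t::qsort:)

    #include <cstddef>
    #include <cstdlib>
    #include <cassert>

    struct lookup_size_t { unsigned lookup_index; size_t size; unsigned num_subtables; };

    // Same ordering as lookup_size_t::cmp above: densest lookups (most
    // subtables per byte) first; ties break toward the higher lookup index.
    static int cmp (const void* pa, const void* pb)
    {
      const lookup_size_t* a = (const lookup_size_t*) pa;
      const lookup_size_t* b = (const lookup_size_t*) pb;
      double da = (double) a->num_subtables / (double) a->size;
      double db = (double) b->num_subtables / (double) b->size;
      if (da == db) return (int) b->lookup_index - (int) a->lookup_index;
      return db > da ? 1 : -1;
    }

    int main ()
    {
      lookup_size_t sizes[] = { {0, 1000, 1},   // 0.001 subtables/byte
                                {1,  100, 2},   // 0.02
                                {2,  200, 4} }; // 0.02, ties with index 1
      qsort (sizes, 3, sizeof (lookup_size_t), cmp);
      assert (sizes[0].lookup_index == 2);  // tie broken: higher index first
      assert (sizes[1].lookup_index == 1);
      assert (sizes[2].lookup_index == 0);  // least dense last
      return 0;
    }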
- /*
- * Creates a copy of child and re-assigns the link from
- * parent to the clone. The copy is a shallow copy, objects
- * linked from child are not duplicated.
- */
- bool duplicate (unsigned parent_idx, unsigned child_idx)
- {
- update_parents ();
-
- unsigned links_to_child = 0;
- for (const auto& l : vertices_[parent_idx].obj.all_links ())
- {
- if (l.objidx == child_idx) links_to_child++;
- }
-
- if (vertices_[child_idx].incoming_edges () <= links_to_child)
- {
- // Can't duplicate this node, doing so would orphan the original one as all remaining links
- // to child are from parent.
- DEBUG_MSG (SUBSET_REPACK, nullptr, " Not duplicating %d => %d",
- parent_idx, child_idx);
+static inline
+bool _presplit_subtables_if_needed (graph::gsubgpos_graph_context_t& ext_context)
+{
+ // For each lookup this checks the size of its subtables and splits them as
+ // needed so that no subtable is at risk of overflowing (for the subtable
+ // types where splitting is supported).
+ //
+ // TODO(grieger): de-dup newly added nodes as necessary. Probably just want a full de-dup
+ // pass after this processing is done. Not super necessary as splits are
+ // only done where overflow is likely, so de-dup probably will get undone
+ // later anyways.
+
+ // The loop below can modify the contents of ext_context.lookups if new subtables are added
+ // to a lookup during a split. So save the initial set of lookup indices so that the iteration
+ // doesn't risk accessing freed memory if ext_context.lookups gets resized.
+ hb_set_t lookup_indices(ext_context.lookups.keys ());
+ for (unsigned lookup_index : lookup_indices)
+ {
+ graph::Lookup* lookup = ext_context.lookups.get(lookup_index);
+ if (!lookup->split_subtables_if_needed (ext_context, lookup_index))
return false;
- }
-
- DEBUG_MSG (SUBSET_REPACK, nullptr, " Duplicating %d => %d",
- parent_idx, child_idx);
-
- unsigned clone_idx = duplicate (child_idx);
- if (clone_idx == (unsigned) -1) return false;
- // duplicate shifts the root node idx, so if parent_idx was root update it.
- if (parent_idx == clone_idx) parent_idx++;
-
- auto& parent = vertices_[parent_idx];
- for (auto& l : parent.obj.all_links_writer ())
- {
- if (l.objidx != child_idx)
- continue;
-
- reassign_link (l, parent_idx, clone_idx);
- }
-
- return true;
- }
-
- /*
- * Raises the sorting priority of all children.
- */
- bool raise_childrens_priority (unsigned parent_idx)
- {
- DEBUG_MSG (SUBSET_REPACK, nullptr, " Raising priority of all children of %d",
- parent_idx);
- // This operation doesn't change ordering until a sort is run, so no need
- // to invalidate positions. It does not change graph structure so no need
- // to update distances or edge counts.
- auto& parent = vertices_[parent_idx].obj;
- bool made_change = false;
- for (auto& l : parent.all_links_writer ())
- made_change |= vertices_[l.objidx].raise_priority ();
- return made_change;
}
- /*
- * Will any offsets overflow on graph when it's serialized?
- */
- bool will_overflow (hb_vector_t<overflow_record_t>* overflows = nullptr)
- {
- if (overflows) overflows->resize (0);
- update_positions ();
-
- for (int parent_idx = vertices_.length - 1; parent_idx >= 0; parent_idx--)
- {
- // Don't need to check virtual links for overflow
- for (const auto& link : vertices_[parent_idx].obj.real_links)
- {
- int64_t offset = compute_offset (parent_idx, link);
- if (is_valid_offset (offset, link))
- continue;
-
- if (!overflows) return true;
-
- overflow_record_t r;
- r.parent = parent_idx;
- r.child = link.objidx;
- overflows->push (r);
- }
- }
-
- if (!overflows) return false;
- return overflows->length;
- }
-
- void print_orphaned_nodes ()
- {
- if (!DEBUG_ENABLED(SUBSET_REPACK)) return;
-
- DEBUG_MSG (SUBSET_REPACK, nullptr, "Graph is not fully connected.");
- parents_invalid = true;
- update_parents();
-
- for (unsigned i = 0; i < root_idx (); i++)
- {
- const auto& v = vertices_[i];
- if (!v.parents)
- DEBUG_MSG (SUBSET_REPACK, nullptr, "Node %u is orphaned.", i);
- }
- }
-
- void print_overflows (const hb_vector_t<overflow_record_t>& overflows)
- {
- if (!DEBUG_ENABLED(SUBSET_REPACK)) return;
-
- update_parents ();
- int limit = 10;
- for (const auto& o : overflows)
- {
- if (!limit--) break;
- const auto& parent = vertices_[o.parent];
- const auto& child = vertices_[o.child];
- DEBUG_MSG (SUBSET_REPACK, nullptr,
- " overflow from "
- "%4d (%4d in, %4d out, space %2d) => "
- "%4d (%4d in, %4d out, space %2d)",
- o.parent,
- parent.incoming_edges (),
- parent.obj.real_links.length + parent.obj.virtual_links.length,
- space_for (o.parent),
- o.child,
- child.incoming_edges (),
- child.obj.real_links.length + child.obj.virtual_links.length,
- space_for (o.child));
- }
- if (overflows.length > 10) {
- DEBUG_MSG (SUBSET_REPACK, nullptr, " ... plus %d more overflows.", overflows.length - 10);
- }
- }
-
- unsigned num_roots_for_space (unsigned space) const
- {
- return num_roots_for_space_[space];
- }
-
- unsigned next_space () const
- {
- return num_roots_for_space_.length;
- }
-
- void move_to_new_space (const hb_set_t& indices)
- {
- num_roots_for_space_.push (0);
- unsigned new_space = num_roots_for_space_.length - 1;
-
- for (unsigned index : indices) {
- auto& node = vertices_[index];
- num_roots_for_space_[node.space] = num_roots_for_space_[node.space] - 1;
- num_roots_for_space_[new_space] = num_roots_for_space_[new_space] + 1;
- node.space = new_space;
- distance_invalid = true;
- positions_invalid = true;
- }
- }
-
- unsigned space_for (unsigned index, unsigned* root = nullptr) const
- {
- const auto& node = vertices_[index];
- if (node.space)
- {
- if (root != nullptr)
- *root = index;
- return node.space;
- }
-
- if (!node.parents)
- {
- if (root)
- *root = index;
- return 0;
- }
-
- return space_for (node.parents[0], root);
- }
-
- void err_other_error () { this->successful = false; }
-
- private:
-
- size_t serialized_length () const {
- size_t total_size = 0;
- for (unsigned i = 0; i < vertices_.length; i++) {
- size_t size = vertices_[i].obj.tail - vertices_[i].obj.head;
- total_size += size;
- }
- return total_size;
- }
+ return true;
+}
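(The lookup_indices snapshot above is the standard guard against mutating a hash map while iterating it. A minimal illustration of the same pattern, using std::unordered_map as a stand-in for hb_hashmap_t, which can likewise rehash on insert:)

    #include <unordered_map>
    #include <vector>
    #include <cassert>

    int main ()
    {
      std::unordered_map<unsigned, int> lookups {{0, 1}, {1, 2}};

      std::vector<unsigned> keys;              // snapshot, like lookup_indices above
      keys.reserve (lookups.size ());
      for (auto& kv : lookups) keys.push_back (kv.first);

      for (unsigned k : keys)                  // safe: inserts below may rehash
        if (lookups[k] > 1)
          lookups.emplace (k + 100, 0);        // simulates a split adding entries

      assert (lookups.size () == 3);
      return 0;
    }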
- /*
- * Returns the numbers of incoming edges that are 32bits wide.
- */
- unsigned wide_parents (unsigned node_idx, hb_set_t& parents) const
- {
- unsigned count = 0;
+/*
+ * Analyze the lookups in a GSUB/GPOS table and decide if any should be promoted
+ * to extension lookups.
+ */
+static inline
+bool _promote_extensions_if_needed (graph::gsubgpos_graph_context_t& ext_context)
+{
+ // Simple Algorithm (v1, current):
+ // 1. Calculate how many bytes each non-extension lookup consumes.
+ // 2. Select up to 64k of those to remain as non-extension (greedy, highest subtables per byte first)
+ // 3. Promote the rest.
+ //
+ // Advanced Algorithm (v2, not implemented):
+ // 1. Perform connected component analysis using lookups as roots.
+ // 2. Compute size of each connected component.
+ // 3. Select up to 64k worth of connected components to remain as non-extensions.
+ // (greedy, highest subtables per byte first)
+ // 4. Promote the rest.
+
+ // TODO(garretrieger): support extension demotion, then consider all lookups. Requires advanced algo.
+ // TODO(garretrieger): also support extension promotion during iterative resolution phase, then
+ // we can use a less conservative threshold here.
+ // TODO(grieger): skip this for the 24 bit case.
+ if (!ext_context.lookups) return true;
+
+ unsigned total_lookup_table_sizes = 0;
+ hb_vector_t<lookup_size_t> lookup_sizes;
+ lookup_sizes.alloc (ext_context.lookups.get_population (), true);
+
+ for (unsigned lookup_index : ext_context.lookups.keys ())
+ {
+ const auto& lookup_v = ext_context.graph.vertices_[lookup_index];
+ total_lookup_table_sizes += lookup_v.table_size ();
+
+ const graph::Lookup* lookup = ext_context.lookups.get(lookup_index);
hb_set_t visited;
- for (unsigned p : vertices_[node_idx].parents)
- {
- if (visited.has (p)) continue;
- visited.add (p);
-
- // Only real links can be wide
- for (const auto& l : vertices_[p].obj.real_links)
- {
- if (l.objidx == node_idx && l.width == 4 && !l.is_signed)
- {
- count++;
- parents.add (p);
- }
- }
- }
- return count;
- }
-
- bool check_success (bool success)
- { return this->successful && (success || (err_other_error (), false)); }
-
- /*
- * Creates a map from objid to # of incoming edges.
- */
- void update_parents ()
- {
- if (!parents_invalid) return;
-
- for (unsigned i = 0; i < vertices_.length; i++)
- vertices_[i].parents.reset ();
-
- for (unsigned p = 0; p < vertices_.length; p++)
- {
- for (auto& l : vertices_[p].obj.all_links ())
- {
- vertices_[l.objidx].parents.push (p);
- }
- }
-
- parents_invalid = false;
- }
-
- /*
- * compute the serialized start and end positions for each vertex.
- */
- void update_positions ()
- {
- if (!positions_invalid) return;
-
- unsigned current_pos = 0;
- for (int i = root_idx (); i >= 0; i--)
- {
- auto& v = vertices_[i];
- v.start = current_pos;
- current_pos += v.obj.tail - v.obj.head;
- v.end = current_pos;
- }
-
- positions_invalid = false;
+ lookup_sizes.push (lookup_size_t {
+ lookup_index,
+ ext_context.graph.find_subgraph_size (lookup_index, visited),
+ lookup->number_of_subtables (),
+ });
}
- /*
- * Finds the distance to each object in the graph
- * from the initial node.
- */
- void update_distances ()
- {
- if (!distance_invalid) return;
-
- // Uses Dijkstra's algorithm to find all of the shortest distances.
- // https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm
- //
- // Implementation Note:
- // Since our priority queue doesn't support fast priority decreases
- // we instead just add new entries into the queue when a priority changes.
- // Redundant ones are filtered out later on by the visited set.
- // According to https://www3.cs.stonybrook.edu/~rezaul/papers/TR-07-54.pdf
- // for practical performance this is faster then using a more advanced queue
- // (such as a fibonacci queue) with a fast decrease priority.
- for (unsigned i = 0; i < vertices_.length; i++)
- {
- if (i == vertices_.length - 1)
- vertices_[i].distance = 0;
- else
- vertices_[i].distance = hb_int_max (int64_t);
- }
-
- hb_priority_queue_t queue;
- queue.insert (0, vertices_.length - 1);
-
- hb_vector_t<bool> visited;
- visited.resize (vertices_.length);
-
- while (!queue.in_error () && !queue.is_empty ())
- {
- unsigned next_idx = queue.pop_minimum ().second;
- if (visited[next_idx]) continue;
- const auto& next = vertices_[next_idx];
- int64_t next_distance = vertices_[next_idx].distance;
- visited[next_idx] = true;
-
- for (const auto& link : next.obj.all_links ())
- {
- if (visited[link.objidx]) continue;
-
- const auto& child = vertices_[link.objidx].obj;
- unsigned link_width = link.width ? link.width : 4; // treat virtual offsets as 32 bits wide
- int64_t child_weight = (child.tail - child.head) +
- ((int64_t) 1 << (link_width * 8)) * (vertices_[link.objidx].space + 1);
- int64_t child_distance = next_distance + child_weight;
-
- if (child_distance < vertices_[link.objidx].distance)
- {
- vertices_[link.objidx].distance = child_distance;
- queue.insert (child_distance, link.objidx);
- }
- }
- }
+ lookup_sizes.qsort ();
- check_success (!queue.in_error ());
- if (!check_success (queue.is_empty ()))
- {
- print_orphaned_nodes ();
- return;
- }
+ size_t lookup_list_size = ext_context.graph.vertices_[ext_context.lookup_list_index].table_size ();
+ size_t l2_l3_size = lookup_list_size + total_lookup_table_sizes; // Lookup List + Lookups
+ size_t l3_l4_size = total_lookup_table_sizes; // Lookups + SubTables
+ size_t l4_plus_size = 0; // SubTables + their descendants
- distance_invalid = false;
- }
-
- int64_t compute_offset (
- unsigned parent_idx,
- const hb_serialize_context_t::object_t::link_t& link) const
+ // Start by assuming all lookups use extension subtables; this size will be removed
+ // later for any lookup we decide to keep as non-extension.
+ for (auto p : lookup_sizes)
{
- const auto& parent = vertices_[parent_idx];
- const auto& child = vertices_[link.objidx];
- int64_t offset = 0;
- switch ((hb_serialize_context_t::whence_t) link.whence) {
- case hb_serialize_context_t::whence_t::Head:
- offset = child.start - parent.start; break;
- case hb_serialize_context_t::whence_t::Tail:
- offset = child.start - parent.end; break;
- case hb_serialize_context_t::whence_t::Absolute:
- offset = child.start; break;
- }
-
- assert (offset >= link.bias);
- offset -= link.bias;
- return offset;
+ // TODO(garretrieger): this overestimates the extension subtables size because some extension subtables may be
+ // reused. However, we can't correct this until we have connected component analysis in place.
+ unsigned subtables_size = p.num_subtables * 8;
+ l3_l4_size += subtables_size;
+ l4_plus_size += subtables_size;
}
- bool is_valid_offset (int64_t offset,
- const hb_serialize_context_t::object_t::link_t& link) const
+ bool layers_full = false;
+ for (auto p : lookup_sizes)
{
- if (unlikely (!link.width))
- // Virtual links can't overflow.
- return link.is_signed || offset >= 0;
-
- if (link.is_signed)
- {
- if (link.width == 4)
- return offset >= -((int64_t) 1 << 31) && offset < ((int64_t) 1 << 31);
- else
- return offset >= -(1 << 15) && offset < (1 << 15);
- }
- else
- {
- if (link.width == 4)
- return offset >= 0 && offset < ((int64_t) 1 << 32);
- else if (link.width == 3)
- return offset >= 0 && offset < ((int32_t) 1 << 24);
- else
- return offset >= 0 && offset < (1 << 16);
- }
- }
-
- /*
- * Updates a link in the graph to point to a different object. Corrects the
- * parents vector on the previous and new child nodes.
- */
- void reassign_link (hb_serialize_context_t::object_t::link_t& link,
- unsigned parent_idx,
- unsigned new_idx)
- {
- unsigned old_idx = link.objidx;
- link.objidx = new_idx;
- vertices_[old_idx].remove_parent (parent_idx);
- vertices_[new_idx].parents.push (parent_idx);
- }
-
- /*
- * Updates all objidx's in all links using the provided mapping. Corrects incoming edge counts.
- */
- template<typename Iterator, hb_requires (hb_is_iterator (Iterator))>
- void remap_obj_indices (const hb_hashmap_t<unsigned, unsigned>& id_map,
- Iterator subgraph,
- bool only_wide = false)
- {
- if (!id_map) return;
- for (unsigned i : subgraph)
- {
- for (auto& link : vertices_[i].obj.all_links_writer ())
- {
- if (!id_map.has (link.objidx)) continue;
- if (only_wide && !(link.width == 4 && !link.is_signed)) continue;
-
- reassign_link (link, i, id_map[link.objidx]);
- }
- }
- }
+ const graph::Lookup* lookup = ext_context.lookups.get(p.lookup_index);
+ if (lookup->is_extension (ext_context.table_tag))
+ // already an extension, so its size is counted by the loop above.
+ continue;
- /*
- * Updates all objidx's in all links using the provided mapping.
- */
- void remap_all_obj_indices (const hb_vector_t<unsigned>& id_map,
- hb_vector_t<vertex_t>* sorted_graph) const
- {
- for (unsigned i = 0; i < sorted_graph->length; i++)
+ if (!layers_full)
{
- (*sorted_graph)[i].remap_parents (id_map);
- for (auto& link : (*sorted_graph)[i].obj.all_links_writer ())
- {
- link.objidx = id_map[link.objidx];
- }
- }
- }
+ size_t lookup_size = ext_context.graph.vertices_[p.lookup_index].table_size ();
+ hb_set_t visited;
+ size_t subtables_size = ext_context.graph.find_subgraph_size (p.lookup_index, visited, 1) - lookup_size;
+ size_t remaining_size = p.size - subtables_size - lookup_size;
- template <typename O> void
- serialize_link_of_type (const hb_serialize_context_t::object_t::link_t& link,
- char* head,
- hb_serialize_context_t* c) const
- {
- OT::Offset<O>* offset = reinterpret_cast<OT::Offset<O>*> (head + link.position);
- *offset = 0;
- c->add_link (*offset,
- // serializer has an extra nil object at the start of the
- // object array. So all id's are +1 of what our id's are.
- link.objidx + 1,
- (hb_serialize_context_t::whence_t) link.whence,
- link.bias);
- }
-
- void serialize_link (const hb_serialize_context_t::object_t::link_t& link,
- char* head,
- hb_serialize_context_t* c) const
- {
- switch (link.width)
- {
- case 0:
- // Virtual links aren't serialized.
- return;
- case 4:
- if (link.is_signed)
- {
- serialize_link_of_type<OT::HBINT32> (link, head, c);
- } else {
- serialize_link_of_type<OT::HBUINT32> (link, head, c);
- }
- return;
- case 2:
- if (link.is_signed)
- {
- serialize_link_of_type<OT::HBINT16> (link, head, c);
- } else {
- serialize_link_of_type<OT::HBUINT16> (link, head, c);
- }
- return;
- case 3:
- serialize_link_of_type<OT::HBUINT24> (link, head, c);
- return;
- default:
- // Unexpected link width.
- assert (0);
- }
- }
+ l3_l4_size += subtables_size;
+ l3_l4_size -= p.num_subtables * 8;
+ l4_plus_size += subtables_size + remaining_size;
- /*
- * Finds all nodes in targets that are reachable from start_idx, nodes in visited will be skipped.
- * For this search the graph is treated as being undirected.
- *
- * Connected targets will be added to connected and removed from targets. All visited nodes
- * will be added to visited.
- */
- void find_connected_nodes (unsigned start_idx,
- hb_set_t& targets,
- hb_set_t& visited,
- hb_set_t& connected)
- {
- if (unlikely (!check_success (!visited.in_error ()))) return;
- if (visited.has (start_idx)) return;
- visited.add (start_idx);
+ if (l2_l3_size < (1 << 16)
+ && l3_l4_size < (1 << 16)
+ && l4_plus_size < (1 << 16)) continue; // this lookup fits within all layer groups
- if (targets.has (start_idx))
- {
- targets.del (start_idx);
- connected.add (start_idx);
+ layers_full = true;
}
- const auto& v = vertices_[start_idx];
-
- // Graph is treated as undirected so search children and parents of start_idx
- for (const auto& l : v.obj.all_links ())
- find_connected_nodes (l.objidx, targets, visited, connected);
-
- for (unsigned p : v.parents)
- find_connected_nodes (p, targets, visited, connected);
+ if (!ext_context.lookups.get(p.lookup_index)->make_extension (ext_context, p.lookup_index))
+ return false;
}
- public:
- // TODO(garretrieger): make private, will need to move most of offset overflow code into graph.
- hb_vector_t<vertex_t> vertices_;
- private:
- bool parents_invalid;
- bool distance_invalid;
- bool positions_invalid;
- bool successful;
- hb_vector_t<unsigned> num_roots_for_space_;
-};
+ return true;
+}
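(The budget arithmetic above assumes each extension wrapper costs 8 bytes — ExtensionFormat1 is format (2) + extensionLookupType (2) + a 32-bit offset (4) — and requires each offset layer to stay under the 16-bit limit independently. A condensed standalone model of the keep-as-non-extension decision, with made-up sizes:)

    #include <cstddef>
    #include <cstdio>

    int main ()
    {
      const size_t k16Bit = 1 << 16;  // reach of a 16-bit offset
      const size_t kExtSize = 8;      // one ExtensionFormat1 wrapper subtable

      // Hypothetical running totals for the three offset layers.
      size_t l2_l3   = 200 + 40000;   // LookupList + Lookups
      size_t l3_l4   = 40000;         // Lookups -> SubTables
      size_t l4_plus = 0;             // SubTables -> their descendants

      // Candidate lookup: keeping it non-extension swaps its 8-byte wrappers
      // for the real subtable bytes in the Lookup -> SubTable layer.
      unsigned num_subtables = 10;
      size_t subtables_size  = 30000; // direct subtables of this lookup
      size_t remaining_size  = 5000;  // everything referenced below them
      l3_l4   += subtables_size - num_subtables * kExtSize;
      l4_plus += subtables_size + remaining_size;

      bool fits = l2_l3 < k16Bit && l3_l4 < k16Bit && l4_plus < k16Bit;
      printf ("keep non-extension: %s\n", fits ? "yes" : "no"); // prints "no"
      return 0;
    }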
static inline
-bool _try_isolating_subgraphs (const hb_vector_t<graph_t::overflow_record_t>& overflows,
+bool _try_isolating_subgraphs (const hb_vector_t<graph::overflow_record_t>& overflows,
graph_t& sorted_graph)
{
unsigned space = 0;
@@ -1107,7 +197,7 @@ bool _try_isolating_subgraphs (const hb_vector_t<graph_t::overflow_record_t>& ov
for (int i = overflows.length - 1; i >= 0; i--)
{
- const graph_t::overflow_record_t& r = overflows[i];
+ const graph::overflow_record_t& r = overflows[i];
unsigned root;
unsigned overflow_space = sorted_graph.space_for (r.parent, &root);
@@ -1129,14 +219,14 @@ bool _try_isolating_subgraphs (const hb_vector_t<graph_t::overflow_record_t>& ov
// Only move at most half of the roots in a space at a time.
unsigned extra = roots_to_isolate.get_population () - maximum_to_move;
while (extra--) {
- unsigned root = HB_SET_VALUE_INVALID;
+ uint32_t root = HB_SET_VALUE_INVALID;
roots_to_isolate.previous (&root);
roots_to_isolate.del (root);
}
}
DEBUG_MSG (SUBSET_REPACK, nullptr,
- "Overflow in space %d (%d roots). Moving %d roots to space %d.",
+ "Overflow in space %u (%u roots). Moving %u roots to space %u.",
space,
sorted_graph.num_roots_for_space (space),
roots_to_isolate.get_population (),
@@ -1149,7 +239,7 @@ bool _try_isolating_subgraphs (const hb_vector_t<graph_t::overflow_record_t>& ov
}
static inline
-bool _process_overflows (const hb_vector_t<graph_t::overflow_record_t>& overflows,
+bool _process_overflows (const hb_vector_t<graph::overflow_record_t>& overflows,
hb_set_t& priority_bumped_parents,
graph_t& sorted_graph)
{
@@ -1158,13 +248,13 @@ bool _process_overflows (const hb_vector_t<graph_t::overflow_record_t>& overflow
// Try resolving the furthest overflows first.
for (int i = overflows.length - 1; i >= 0; i--)
{
- const graph_t::overflow_record_t& r = overflows[i];
+ const graph::overflow_record_t& r = overflows[i];
const auto& child = sorted_graph.vertices_[r.child];
if (child.is_shared ())
{
// The child object is shared, we may be able to eliminate the overflow
// by duplicating it.
- if (!sorted_graph.duplicate (r.parent, r.child)) continue;
+ if (sorted_graph.duplicate (r.parent, r.child) == (unsigned) -1) continue;
return true;
}
@@ -1196,57 +286,66 @@ bool _process_overflows (const hb_vector_t<graph_t::overflow_record_t>& overflow
return resolution_attempted;
}
-/*
- * Attempts to modify the topological sorting of the provided object graph to
- * eliminate offset overflows in the links between objects of the graph. If a
- * non-overflowing ordering is found, the updated graph is serialized into the
- * provided serialization context.
- *
- * If necessary the structure of the graph may be modified in ways that do not
- * affect the functionality of the graph. For example shared objects may be
- * duplicated.
- *
- * For a detailed writeup describing how the algorithm operates see:
- * docs/repacker.md
- */
-template<typename T>
-inline hb_blob_t*
-hb_resolve_overflows (const T& packed,
- hb_tag_t table_tag,
- unsigned max_rounds = 20) {
- // Kahn sort is ~twice as fast as shortest distance sort and works for many fonts
- // so try it first to save time.
- graph_t sorted_graph (packed);
- sorted_graph.sort_kahn ();
- if (!sorted_graph.will_overflow ())
+inline bool
+hb_resolve_graph_overflows (hb_tag_t table_tag,
+ unsigned max_rounds ,
+ bool recalculate_extensions,
+ graph_t& sorted_graph /* IN/OUT */)
+{
+ sorted_graph.sort_shortest_distance ();
+ if (sorted_graph.in_error ())
{
- return sorted_graph.serialize ();
+ DEBUG_MSG (SUBSET_REPACK, nullptr, "Sorted graph in error state after initial sort.");
+ return false;
}
- sorted_graph.sort_shortest_distance ();
+ bool will_overflow = graph::will_overflow (sorted_graph);
+ if (!will_overflow)
+ return true;
+ graph::gsubgpos_graph_context_t ext_context (table_tag, sorted_graph);
if ((table_tag == HB_OT_TAG_GPOS
|| table_tag == HB_OT_TAG_GSUB)
- && sorted_graph.will_overflow ())
+ && will_overflow)
{
+ if (recalculate_extensions)
+ {
+ DEBUG_MSG (SUBSET_REPACK, nullptr, "Splitting subtables if needed.");
+ if (!_presplit_subtables_if_needed (ext_context)) {
+ DEBUG_MSG (SUBSET_REPACK, nullptr, "Subtable splitting failed.");
+ return false;
+ }
+
+ DEBUG_MSG (SUBSET_REPACK, nullptr, "Promoting lookups to extensions if needed.");
+ if (!_promote_extensions_if_needed (ext_context)) {
+ DEBUG_MSG (SUBSET_REPACK, nullptr, "Extensions promotion failed.");
+ return false;
+ }
+ }
+
DEBUG_MSG (SUBSET_REPACK, nullptr, "Assigning spaces to 32 bit subgraphs.");
- if (sorted_graph.assign_32bit_spaces ())
+ if (sorted_graph.assign_spaces ())
sorted_graph.sort_shortest_distance ();
+ else
+ sorted_graph.sort_shortest_distance_if_needed ();
}
unsigned round = 0;
- hb_vector_t<graph_t::overflow_record_t> overflows;
+ hb_vector_t<graph::overflow_record_t> overflows;
// TODO(garretrieger): select a good limit for max rounds.
while (!sorted_graph.in_error ()
- && sorted_graph.will_overflow (&overflows)
- && round++ < max_rounds) {
- DEBUG_MSG (SUBSET_REPACK, nullptr, "=== Overflow resolution round %d ===", round);
- sorted_graph.print_overflows (overflows);
+ && graph::will_overflow (sorted_graph, &overflows)
+ && round < max_rounds) {
+ DEBUG_MSG (SUBSET_REPACK, nullptr, "=== Overflow resolution round %u ===", round);
+ print_overflows (sorted_graph, overflows);
hb_set_t priority_bumped_parents;
if (!_try_isolating_subgraphs (overflows, sorted_graph))
{
+ // Don't count space isolation towards round limit. Only increment
+ // round counter if space isolation made no changes.
+ round++;
if (!_process_overflows (overflows, priority_bumped_parents, sorted_graph))
{
DEBUG_MSG (SUBSET_REPACK, nullptr, "No resolution available :(");
@@ -1260,16 +359,62 @@ hb_resolve_overflows (const T& packed,
if (sorted_graph.in_error ())
{
DEBUG_MSG (SUBSET_REPACK, nullptr, "Sorted graph in error state.");
- return nullptr;
+ return false;
}
- if (sorted_graph.will_overflow ())
+ if (graph::will_overflow (sorted_graph))
{
DEBUG_MSG (SUBSET_REPACK, nullptr, "Offset overflow resolution failed.");
+ return false;
+ }
+
+ return true;
+}
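(One behavioural detail of the loop above worth noting: successful space-isolation attempts do not consume the round budget; only priority-bump/duplication rounds do. A toy model of that accounting, with hypothetical workload numbers:)

    #include <cstdio>

    int main ()
    {
      unsigned max_rounds = 20, round = 0;
      bool overflowing = true;
      int isolations_left = 3;   // hypothetical: isolation succeeds 3 times
      int fixes_needed = 5;      // hypothetical: then 5 counted rounds resolve it

      while (overflowing && round < max_rounds)
      {
        if (isolations_left > 0) { isolations_left--; continue; } // not counted
        round++;                                                  // counted round
        if (--fixes_needed == 0) overflowing = false;
      }
      printf ("resolved=%d, counted rounds=%u\n", !overflowing, round);
      return 0;
    }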
+
+/*
+ * Attempts to modify the topological sorting of the provided object graph to
+ * eliminate offset overflows in the links between objects of the graph. If a
+ * non-overflowing ordering is found, the updated graph is serialized into the
+ * provided serialization context.
+ *
+ * If necessary the structure of the graph may be modified in ways that do not
+ * affect the functionality of the graph. For example shared objects may be
+ * duplicated.
+ *
+ * For a detailed writeup describing how the algorithm operates see:
+ * docs/repacker.md
+ */
+template<typename T>
+inline hb_blob_t*
+hb_resolve_overflows (const T& packed,
+ hb_tag_t table_tag,
+ unsigned max_rounds = 20,
+ bool recalculate_extensions = false) {
+ graph_t sorted_graph (packed);
+ if (sorted_graph.in_error ())
+ {
+ // Invalid graph definition.
return nullptr;
}
- return sorted_graph.serialize ();
+ if (!sorted_graph.is_fully_connected ())
+ {
+ sorted_graph.print_orphaned_nodes ();
+ return nullptr;
+ }
+
+ if (sorted_graph.in_error ())
+ {
+ // Allocations failed somewhere
+ DEBUG_MSG (SUBSET_REPACK, nullptr,
+ "Graph is in error, likely due to a memory allocation error.");
+ return nullptr;
+ }
+
+ if (!hb_resolve_graph_overflows (table_tag, max_rounds, recalculate_extensions, sorted_graph))
+ return nullptr;
+
+ return graph::serialize (sorted_graph);
}
#endif /* HB_REPACKER_HH */
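(For context, a hedged sketch of how this entry point is typically driven, modeled on hb-subset's repack step; hb_serialize_context_t::object_graph () and the exact call shape are assumptions, not part of this diff:)

    #include "hb-repacker.hh"

    // Hypothetical driver: after serializing a GSUB table, hand the packed
    // object list to the repacker and get back an overflow-free blob, or
    // nullptr if no valid ordering was found.
    static hb_blob_t* repack_gsub (hb_serialize_context_t& c)
    {
      return hb_resolve_overflows (c.object_graph (),
                                   HB_OT_TAG_GSUB,
                                   20,    /* max_rounds */
                                   true); /* recalculate_extensions */
    }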