
Merge pull request #81327 from bruvzg/hb811

HarfBuzz: Update to version 8.1.1
Yuri Sizov 1 year ago
parent
commit
59d3a36fbe
59 changed files with 1696 additions and 1240 deletions
  1. +1 -1      thirdparty/README.md
  2. +33 -0     thirdparty/harfbuzz/src/OT/Layout/GDEF/GDEF.hh
  3. +14 -6     thirdparty/harfbuzz/src/OT/Layout/GPOS/CursivePosFormat1.hh
  4. +1 -1      thirdparty/harfbuzz/src/OT/Layout/GPOS/MarkMarkPosFormat1.hh
  5. +1 -1      thirdparty/harfbuzz/src/OT/Layout/GPOS/PairPosFormat1.hh
  6. +1 -1      thirdparty/harfbuzz/src/OT/Layout/GPOS/PairPosFormat2.hh
  7. +1 -1      thirdparty/harfbuzz/src/OT/Layout/GPOS/PairValueRecord.hh
  8. +0 -1      thirdparty/harfbuzz/src/OT/Layout/GSUB/Ligature.hh
  9. +9 -10     thirdparty/harfbuzz/src/OT/Layout/GSUB/LigatureSet.hh
  10. +1 -1     thirdparty/harfbuzz/src/graph/classdef-graph.hh
  11. +1 -1     thirdparty/harfbuzz/src/graph/coverage-graph.hh
  12. +156 -59  thirdparty/harfbuzz/src/graph/graph.hh
  13. +3 -3     thirdparty/harfbuzz/src/graph/gsubgpos-graph.hh
  14. +1 -1     thirdparty/harfbuzz/src/graph/pairpos-graph.hh
  15. +2 -2     thirdparty/harfbuzz/src/hb-aat-layout-trak-table.hh
  16. +4 -4     thirdparty/harfbuzz/src/hb-algs.hh
  17. +1 -0     thirdparty/harfbuzz/src/hb-bimap.hh
  18. +22 -10   thirdparty/harfbuzz/src/hb-bit-page.hh
  19. +13 -3    thirdparty/harfbuzz/src/hb-bit-set.hh
  20. +3 -3     thirdparty/harfbuzz/src/hb-buffer.cc
  21. +7 -1     thirdparty/harfbuzz/src/hb-buffer.hh
  22. +1 -1     thirdparty/harfbuzz/src/hb-config.hh
  23. +1 -1     thirdparty/harfbuzz/src/hb-kern.hh
  24. +0 -4     thirdparty/harfbuzz/src/hb-machinery.hh
  25. +8 -6     thirdparty/harfbuzz/src/hb-map.hh
  26. +1 -10    thirdparty/harfbuzz/src/hb-null.hh
  27. +0 -24    thirdparty/harfbuzz/src/hb-open-type.hh
  28. +2 -2     thirdparty/harfbuzz/src/hb-ot-cff-common.hh
  29. +2 -0     thirdparty/harfbuzz/src/hb-ot-cff1-table.hh
  30. +2 -0     thirdparty/harfbuzz/src/hb-ot-cff2-table.hh
  31. +3 -0     thirdparty/harfbuzz/src/hb-ot-hmtx-table.hh
  32. +42 -10   thirdparty/harfbuzz/src/hb-ot-layout-common.hh
  33. +293 -94  thirdparty/harfbuzz/src/hb-ot-layout-gsubgpos.hh
  34. +39 -1    thirdparty/harfbuzz/src/hb-ot-layout.cc
  35. +7 -0     thirdparty/harfbuzz/src/hb-ot-layout.h
  36. +15 -7    thirdparty/harfbuzz/src/hb-ot-map.cc
  37. +18 -1    thirdparty/harfbuzz/src/hb-ot-shape.cc
  38. +603 -736 thirdparty/harfbuzz/src/hb-ot-shaper-use-machine.hh
  39. +164 -0   thirdparty/harfbuzz/src/hb-ot-var-avar-table.hh
  40. +49 -56   thirdparty/harfbuzz/src/hb-ot-var-common.hh
  41. +2 -1     thirdparty/harfbuzz/src/hb-ot-var-cvar-table.hh
  42. +35 -7    thirdparty/harfbuzz/src/hb-ot-var-fvar-table.hh
  43. +20 -33   thirdparty/harfbuzz/src/hb-ot-var-hvar-table.hh
  44. +19 -8    thirdparty/harfbuzz/src/hb-priority-queue.hh
  45. +5 -1     thirdparty/harfbuzz/src/hb-sanitize.hh
  46. +1 -1     thirdparty/harfbuzz/src/hb-serialize.hh
  47. +1 -1     thirdparty/harfbuzz/src/hb-subset-cff-common.hh
  48. +4 -6     thirdparty/harfbuzz/src/hb-subset-cff1.cc
  49. +0 -37    thirdparty/harfbuzz/src/hb-subset-cff1.hh
  50. +0 -37    thirdparty/harfbuzz/src/hb-subset-cff2.hh
  51. +0 -1     thirdparty/harfbuzz/src/hb-subset-input.cc
  52. +30 -22   thirdparty/harfbuzz/src/hb-subset-instancer-solver.cc
  53. +23 -1    thirdparty/harfbuzz/src/hb-subset-instancer-solver.hh
  54. +2 -0     thirdparty/harfbuzz/src/hb-subset-plan-member-list.hh
  55. +16 -11   thirdparty/harfbuzz/src/hb-subset-plan.cc
  56. +6 -4     thirdparty/harfbuzz/src/hb-subset.cc
  57. +3 -3     thirdparty/harfbuzz/src/hb-uniscribe.cc
  58. +3 -3     thirdparty/harfbuzz/src/hb-version.h
  59. +1 -0     thirdparty/harfbuzz/src/hb.hh

+ 1 - 1
thirdparty/README.md

@@ -293,7 +293,7 @@ Files extracted from upstream source:
 ## harfbuzz
 
 - Upstream: https://github.com/harfbuzz/harfbuzz
-- Version: 8.0.0 (b4305532a7746422e0b615eee6304119c1092fd8, 2023)
+- Version: 8.1.1 (1d665c2b521512cdd56964138fc601debd1f1177, 2023)
 - License: MIT
 
 Files extracted from upstream source:

+ 33 - 0
thirdparty/harfbuzz/src/OT/Layout/GDEF/GDEF.hh

@@ -439,6 +439,16 @@ struct MarkGlyphSetsFormat1
   bool covers (unsigned int set_index, hb_codepoint_t glyph_id) const
   { return (this+coverage[set_index]).get_coverage (glyph_id) != NOT_COVERED; }
 
+  template <typename set_t>
+  void collect_coverage (hb_vector_t<set_t> &sets) const
+  {
+     for (const auto &offset : coverage)
+     {
+       const auto &cov = this+offset;
+       cov.collect_coverage (sets.push ());
+     }
+  }
+
   bool subset (hb_subset_context_t *c) const
   {
     TRACE_SUBSET (this);
@@ -492,6 +502,15 @@ struct MarkGlyphSets
     }
   }
 
+  template <typename set_t>
+  void collect_coverage (hb_vector_t<set_t> &sets) const
+  {
+    switch (u.format) {
+    case 1: u.format1.collect_coverage (sets); return;
+    default:return;
+    }
+  }
+
   bool subset (hb_subset_context_t *c) const
   {
     TRACE_SUBSET (this);
@@ -856,6 +875,10 @@ struct GDEF
 	hb_blob_destroy (table.get_blob ());
 	table = hb_blob_get_empty ();
       }
+
+#ifndef HB_NO_GDEF_CACHE
+      table->get_mark_glyph_sets ().collect_coverage (mark_glyph_set_digests);
+#endif
     }
     ~accelerator_t () { table.destroy (); }
 
@@ -879,8 +902,18 @@ struct GDEF
 
     }
 
+    bool mark_set_covers (unsigned int set_index, hb_codepoint_t glyph_id) const
+    {
+      return
+#ifndef HB_NO_GDEF_CACHE
+	     mark_glyph_set_digests[set_index].may_have (glyph_id) &&
+#endif
+	     table->mark_set_covers (set_index, glyph_id);
+    }
+
     hb_blob_ptr_t<GDEF> table;
 #ifndef HB_NO_GDEF_CACHE
+    hb_vector_t<hb_set_digest_t> mark_glyph_set_digests;
     mutable hb_cache_t<21, 3, 8> glyph_props_cache;
 #endif
   };
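
Note on the GDEF change above: the new mark_glyph_set_digests vector caches one set digest per mark glyph set, so mark_set_covers() can reject most glyphs with a cheap may_have() test before touching the coverage tables. The standalone sketch below illustrates that kind of digest, a lossy bitmask that can say "definitely not in the set" with false positives allowed; toy_digest_t and covers() are invented names for this example, not HarfBuzz's hb_set_digest_t.

// Minimal sketch of a digest used as a fast pre-check (hypothetical names).
#include <cstdint>
#include <cassert>

struct toy_digest_t
{
  uint64_t bits = 0;

  void add (uint32_t g) { bits |= 1ull << (g & 63); }

  // May return true for glyphs that were never added (false positive),
  // but never returns false for a glyph that was added.
  bool may_have (uint32_t g) const { return (bits & (1ull << (g & 63))) != 0; }
};

// Usage mirroring the accelerator above: consult the digest first and only
// fall back to the expensive authoritative lookup on a possible hit.
bool covers (const toy_digest_t &digest, uint32_t glyph,
             bool (*slow_covers) (uint32_t))
{
  return digest.may_have (glyph) && slow_covers (glyph);
}

int main ()
{
  toy_digest_t d;
  d.add (0x0651);                      // some mark glyph id (illustrative)
  assert (covers (d, 0x0651, [] (uint32_t) { return true; }));
  assert (!d.may_have (0x0041));       // most other glyphs are rejected early
  return 0;
}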

+ 14 - 6
thirdparty/harfbuzz/src/OT/Layout/GPOS/CursivePosFormat1.hh

@@ -91,7 +91,13 @@ struct CursivePosFormat1
   bool sanitize (hb_sanitize_context_t *c) const
   {
     TRACE_SANITIZE (this);
-    return_trace (coverage.sanitize (c, this) && entryExitRecord.sanitize (c, this));
+    if (unlikely (!coverage.sanitize (c, this)))
+      return_trace (false);
+
+    if (c->lazy_some_gpos)
+      return_trace (entryExitRecord.sanitize_shallow (c));
+    else
+      return_trace (entryExitRecord.sanitize (c, this));
   }
 
   bool intersects (const hb_set_t *glyphs) const
@@ -119,10 +125,11 @@ struct CursivePosFormat1
     hb_buffer_t *buffer = c->buffer;
 
     const EntryExitRecord &this_record = entryExitRecord[(this+coverage).get_coverage  (buffer->cur().codepoint)];
-    if (!this_record.entryAnchor) return_trace (false);
+    if (!this_record.entryAnchor ||
+	unlikely (!this_record.entryAnchor.sanitize (&c->sanitizer, this))) return_trace (false);
 
     hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_input;
-    skippy_iter.reset_fast (buffer->idx, 1);
+    skippy_iter.reset_fast (buffer->idx);
     unsigned unsafe_from;
     if (unlikely (!skippy_iter.prev (&unsafe_from)))
     {
@@ -131,7 +138,8 @@ struct CursivePosFormat1
     }
 
     const EntryExitRecord &prev_record = entryExitRecord[(this+coverage).get_coverage  (buffer->info[skippy_iter.idx].codepoint)];
-    if (!prev_record.exitAnchor)
+    if (!prev_record.exitAnchor ||
+	unlikely (!prev_record.exitAnchor.sanitize (&c->sanitizer, this)))
     {
       buffer->unsafe_to_concat_from_outbuffer (skippy_iter.idx, buffer->idx + 1);
       return_trace (false);
@@ -200,8 +208,8 @@ struct CursivePosFormat1
      * Arabic. */
     unsigned int child  = i;
     unsigned int parent = j;
-    hb_position_t x_offset = entry_x - exit_x;
-    hb_position_t y_offset = entry_y - exit_y;
+    hb_position_t x_offset = roundf (entry_x - exit_x);
+    hb_position_t y_offset = roundf (entry_y - exit_y);
     if  (!(c->lookup_props & LookupFlag::RightToLeft))
     {
       unsigned int k = child;

+ 1 - 1
thirdparty/harfbuzz/src/OT/Layout/GPOS/MarkMarkPosFormat1.hh

@@ -100,7 +100,7 @@ struct MarkMarkPosFormat1_2
 
     /* now we search backwards for a suitable mark glyph until a non-mark glyph */
     hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_input;
-    skippy_iter.reset_fast (buffer->idx, 1);
+    skippy_iter.reset_fast (buffer->idx);
     skippy_iter.set_lookup_props (c->lookup_props & ~(uint32_t)LookupFlag::IgnoreFlags);
     unsigned unsafe_from;
     if (unlikely (!skippy_iter.prev (&unsafe_from)))

+ 1 - 1
thirdparty/harfbuzz/src/OT/Layout/GPOS/PairPosFormat1.hh

@@ -110,7 +110,7 @@ struct PairPosFormat1_3
     if (likely (index == NOT_COVERED)) return_trace (false);
 
     hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_input;
-    skippy_iter.reset_fast (buffer->idx, 1);
+    skippy_iter.reset_fast (buffer->idx);
     unsigned unsafe_to;
     if (unlikely (!skippy_iter.next (&unsafe_to)))
     {

+ 1 - 1
thirdparty/harfbuzz/src/OT/Layout/GPOS/PairPosFormat2.hh

@@ -131,7 +131,7 @@ struct PairPosFormat2_4
     if (likely (index == NOT_COVERED)) return_trace (false);
 
     hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_input;
-    skippy_iter.reset_fast (buffer->idx, 1);
+    skippy_iter.reset_fast (buffer->idx);
     unsigned unsafe_to;
     if (unlikely (!skippy_iter.next (&unsafe_to)))
     {

+ 1 - 1
thirdparty/harfbuzz/src/OT/Layout/GPOS/PairValueRecord.hh

@@ -22,7 +22,7 @@ struct PairValueRecord
   ValueRecord   values;                 /* Positioning data for the first glyph
                                          * followed by for second glyph */
   public:
-  DEFINE_SIZE_ARRAY (Types::size, values);
+  DEFINE_SIZE_ARRAY (Types::HBGlyphID::static_size, values);
 
   int cmp (hb_codepoint_t k) const
   { return secondGlyph.cmp (k); }

+ 0 - 1
thirdparty/harfbuzz/src/OT/Layout/GSUB/Ligature.hh

@@ -19,7 +19,6 @@ struct Ligature
                                          * in writing direction */
   public:
   DEFINE_SIZE_ARRAY (Types::size + 2, component);
-  DEFINE_SIZE_MAX (65536 * Types::HBGlyphID::static_size);
 
   bool sanitize (hb_sanitize_context_t *c) const
   {

+ 9 - 10
thirdparty/harfbuzz/src/OT/Layout/GSUB/LigatureSet.hh

@@ -72,19 +72,14 @@ struct LigatureSet
     ;
   }
 
-  static bool match_always (hb_glyph_info_t &info HB_UNUSED, unsigned value HB_UNUSED, const void *data HB_UNUSED)
-  {
-    return true;
-  }
-
   bool apply (hb_ot_apply_context_t *c) const
   {
     TRACE_APPLY (this);
 
     unsigned int num_ligs = ligature.len;
 
-#ifndef HB_NO_OT_LIGATURES_FAST_PATH
-    if (HB_OPTIMIZE_SIZE_VAL || num_ligs <= 2)
+#ifndef HB_NO_OT_RULESETS_FAST_PATH
+    if (HB_OPTIMIZE_SIZE_VAL || num_ligs <= 4)
 #endif
     {
     slow:
@@ -97,10 +92,12 @@ struct LigatureSet
     }
 
     /* This version is optimized for speed by matching the first component
-     * of the ligature here, instead of calling into the ligation code. */
+     * of the ligature here, instead of calling into the ligation code.
+     *
+     * This is replicated in ChainRuleSet and RuleSet. */
 
     hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_input;
-    skippy_iter.reset (c->buffer->idx, 1);
+    skippy_iter.reset (c->buffer->idx);
     skippy_iter.set_match_func (match_always, nullptr);
     skippy_iter.set_glyph_data ((HBUINT16 *) nullptr);
     unsigned unsafe_to;
@@ -118,6 +115,8 @@ struct LigatureSet
         goto slow;
       }
     }
+    else
+      goto slow;
 
     bool unsafe_to_concat = false;
 
@@ -125,7 +124,7 @@ struct LigatureSet
     {
       const auto &lig = this+ligature.arrayZ[i];
       if (unlikely (lig.component.lenP1 <= 1) ||
-	  lig.component[1] == first)
+	  lig.component.arrayZ[0] == first)
       {
 	if (lig.apply (c))
 	{
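
The comment added in this hunk describes a fast path that peeks at the first component of each candidate ligature before calling into the full ligation code (and notes that the same trick is replicated in RuleSet and ChainRuleSet further down). The following rough, self-contained sketch shows only that pre-filter idea; the toy_* names are invented, and none of HarfBuzz's buffer or skipping-iterator machinery is modeled.

// Sketch of the "match the first component inline" fast path (illustrative).
#include <vector>
#include <cstdint>
#include <cassert>

struct toy_ligature_t
{
  std::vector<uint32_t> components;   // components[0] is matched against the
                                      // glyph after the current one
  uint32_t ligature_glyph;
};

// Stand-in for the full matcher: compares the whole component sequence.
static bool apply_full (const toy_ligature_t &lig,
                        const std::vector<uint32_t> &run, size_t idx)
{
  for (size_t i = 0; i < lig.components.size (); i++)
    if (idx + 1 + i >= run.size () || run[idx + 1 + i] != lig.components[i])
      return false;
  return true;
}

static bool apply_set (const std::vector<toy_ligature_t> &ligs,
                       const std::vector<uint32_t> &run, size_t idx)
{
  // Fast path: fetch the next glyph once...
  if (idx + 1 >= run.size ()) return false;
  uint32_t first = run[idx + 1];
  for (const auto &lig : ligs)
  {
    // ...and skip candidates whose first component cannot match.
    if (!lig.components.empty () && lig.components[0] != first)
      continue;
    if (apply_full (lig, run, idx))
      return true;
  }
  return false;
}

int main ()
{
  std::vector<toy_ligature_t> ligs = { { {2, 3}, 100 }, { {5}, 101 } };
  std::vector<uint32_t> run = { 1, 2, 3 };
  assert (apply_set (ligs, run, 0));    // 1 followed by 2 3: {2,3} matches
  assert (!apply_set (ligs, run, 1));   // 2 followed by 3: neither fits
  return 0;
}

The real code still falls back to the slow path when the next glyph is skippable (default-ignorable, etc.), as the diff's goto slow branches show.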

+ 1 - 1
thirdparty/harfbuzz/src/graph/classdef-graph.hh

@@ -72,7 +72,7 @@ struct ClassDef : public OT::ClassDef
     class_def_link->width = SmallTypes::size;
     class_def_link->objidx = class_def_prime_id;
     class_def_link->position = link_position;
-    class_def_prime_vertex.parents.push (parent_id);
+    class_def_prime_vertex.add_parent (parent_id);
 
     return true;
   }

+ 1 - 1
thirdparty/harfbuzz/src/graph/coverage-graph.hh

@@ -96,7 +96,7 @@ struct Coverage : public OT::Layout::Common::Coverage
     coverage_link->width = SmallTypes::size;
     coverage_link->objidx = coverage_prime_id;
     coverage_link->position = link_position;
-    coverage_prime_vertex.parents.push (parent_id);
+    coverage_prime_vertex.add_parent (parent_id);
 
     return (Coverage*) coverage_prime_vertex.obj.head;
   }

+ 156 - 59
thirdparty/harfbuzz/src/graph/graph.hh

@@ -43,12 +43,28 @@ struct graph_t
   {
     hb_serialize_context_t::object_t obj;
     int64_t distance = 0 ;
-    int64_t space = 0 ;
-    hb_vector_t<unsigned> parents;
+    unsigned space = 0 ;
     unsigned start = 0;
     unsigned end = 0;
     unsigned priority = 0;
-
+    private:
+    unsigned incoming_edges_ = 0;
+    unsigned single_parent = (unsigned) -1;
+    hb_hashmap_t<unsigned, unsigned> parents;
+    public:
+
+    auto parents_iter () const HB_AUTO_RETURN
+    (
+      hb_concat (
+	hb_iter (&single_parent, single_parent != (unsigned) -1),
+	parents.keys_ref ()
+      )
+    )
+
+    bool in_error () const
+    {
+      return parents.in_error ();
+    }
 
     bool link_positions_valid (unsigned num_objects, bool removed_nil)
     {
@@ -143,7 +159,9 @@ struct graph_t
       hb_swap (a.obj, b.obj);
       hb_swap (a.distance, b.distance);
       hb_swap (a.space, b.space);
+      hb_swap (a.single_parent, b.single_parent);
       hb_swap (a.parents, b.parents);
+      hb_swap (a.incoming_edges_, b.incoming_edges_);
       hb_swap (a.start, b.start);
       hb_swap (a.end, b.end);
       hb_swap (a.priority, b.priority);
@@ -154,6 +172,7 @@ struct graph_t
     {
       hb_hashmap_t<unsigned, unsigned> result;
 
+      result.alloc (obj.real_links.length);
       for (const auto& l : obj.real_links) {
         result.set (l.position, l.objidx);
       }
@@ -163,22 +182,76 @@ struct graph_t
 
     bool is_shared () const
     {
-      return parents.length > 1;
+      return parents.get_population () > 1;
     }
 
     unsigned incoming_edges () const
     {
-      return parents.length;
+      if (HB_DEBUG_SUBSET_REPACK)
+       {
+	assert (incoming_edges_ == (single_parent != (unsigned) -1) +
+		(parents.values_ref () | hb_reduce (hb_add, 0)));
+       }
+      return incoming_edges_;
+    }
+
+    void reset_parents ()
+    {
+      incoming_edges_ = 0;
+      single_parent = (unsigned) -1;
+      parents.reset ();
+    }
+
+    void add_parent (unsigned parent_index)
+    {
+      assert (parent_index != (unsigned) -1);
+      if (incoming_edges_ == 0)
+      {
+	single_parent = parent_index;
+	incoming_edges_ = 1;
+	return;
+      }
+      else if (single_parent != (unsigned) -1)
+      {
+        assert (incoming_edges_ == 1);
+	if (!parents.set (single_parent, 1))
+	  return;
+	single_parent = (unsigned) -1;
+      }
+
+      unsigned *v;
+      if (parents.has (parent_index, &v))
+      {
+        (*v)++;
+	incoming_edges_++;
+      }
+      else if (parents.set (parent_index, 1))
+	incoming_edges_++;
     }
 
     void remove_parent (unsigned parent_index)
     {
-      unsigned count = parents.length;
-      for (unsigned i = 0; i < count; i++)
+      if (parent_index == single_parent)
       {
-        if (parents.arrayZ[i] != parent_index) continue;
-        parents.remove_unordered (i);
-        break;
+	single_parent = (unsigned) -1;
+	incoming_edges_--;
+	return;
+      }
+
+      unsigned *v;
+      if (parents.has (parent_index, &v))
+      {
+	incoming_edges_--;
+	if (*v > 1)
+	  (*v)--;
+	else
+	  parents.del (parent_index);
+
+	if (incoming_edges_ == 1)
+	{
+	  single_parent = *parents.keys ();
+	  parents.reset ();
+	}
       }
     }
 
@@ -199,20 +272,46 @@ struct graph_t
       }
     }
 
-    void remap_parents (const hb_vector_t<unsigned>& id_map)
+    bool remap_parents (const hb_vector_t<unsigned>& id_map)
     {
-      unsigned count = parents.length;
-      for (unsigned i = 0; i < count; i++)
-        parents.arrayZ[i] = id_map[parents.arrayZ[i]];
+      if (single_parent != (unsigned) -1)
+      {
+        assert (single_parent < id_map.length);
+	single_parent = id_map[single_parent];
+	return true;
+      }
+
+      hb_hashmap_t<unsigned, unsigned> new_parents;
+      new_parents.alloc (parents.get_population ());
+      for (auto _ : parents)
+      {
+	assert (_.first < id_map.length);
+	assert (!new_parents.has (id_map[_.first]));
+	new_parents.set (id_map[_.first], _.second);
+      }
+
+      if (new_parents.in_error ())
+        return false;
+
+      parents = std::move (new_parents);
+      return true;
     }
 
     void remap_parent (unsigned old_index, unsigned new_index)
     {
-      unsigned count = parents.length;
-      for (unsigned i = 0; i < count; i++)
+      if (single_parent != (unsigned) -1)
+      {
+        if (single_parent == old_index)
+	  single_parent = new_index;
+        return;
+      }
+
+      const unsigned *pv;
+      if (parents.has (old_index, &pv))
       {
-        if (parents.arrayZ[i] == old_index)
-          parents.arrayZ[i] = new_index;
+        unsigned v = *pv;
+	parents.set (new_index, v);
+	parents.del (old_index);
       }
     }
 
@@ -419,7 +518,7 @@ struct graph_t
     link->width = 2;
     link->objidx = child_id;
     link->position = (char*) offset - (char*) v.obj.head;
-    vertices_[child_id].parents.push (parent_id);
+    vertices_[child_id].add_parent (parent_id);
   }
 
   /*
@@ -465,7 +564,7 @@ struct graph_t
     {
       unsigned next_id = queue.pop_minimum().second;
 
-      hb_swap (sorted_graph[new_id], vertices_[next_id]);
+      sorted_graph[new_id] = std::move (vertices_[next_id]);
       const vertex_t& next = sorted_graph[new_id];
 
       if (unlikely (!check_success(new_id >= 0))) {
@@ -493,8 +592,8 @@ struct graph_t
     check_success (!queue.in_error ());
     check_success (!sorted_graph.in_error ());
 
-    remap_all_obj_indices (id_map, &sorted_graph);
-    hb_swap (vertices_, sorted_graph);
+    check_success (remap_all_obj_indices (id_map, &sorted_graph));
+    vertices_ = std::move (sorted_graph);
 
     if (!check_success (new_id == -1))
       print_orphaned_nodes ();
@@ -605,7 +704,7 @@ struct graph_t
   {
     unsigned child_idx = index_for_offset (node_idx, offset);
     auto& child = vertices_[child_idx];
-    for (unsigned p : child.parents)
+    for (unsigned p : child.parents_iter ())
     {
       if (p != node_idx) {
         return duplicate (node_idx, child_idx);
@@ -688,12 +787,15 @@ struct graph_t
       subgraph.set (root_idx, wide_parents (root_idx, parents));
       find_subgraph (root_idx, subgraph);
     }
+    if (subgraph.in_error ())
+      return false;
 
     unsigned original_root_idx = root_idx ();
     hb_map_t index_map;
     bool made_changes = false;
     for (auto entry : subgraph.iter ())
     {
+      assert (entry.first < vertices_.length);
       const auto& node = vertices_[entry.first];
       unsigned subgraph_incoming_edges = entry.second;
 
@@ -749,10 +851,10 @@ struct graph_t
   {
     for (const auto& link : vertices_[node_idx].obj.all_links ())
     {
-      const uint32_t *v;
+      hb_codepoint_t *v;
       if (subgraph.has (link.objidx, &v))
       {
-        subgraph.set (link.objidx, *v + 1);
+        (*v)++;
         continue;
       }
       subgraph.set (link.objidx, 1);
@@ -824,7 +926,7 @@ struct graph_t
     new_link->position = (const char*) new_offset - (const char*) new_v.obj.head;
 
     auto& child = vertices_[child_id];
-    child.parents.push (new_parent_idx);
+    child.add_parent (new_parent_idx);
 
     old_v.remove_real_link (child_id, old_offset);
     child.remove_parent (old_parent_idx);
@@ -868,18 +970,18 @@ struct graph_t
     clone->obj.tail = child.obj.tail;
     clone->distance = child.distance;
     clone->space = child.space;
-    clone->parents.reset ();
+    clone->reset_parents ();
 
     unsigned clone_idx = vertices_.length - 2;
     for (const auto& l : child.obj.real_links)
     {
       clone->obj.real_links.push (l);
-      vertices_[l.objidx].parents.push (clone_idx);
+      vertices_[l.objidx].add_parent (clone_idx);
     }
     for (const auto& l : child.obj.virtual_links)
     {
       clone->obj.virtual_links.push (l);
-      vertices_[l.objidx].parents.push (clone_idx);
+      vertices_[l.objidx].add_parent (clone_idx);
     }
 
     check_success (!clone->obj.real_links.in_error ());
@@ -1008,13 +1110,13 @@ struct graph_t
   {
     update_parents();
 
-    if (root().parents)
+    if (root().incoming_edges ())
       // Root cannot have parents.
       return false;
 
     for (unsigned i = 0; i < root_idx (); i++)
     {
-      if (!vertices_[i].parents)
+      if (!vertices_[i].incoming_edges ())
         return false;
     }
     return true;
@@ -1078,14 +1180,14 @@ struct graph_t
     parents_invalid = true;
     update_parents();
 
-    if (root().parents) {
+    if (root().incoming_edges ()) {
       DEBUG_MSG (SUBSET_REPACK, nullptr, "Root node has incoming edges.");
     }
 
     for (unsigned i = 0; i < root_idx (); i++)
     {
       const auto& v = vertices_[i];
-      if (!v.parents)
+      if (!v.incoming_edges ())
         DEBUG_MSG (SUBSET_REPACK, nullptr, "Node %u is orphaned.", i);
     }
   }
@@ -1117,6 +1219,8 @@ struct graph_t
 
   unsigned space_for (unsigned index, unsigned* root = nullptr) const
   {
+  loop:
+    assert (index < vertices_.length);
     const auto& node = vertices_[index];
     if (node.space)
     {
@@ -1125,14 +1229,15 @@ struct graph_t
       return node.space;
     }
 
-    if (!node.parents)
+    if (!node.incoming_edges ())
     {
       if (root)
         *root = index;
       return 0;
     }
 
-    return space_for (node.parents[0], root);
+    index = *node.parents_iter ();
+    goto loop;
   }
 
   void err_other_error () { this->successful = false; }
@@ -1156,12 +1261,8 @@ struct graph_t
   unsigned wide_parents (unsigned node_idx, hb_set_t& parents) const
   {
     unsigned count = 0;
-    hb_set_t visited;
-    for (unsigned p : vertices_[node_idx].parents)
+    for (unsigned p : vertices_[node_idx].parents_iter ())
     {
-      if (visited.has (p)) continue;
-      visited.add (p);
-
       // Only real links can be wide
       for (const auto& l : vertices_[p].obj.real_links)
       {
@@ -1191,20 +1292,18 @@ struct graph_t
     unsigned count = vertices_.length;
 
     for (unsigned i = 0; i < count; i++)
-      vertices_.arrayZ[i].parents.reset ();
+      vertices_.arrayZ[i].reset_parents ();
 
     for (unsigned p = 0; p < count; p++)
     {
       for (auto& l : vertices_.arrayZ[p].obj.all_links ())
-      {
-        vertices_[l.objidx].parents.push (p);
-      }
+        vertices_[l.objidx].add_parent (p);
     }
 
     for (unsigned i = 0; i < count; i++)
       // parents arrays must be accurate or downstream operations like cycle detection
       // and sorting won't work correctly.
-      check_success (!vertices_.arrayZ[i].parents.in_error ());
+      check_success (!vertices_.arrayZ[i].in_error ());
 
     parents_invalid = false;
   }
@@ -1248,12 +1347,8 @@ struct graph_t
     // (such as a fibonacci queue) with a fast decrease priority.
     unsigned count = vertices_.length;
     for (unsigned i = 0; i < count; i++)
-    {
-      if (i == vertices_.length - 1)
-        vertices_.arrayZ[i].distance = 0;
-      else
-        vertices_.arrayZ[i].distance = hb_int_max (int64_t);
-    }
+      vertices_.arrayZ[i].distance = hb_int_max (int64_t);
+    vertices_.tail ().distance = 0;
 
     hb_priority_queue_t queue;
     queue.insert (0, vertices_.length - 1);
@@ -1273,15 +1368,15 @@ struct graph_t
       {
         if (visited[link.objidx]) continue;
 
-        const auto& child = vertices_[link.objidx].obj;
+        const auto& child = vertices_.arrayZ[link.objidx].obj;
         unsigned link_width = link.width ? link.width : 4; // treat virtual offsets as 32 bits wide
         int64_t child_weight = (child.tail - child.head) +
-                               ((int64_t) 1 << (link_width * 8)) * (vertices_[link.objidx].space + 1);
+                               ((int64_t) 1 << (link_width * 8)) * (vertices_.arrayZ[link.objidx].space + 1);
         int64_t child_distance = next_distance + child_weight;
 
-        if (child_distance < vertices_[link.objidx].distance)
+        if (child_distance < vertices_.arrayZ[link.objidx].distance)
         {
-          vertices_[link.objidx].distance = child_distance;
+          vertices_.arrayZ[link.objidx].distance = child_distance;
           queue.insert (child_distance, link.objidx);
         }
       }
@@ -1309,7 +1404,7 @@ struct graph_t
     unsigned old_idx = link.objidx;
     link.objidx = new_idx;
     vertices_[old_idx].remove_parent (parent_idx);
-    vertices_[new_idx].parents.push (parent_idx);
+    vertices_[new_idx].add_parent (parent_idx);
   }
 
   /*
@@ -1337,18 +1432,20 @@ struct graph_t
   /*
    * Updates all objidx's in all links using the provided mapping.
    */
-  void remap_all_obj_indices (const hb_vector_t<unsigned>& id_map,
+  bool remap_all_obj_indices (const hb_vector_t<unsigned>& id_map,
                               hb_vector_t<vertex_t>* sorted_graph) const
   {
     unsigned count = sorted_graph->length;
     for (unsigned i = 0; i < count; i++)
     {
-      (*sorted_graph)[i].remap_parents (id_map);
+      if (!(*sorted_graph)[i].remap_parents (id_map))
+        return false;
       for (auto& link : sorted_graph->arrayZ[i].obj.all_links_writer ())
       {
         link.objidx = id_map[link.objidx];
       }
     }
+    return true;
   }
 
   /*
@@ -1379,7 +1476,7 @@ struct graph_t
     for (const auto& l : v.obj.all_links ())
       find_connected_nodes (l.objidx, targets, visited, connected);
 
-    for (unsigned p : v.parents)
+    for (unsigned p : v.parents_iter ())
       find_connected_nodes (p, targets, visited, connected);
   }
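
The graph.hh changes above replace the per-vertex parents vector with a cached incoming-edge count, an inline single_parent fast path, and a hashmap from parent index to edge count for the multi-parent case. Below is a minimal sketch of that bookkeeping, assuming std::unordered_map and invented names rather than the repacker's hb_hashmap_t and vertex_t.

// Sketch of the hybrid parent-tracking scheme (illustrative only): most nodes
// have exactly one parent, so that case is stored inline and the map is only
// populated once a second distinct parent shows up.
#include <unordered_map>
#include <cassert>

struct parent_set_t
{
  static constexpr unsigned NONE = (unsigned) -1;

  unsigned incoming_edges_ = 0;       // total edge count, kept up to date
  unsigned single_parent = NONE;      // fast path: the only parent, if any
  std::unordered_map<unsigned, unsigned> parents;  // parent index -> edge count

  unsigned incoming_edges () const { return incoming_edges_; }

  void add_parent (unsigned p)
  {
    if (incoming_edges_ == 0) { single_parent = p; incoming_edges_ = 1; return; }
    if (single_parent != NONE)        // spill the inline parent into the map
    {
      parents[single_parent] = 1;
      single_parent = NONE;
    }
    parents[p]++;
    incoming_edges_++;
  }

  void remove_parent (unsigned p)
  {
    if (p == single_parent) { single_parent = NONE; incoming_edges_--; return; }
    auto it = parents.find (p);
    if (it == parents.end ()) return;
    incoming_edges_--;
    if (--it->second == 0) parents.erase (it);
    if (incoming_edges_ == 1)         // collapse back to the inline fast path
    {
      single_parent = parents.begin ()->first;
      parents.clear ();
    }
  }
};

int main ()
{
  parent_set_t v;
  v.add_parent (3);
  v.add_parent (7);
  v.add_parent (7);                   // duplicate edges from the same parent
  assert (v.incoming_edges () == 3);
  v.remove_parent (3);
  v.remove_parent (7);
  assert (v.incoming_edges () == 1 && v.single_parent == 7);
  return 0;
}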
 

+ 3 - 3
thirdparty/harfbuzz/src/graph/gsubgpos-graph.hh

@@ -225,7 +225,7 @@ struct Lookup : public OT::Lookup
         if (is_ext)
         {
           unsigned ext_id = create_extension_subtable (c, subtable_id, type);
-          c.graph.vertices_[subtable_id].parents.push (ext_id);
+          c.graph.vertices_[subtable_id].add_parent (ext_id);
           subtable_id = ext_id;
         }
 
@@ -234,7 +234,7 @@ struct Lookup : public OT::Lookup
         link->objidx = subtable_id;
         link->position = (char*) &new_lookup->subTable[offset_index++] -
                          (char*) new_lookup;
-        c.graph.vertices_[subtable_id].parents.push (this_index);
+        c.graph.vertices_[subtable_id].add_parent (this_index);
       }
     }
 
@@ -315,7 +315,7 @@ struct Lookup : public OT::Lookup
     // Make extension point at the subtable.
     auto& ext_vertex = c.graph.vertices_[ext_index];
     auto& subtable_vertex = c.graph.vertices_[subtable_index];
-    ext_vertex.parents.push (lookup_index);
+    ext_vertex.add_parent (lookup_index);
     subtable_vertex.remap_parent (lookup_index, ext_index);
 
     return true;

+ 1 - 1
thirdparty/harfbuzz/src/graph/pairpos-graph.hh

@@ -419,7 +419,7 @@ struct PairPosFormat2 : public OT::Layout::GPOS_impl::PairPosFormat2_4<SmallType
     class_def_link->width = SmallTypes::size;
     class_def_link->objidx = class_def_2_id;
     class_def_link->position = 10;
-    graph.vertices_[class_def_2_id].parents.push (pair_pos_prime_id);
+    graph.vertices_[class_def_2_id].add_parent (pair_pos_prime_id);
     graph.duplicate (pair_pos_prime_id, class_def_2_id);
 
     return pair_pos_prime_id;

+ 2 - 2
thirdparty/harfbuzz/src/hb-aat-layout-trak-table.hh

@@ -111,13 +111,13 @@ struct TrackData
 	break;
       }
     }
-    if (!trackTableEntry) return 0.;
+    if (!trackTableEntry) return 0;
 
     /*
      * Choose size.
      */
     unsigned int sizes = nSizes;
-    if (!sizes) return 0.;
+    if (!sizes) return 0;
     if (sizes == 1) return trackTableEntry->get_value (base, 0, sizes);
 
     hb_array_t<const F16DOT16> size_table ((base+sizeTable).arrayZ, sizes);

+ 4 - 4
thirdparty/harfbuzz/src/hb-algs.hh

@@ -283,8 +283,8 @@ HB_FUNCOBJ (hb_bool);
 // Compression function for Merkle-Damgard construction.
 // This function is generated using the framework provided.
 #define mix(h) (					\
-			(h) ^= (h) >> 23,		\
-			(h) *= 0x2127599bf4325c37ULL,	\
+			(void) ((h) ^= (h) >> 23),		\
+			(void) ((h) *= 0x2127599bf4325c37ULL),	\
 			(h) ^= (h) >> 47)
 
 static inline uint64_t fasthash64(const void *buf, size_t len, uint64_t seed)
@@ -362,10 +362,10 @@ struct
   // https://github.com/harfbuzz/harfbuzz/pull/4228#issuecomment-1565079537
   template <typename T,
 	    hb_enable_if (std::is_integral<T>::value && sizeof (T) <= sizeof (uint32_t))> constexpr auto
-  impl (const T& v, hb_priority<1>) const HB_RETURN (uint32_t, v * 2654435761u /* Knuh's multiplicative hash */)
+  impl (const T& v, hb_priority<1>) const HB_RETURN (uint32_t, (uint32_t) v * 2654435761u /* Knuh's multiplicative hash */)
   template <typename T,
 	    hb_enable_if (std::is_integral<T>::value && sizeof (T) > sizeof (uint32_t))> constexpr auto
-  impl (const T& v, hb_priority<1>) const HB_RETURN (uint32_t, (v ^ (v >> 32)) * 2654435761u /* Knuth's multiplicative hash */)
+  impl (const T& v, hb_priority<1>) const HB_RETURN (uint32_t, (uint32_t) (v ^ (v >> 32)) * 2654435761u /* Knuth's multiplicative hash */)
 
   template <typename T> constexpr auto
   impl (const T& v, hb_priority<0>) const HB_RETURN (uint32_t, std::hash<hb_decay<decltype (hb_deref (v))>>{} (hb_deref (v)))
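
For context on the hb-algs.hh hunk: the added casts keep Knuth's multiplicative hash in unsigned 32-bit arithmetic, where overflow wraps and is well defined. A small illustration of the hash itself follows; knuth_hash is a made-up name, not HarfBuzz code.

// Knuth's multiplicative hash: multiply by a constant near 2^32 / golden
// ratio so nearby keys scatter across the 32-bit range.
#include <cstdint>
#include <cstdio>

static inline uint32_t knuth_hash (uint64_t v)
{
  return (uint32_t) (v ^ (v >> 32)) * 2654435761u;  // fold high bits, then mix
}

int main ()
{
  for (uint64_t k = 0; k < 4; k++)
    std::printf ("%llu -> %08x\n", (unsigned long long) k, knuth_hash (k));
  return 0;
}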

+ 1 - 0
thirdparty/harfbuzz/src/hb-bimap.hh

@@ -143,6 +143,7 @@ struct hb_inc_bimap_t
   hb_codepoint_t skip (unsigned count)
   {
     hb_codepoint_t start = back_map.length;
+    back_map.alloc (back_map.length + count);
     for (unsigned i = 0; i < count; i++)
       back_map.push (HB_MAP_VALUE_INVALID);
     return start;

+ 22 - 10
thirdparty/harfbuzz/src/hb-bit-page.hh

@@ -89,14 +89,17 @@ struct hb_vector_size_t
 
 struct hb_bit_page_t
 {
-  void init0 () { v.init0 (); }
-  void init1 () { v.init1 (); }
+  void init0 () { v.init0 (); population = 0; }
+  void init1 () { v.init1 (); population = PAGE_BITS; }
+
+  void dirty () { population = UINT_MAX; }
 
   static inline constexpr unsigned len ()
   { return ARRAY_LENGTH_CONST (v); }
 
   bool is_empty () const
   {
+    if (has_population ()) return !population;
     return
     + hb_iter (v)
     | hb_none
@@ -107,8 +110,8 @@ struct hb_bit_page_t
     return hb_bytes_t ((const char *) &v, sizeof (v)).hash ();
   }
 
-  void add (hb_codepoint_t g) { elt (g) |= mask (g); }
-  void del (hb_codepoint_t g) { elt (g) &= ~mask (g); }
+  void add (hb_codepoint_t g) { elt (g) |= mask (g); dirty (); }
+  void del (hb_codepoint_t g) { elt (g) &= ~mask (g); dirty (); }
   void set (hb_codepoint_t g, bool value) { if (value) add (g); else del (g); }
   bool get (hb_codepoint_t g) const { return elt (g) & mask (g); }
 
@@ -120,20 +123,21 @@ struct hb_bit_page_t
       *la |= (mask (b) << 1) - mask(a);
     else
     {
-      *la |= ~(mask (a) - 1);
+      *la |= ~(mask (a) - 1llu);
       la++;
 
       hb_memset (la, 0xff, (char *) lb - (char *) la);
 
-      *lb |= ((mask (b) << 1) - 1);
+      *lb |= ((mask (b) << 1) - 1llu);
     }
+    dirty ();
   }
   void del_range (hb_codepoint_t a, hb_codepoint_t b)
   {
     elt_t *la = &elt (a);
     elt_t *lb = &elt (b);
     if (la == lb)
-      *la &= ~((mask (b) << 1) - mask(a));
+      *la &= ~((mask (b) << 1llu) - mask(a));
     else
     {
       *la &= mask (a) - 1;
@@ -141,8 +145,9 @@ struct hb_bit_page_t
 
       hb_memset (la, 0, (char *) lb - (char *) la);
 
-      *lb &= ~((mask (b) << 1) - 1);
+      *lb &= ~((mask (b) << 1) - 1llu);
     }
+    dirty ();
   }
   void set_range (hb_codepoint_t a, hb_codepoint_t b, bool v)
   { if (v) add_range (a, b); else del_range (a, b); }
@@ -222,18 +227,25 @@ struct hb_bit_page_t
   }
   bool is_subset (const hb_bit_page_t &larger_page) const
   {
+    if (has_population () && larger_page.has_population () &&
+	population > larger_page.population)
+      return false;
+
     for (unsigned i = 0; i < len (); i++)
       if (~larger_page.v[i] & v[i])
 	return false;
     return true;
   }
 
+  bool has_population () const { return population != UINT_MAX; }
   unsigned int get_population () const
   {
-    return
+    if (has_population ()) return population;
+    population =
     + hb_iter (v)
     | hb_reduce ([] (unsigned pop, const elt_t &_) { return pop + hb_popcount (_); }, 0u)
     ;
+    return population;
   }
 
   bool next (hb_codepoint_t *codepoint) const
@@ -329,9 +341,9 @@ struct hb_bit_page_t
   const elt_t& elt (hb_codepoint_t g) const { return v[(g & MASK) / ELT_BITS]; }
   static constexpr elt_t mask (hb_codepoint_t g) { return elt_t (1) << (g & ELT_MASK); }
 
+  mutable unsigned population;
   vector_t v;
 };
-static_assert (hb_bit_page_t::PAGE_BITS == sizeof (hb_bit_page_t) * 8, "");
 
 
 #endif /* HB_BIT_PAGE_HH */
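
The hb-bit-page.hh changes add a mutable, lazily recomputed population count per page: every mutation marks it dirty with a UINT_MAX sentinel, and get_population() recomputes and re-caches it only when asked. A minimal sketch of the same caching scheme, assuming a toy 512-bit page rather than HarfBuzz's hb_bit_page_t:

// Lazily cached popcount with a UINT_MAX "dirty" sentinel (illustrative).
#include <array>
#include <cstdint>
#include <climits>
#include <cassert>

struct toy_bit_page_t
{
  std::array<uint64_t, 8> v {};              // 512 bits of storage
  mutable unsigned population = 0;           // cached popcount, UINT_MAX = unknown

  bool has_population () const { return population != UINT_MAX; }
  void dirty () { population = UINT_MAX; }

  void add (unsigned g) { v[(g >> 6) & 7] |= 1ull << (g & 63); dirty (); }
  void del (unsigned g) { v[(g >> 6) & 7] &= ~(1ull << (g & 63)); dirty (); }

  unsigned get_population () const
  {
    if (has_population ()) return population;
    unsigned pop = 0;
    for (uint64_t w : v)
      for (; w; w &= w - 1) pop++;           // Kernighan popcount per word
    return population = pop;                 // cache until the next mutation
  }
};

int main ()
{
  toy_bit_page_t page;
  page.add (5); page.add (5); page.add (300);
  assert (page.get_population () == 2);      // recomputed once, then cached
  page.del (5);
  assert (page.get_population () == 1);
  return 0;
}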

+ 13 - 3
thirdparty/harfbuzz/src/hb-bit-set.hh

@@ -30,7 +30,6 @@
 
 #include "hb.hh"
 #include "hb-bit-page.hh"
-#include "hb-machinery.hh"
 
 
 struct hb_bit_set_t
@@ -183,6 +182,16 @@ struct hb_bit_set_t
     return true;
   }
 
+  /* Duplicated here from hb-machinery.hh to avoid including it. */
+  template<typename Type>
+  static inline const Type& StructAtOffsetUnaligned(const void *P, unsigned int offset)
+  {
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-align"
+    return * reinterpret_cast<const Type*> ((const char *) P + offset);
+#pragma GCC diagnostic pop
+  }
+
   template <typename T>
   void set_array (bool v, const T *array, unsigned int count, unsigned int stride=sizeof(T))
   {
@@ -553,6 +562,7 @@ struct hb_bit_set_t
 	count--;
 	page_map.arrayZ[count] = page_map.arrayZ[a];
 	page_at (count).v = op (page_at (a).v, other.page_at (b).v);
+	page_at (count).dirty ();
       }
       else if (page_map.arrayZ[a - 1].major > other.page_map.arrayZ[b - 1].major)
       {
@@ -571,7 +581,7 @@ struct hb_bit_set_t
 	  count--;
 	  page_map.arrayZ[count].major = other.page_map.arrayZ[b].major;
 	  page_map.arrayZ[count].index = next_page++;
-	  page_at (count).v = other.page_at (b).v;
+	  page_at (count) = other.page_at (b);
 	}
       }
     }
@@ -589,7 +599,7 @@ struct hb_bit_set_t
 	count--;
 	page_map.arrayZ[count].major = other.page_map.arrayZ[b].major;
 	page_map.arrayZ[count].index = next_page++;
-	page_at (count).v = other.page_at (b).v;
+	page_at (count) = other.page_at (b);
       }
     assert (!count);
     resize (newCount);

+ 3 - 3
thirdparty/harfbuzz/src/hb-buffer.cc

@@ -499,12 +499,12 @@ hb_buffer_t::set_masks (hb_mask_t    value,
 			unsigned int cluster_start,
 			unsigned int cluster_end)
 {
-  hb_mask_t not_mask = ~mask;
-  value &= mask;
-
   if (!mask)
     return;
 
+  hb_mask_t not_mask = ~mask;
+  value &= mask;
+
   unsigned int count = len;
   for (unsigned int i = 0; i < count; i++)
     if (cluster_start <= info[i].cluster && info[i].cluster < cluster_end)

+ 7 - 1
thirdparty/harfbuzz/src/hb-buffer.hh

@@ -464,13 +464,16 @@ struct hb_buffer_t
 		      start, end,
 		      true);
   }
+#ifndef HB_OPTIMIZE_SIZE
+  HB_ALWAYS_INLINE
+#endif
   void unsafe_to_concat (unsigned int start = 0, unsigned int end = -1)
   {
     if (likely ((flags & HB_BUFFER_FLAG_PRODUCE_UNSAFE_TO_CONCAT) == 0))
       return;
     _set_glyph_flags (HB_GLYPH_FLAG_UNSAFE_TO_CONCAT,
 		      start, end,
-		      true);
+		      false);
   }
   void unsafe_to_break_from_outbuffer (unsigned int start = 0, unsigned int end = -1)
   {
@@ -478,6 +481,9 @@ struct hb_buffer_t
 		      start, end,
 		      true, true);
   }
+#ifndef HB_OPTIMIZE_SIZE
+  HB_ALWAYS_INLINE
+#endif
   void unsafe_to_concat_from_outbuffer (unsigned int start = 0, unsigned int end = -1)
   {
     if (likely ((flags & HB_BUFFER_FLAG_PRODUCE_UNSAFE_TO_CONCAT) == 0))

+ 1 - 1
thirdparty/harfbuzz/src/hb-config.hh

@@ -183,7 +183,7 @@
 #endif
 
 #ifdef HB_OPTIMIZE_SIZE_MORE
-#define HB_NO_OT_LIGATURES_FAST_PATH
+#define HB_NO_OT_RULESETS_FAST_PATH
 #endif
 
 #ifdef HB_MINIMIZE_MEMORY_USAGE

+ 1 - 1
thirdparty/harfbuzz/src/hb-kern.hh

@@ -70,7 +70,7 @@ struct hb_kern_machine_t
 	continue;
       }
 
-      skippy_iter.reset (idx, 1);
+      skippy_iter.reset (idx);
       unsigned unsafe_to;
       if (!skippy_iter.next (&unsafe_to))
       {

+ 0 - 4
thirdparty/harfbuzz/src/hb-machinery.hh

@@ -131,10 +131,6 @@ static inline Type& StructAfter(TObject &X)
   unsigned int get_size () const { return (size - (array).min_size + (array).get_size ()); } \
   DEFINE_SIZE_ARRAY(size, array)
 
-#define DEFINE_SIZE_MAX(size) \
-  DEFINE_INSTANCE_ASSERTION (sizeof (*this) <= (size)) \
-  static constexpr unsigned max_size = (size)
-
 
 
 /*

+ 8 - 6
thirdparty/harfbuzz/src/hb-map.hh

@@ -78,6 +78,10 @@ struct hb_hashmap_t
 		hash (0),
 		value () {}
 
+    // Needed for https://github.com/harfbuzz/harfbuzz/issues/4138
+    K& get_key () { return key; }
+    V& get_value () { return value; }
+
     bool is_used () const { return is_used_; }
     void set_used (bool is_used) { is_used_ = is_used; }
     void set_real (bool is_real) { is_real_ = is_real; }
@@ -405,23 +409,21 @@ struct hb_hashmap_t
   auto keys_ref () const HB_AUTO_RETURN
   (
     + iter_items ()
-    | hb_map (&item_t::key)
+    | hb_map (&item_t::get_key)
   )
   auto keys () const HB_AUTO_RETURN
   (
-    + iter_items ()
-    | hb_map (&item_t::key)
+    + keys_ref ()
     | hb_map (hb_ridentity)
   )
   auto values_ref () const HB_AUTO_RETURN
   (
     + iter_items ()
-    | hb_map (&item_t::value)
+    | hb_map (&item_t::get_value)
   )
   auto values () const HB_AUTO_RETURN
   (
-    + iter_items ()
-    | hb_map (&item_t::value)
+    + values_ref ()
     | hb_map (hb_ridentity)
   )
 

+ 1 - 10
thirdparty/harfbuzz/src/hb-null.hh

@@ -37,7 +37,7 @@
 
 /* Global nul-content Null pool.  Enlarge as necessary. */
 
-#define HB_NULL_POOL_SIZE 520
+#define HB_NULL_POOL_SIZE 640
 
 template <typename T, typename>
 struct _hb_has_min_size : hb_false_type {};
@@ -48,15 +48,6 @@ template <typename T>
 using hb_has_min_size = _hb_has_min_size<T, void>;
 #define hb_has_min_size(T) hb_has_min_size<T>::value
 
-template <typename T, typename>
-struct _hb_has_max_size : hb_false_type {};
-template <typename T>
-struct _hb_has_max_size<T, hb_void_t<decltype (T::max_size)>>
-	: hb_true_type {};
-template <typename T>
-using hb_has_max_size = _hb_has_max_size<T, void>;
-#define hb_has_max_size(T) hb_has_max_size<T>::value
-
 template <typename T, typename>
 struct _hb_has_null_size : hb_false_type {};
 template <typename T>

+ 0 - 24
thirdparty/harfbuzz/src/hb-open-type.hh

@@ -718,30 +718,6 @@ struct ArrayOf
     return_trace (out);
   }
 
-  /* Special-case ArrayOf Offset16To structs with a maximum size. */
-  template <typename T = Type,
-	    typename Base = void,
-	    hb_enable_if (hb_has_max_size (typename T::target_t) &&
-			  sizeof (T) == 2)>
-  HB_ALWAYS_INLINE
-  bool sanitize (hb_sanitize_context_t *c, const Base *base) const
-  {
-    TRACE_SANITIZE (this);
-
-    if (unlikely (!sanitize_shallow (c))) return_trace (false);
-
-    unsigned max_len = 65536 + Type::target_t::max_size;
-
-    if (unlikely (c->check_range_fast (base, max_len)))
-      return_trace (true);
-
-    unsigned int count = len;
-    for (unsigned int i = 0; i < count; i++)
-      if (unlikely (!c->dispatch (arrayZ[i], base)))
-	return_trace (false);
-    return_trace (true);
-  }
-
   template <typename ...Ts>
   HB_ALWAYS_INLINE
   bool sanitize (hb_sanitize_context_t *c, Ts&&... ds) const

+ 2 - 2
thirdparty/harfbuzz/src/hb-ot-cff-common.hh

@@ -94,10 +94,10 @@ struct CFFIndex
     for (const auto &_ : +it)
     {
       unsigned len = _.length;
+      if (!len)
+	continue;
       if (len <= 1)
       {
-        if (!len)
-	  continue;
 	*ret++ = *_.arrayZ;
 	continue;
       }

+ 2 - 0
thirdparty/harfbuzz/src/hb-ot-cff1-table.hh

@@ -1061,6 +1061,8 @@ struct cff1
   template <typename PRIVOPSET, typename PRIVDICTVAL>
   struct accelerator_templ_t
   {
+    static constexpr hb_tag_t tableTag = cff1::tableTag;
+
     accelerator_templ_t (hb_face_t *face)
     {
       if (!face) return;

+ 2 - 0
thirdparty/harfbuzz/src/hb-ot-cff2-table.hh

@@ -390,6 +390,8 @@ struct cff2
   template <typename PRIVOPSET, typename PRIVDICTVAL>
   struct accelerator_templ_t
   {
+    static constexpr hb_tag_t tableTag = cff2::tableTag;
+
     accelerator_templ_t (hb_face_t *face)
     {
       if (!face) return;

+ 3 - 0
thirdparty/harfbuzz/src/hb-ot-hmtx-table.hh

@@ -179,6 +179,7 @@ struct hmtxvmtx
 	lm.advance = mtx.first;
 	lm.sb = mtx.second;
       }
+      // TODO(beyond-64k): This assumes that maxp.numGlyphs is 0xFFFF.
       else if (gid < 0x10000u)
         short_metrics[gid] = mtx.second;
       else
@@ -199,6 +200,8 @@ struct hmtxvmtx
       /* Determine num_long_metrics to encode. */
       auto& plan = c->plan;
 
+      // TODO Don't consider retaingid holes here.
+
       num_long_metrics = hb_min (plan->num_output_glyphs (), 0xFFFFu);
       unsigned int last_advance = get_new_gid_advance_unscaled (plan, mtx_map, num_long_metrics - 1, _mtx);
       while (num_long_metrics > 1 &&

+ 42 - 10
thirdparty/harfbuzz/src/hb-ot-layout-common.hh

@@ -1937,13 +1937,22 @@ struct ClassDefFormat2_4
     {
       /* Match if there's any glyph that is not listed! */
       hb_codepoint_t g = HB_SET_VALUE_INVALID;
-      for (auto &range : rangeRecord)
+      hb_codepoint_t last = HB_SET_VALUE_INVALID;
+      auto it = hb_iter (rangeRecord);
+      for (auto &range : it)
       {
+        if (it->first == last + 1)
+	{
+	  it++;
+	  continue;
+	}
+
 	if (!glyphs->next (&g))
 	  break;
 	if (g < range.first)
 	  return true;
 	g = range.last;
+	last = g;
       }
       if (g != HB_SET_VALUE_INVALID && glyphs->next (&g))
 	return true;
@@ -2928,9 +2937,29 @@ struct ConditionFormat1
     const hb_map_t *index_map = &c->plan->axes_index_map;
     if (index_map->is_empty ()) return_trace (true);
 
-    if (!index_map->has (axisIndex))
+    const hb_map_t& axes_old_index_tag_map = c->plan->axes_old_index_tag_map;
+    hb_codepoint_t *axis_tag;
+    if (!axes_old_index_tag_map.has (axisIndex, &axis_tag) ||
+        !index_map->has (axisIndex))
       return_trace (false);
 
+    const hb_hashmap_t<hb_tag_t, Triple>& normalized_axes_location = c->plan->axes_location;
+    Triple axis_limit{-1.f, 0.f, 1.f};
+    Triple *normalized_limit;
+    if (normalized_axes_location.has (*axis_tag, &normalized_limit))
+      axis_limit = *normalized_limit;
+
+    const hb_hashmap_t<hb_tag_t, TripleDistances>& axes_triple_distances = c->plan->axes_triple_distances;
+    TripleDistances axis_triple_distances{1.f, 1.f};
+    TripleDistances *triple_dists;
+    if (axes_triple_distances.has (*axis_tag, &triple_dists))
+      axis_triple_distances = *triple_dists;
+
+    float normalized_min = renormalizeValue (filterRangeMinValue.to_float (), axis_limit, axis_triple_distances, false);
+    float normalized_max = renormalizeValue (filterRangeMaxValue.to_float (), axis_limit, axis_triple_distances, false);
+    out->filterRangeMinValue.set_float (normalized_min);
+    out->filterRangeMaxValue.set_float (normalized_max);
+
     return_trace (c->serializer->check_assign (out->axisIndex, index_map->get (axisIndex),
                                                HB_SERIALIZE_ERROR_INT_OVERFLOW));
   }
@@ -2946,15 +2975,16 @@ struct ConditionFormat1
     hb_tag_t axis_tag = c->axes_index_tag_map->get (axisIndex);
 
     Triple axis_range (-1.f, 0.f, 1.f);
-    if (c->axes_location->has (axis_tag))
-      axis_range = c->axes_location->get (axis_tag);
+    Triple *axis_limit;
+    if (c->axes_location->has (axis_tag, &axis_limit))
+      axis_range = *axis_limit;
 
-    int axis_min_val = axis_range.minimum;
-    int axis_default_val = axis_range.middle;
-    int axis_max_val = axis_range.maximum;
+    float axis_min_val = axis_range.minimum;
+    float axis_default_val = axis_range.middle;
+    float axis_max_val = axis_range.maximum;
 
-    int16_t filter_min_val = filterRangeMinValue.to_int ();
-    int16_t filter_max_val = filterRangeMaxValue.to_int ();
+    float filter_min_val = filterRangeMinValue.to_float ();
+    float filter_max_val = filterRangeMaxValue.to_float ();
 
     if (axis_default_val < filter_min_val ||
         axis_default_val > filter_max_val)
@@ -2974,7 +3004,9 @@ struct ConditionFormat1
     {
       // add axisIndex->value into the hashmap so we can check if the record is
       // unique with variations
-      hb_codepoint_t val = (filter_max_val << 16) + filter_min_val;
+      int16_t int_filter_max_val = filterRangeMaxValue.to_int ();
+      int16_t int_filter_min_val = filterRangeMinValue.to_int ();
+      hb_codepoint_t val = (int_filter_max_val << 16) + int_filter_min_val;
 
       condition_map->set (axisIndex, val);
       return KEEP_COND_WITH_VAR;

+ 293 - 94
thirdparty/harfbuzz/src/hb-ot-layout-gsubgpos.hh

@@ -402,16 +402,6 @@ struct hb_ot_apply_context_t :
 {
   struct matcher_t
   {
-    matcher_t () :
-	     lookup_props (0),
-	     mask (-1),
-	     ignore_zwnj (false),
-	     ignore_zwj (false),
-	     per_syllable (false),
-	     syllable {0},
-	     match_func (nullptr),
-	     match_data (nullptr) {}
-
     typedef bool (*match_func_t) (hb_glyph_info_t &info, unsigned value, const void *data);
 
     void set_ignore_zwnj (bool ignore_zwnj_) { ignore_zwnj = ignore_zwnj_; }
@@ -470,14 +460,14 @@ struct hb_ot_apply_context_t :
     }
 
     protected:
-    unsigned int lookup_props;
-    hb_mask_t mask;
-    bool ignore_zwnj;
-    bool ignore_zwj;
-    bool per_syllable;
-    uint8_t syllable;
-    match_func_t match_func;
-    const void *match_data;
+    unsigned int lookup_props = 0;
+    hb_mask_t mask = -1;
+    bool ignore_zwnj = false;
+    bool ignore_zwj = false;
+    bool per_syllable = false;
+    uint8_t syllable = 0;
+    match_func_t match_func = nullptr;
+    const void *match_data = nullptr;
   };
 
   struct skipping_iterator_t
@@ -528,11 +518,9 @@ struct hb_ot_apply_context_t :
 #ifndef HB_OPTIMIZE_SIZE
     HB_ALWAYS_INLINE
 #endif
-    void reset (unsigned int start_index_,
-		unsigned int num_items_)
+    void reset (unsigned int start_index_)
     {
       idx = start_index_;
-      num_items = num_items_;
       end = c->buffer->len;
       matcher.set_syllable (start_index_ == c->buffer->idx ? c->buffer->cur().syllable () : 0);
     }
@@ -540,17 +528,14 @@ struct hb_ot_apply_context_t :
 #ifndef HB_OPTIMIZE_SIZE
     HB_ALWAYS_INLINE
 #endif
-    void reset_fast (unsigned int start_index_,
-		     unsigned int num_items_)
+    void reset_fast (unsigned int start_index_)
     {
       // Doesn't set end or syllable. Used by GPOS which doesn't care / change.
       idx = start_index_;
-      num_items = num_items_;
     }
 
     void reject ()
     {
-      num_items++;
       backup_glyph_data ();
     }
 
@@ -593,12 +578,7 @@ struct hb_ot_apply_context_t :
 #endif
     bool next (unsigned *unsafe_to = nullptr)
     {
-      assert (num_items > 0);
-      /* The alternate condition below is faster at string boundaries,
-       * but produces subpar "unsafe-to-concat" values. */
-      signed stop = (signed) end - (signed) num_items;
-      if (c->buffer->flags & HB_BUFFER_FLAG_PRODUCE_UNSAFE_TO_CONCAT)
-        stop = (signed) end - 1;
+      const signed stop = (signed) end - 1;
       while ((signed) idx < stop)
       {
 	idx++;
@@ -606,7 +586,6 @@ struct hb_ot_apply_context_t :
 	{
 	  case MATCH:
 	  {
-	    num_items--;
 	    advance_glyph_data ();
 	    return true;
 	  }
@@ -629,12 +608,7 @@ struct hb_ot_apply_context_t :
 #endif
     bool prev (unsigned *unsafe_from = nullptr)
     {
-      assert (num_items > 0);
-      /* The alternate condition below is faster at string boundaries,
-       * but produces subpar "unsafe-to-concat" values. */
-      unsigned stop = num_items - 1;
-      if (c->buffer->flags & HB_BUFFER_FLAG_PRODUCE_UNSAFE_TO_CONCAT)
-        stop = 1 - 1;
+      const unsigned stop = 0;
       while (idx > stop)
       {
 	idx--;
@@ -642,7 +616,6 @@ struct hb_ot_apply_context_t :
 	{
 	  case MATCH:
 	  {
-	    num_items--;
 	    advance_glyph_data ();
 	    return true;
 	  }
@@ -661,6 +634,7 @@ struct hb_ot_apply_context_t :
       return false;
     }
 
+    HB_ALWAYS_INLINE
     hb_codepoint_t
     get_glyph_data ()
     {
@@ -671,6 +645,7 @@ struct hb_ot_apply_context_t :
 #endif
       return 0;
     }
+    HB_ALWAYS_INLINE
     void
     advance_glyph_data ()
     {
@@ -699,7 +674,6 @@ struct hb_ot_apply_context_t :
     const HBUINT24 *match_glyph_data24;
 #endif
 
-    unsigned int num_items;
     unsigned int end;
   };
 
@@ -826,7 +800,7 @@ struct hb_ot_apply_context_t :
      * match_props has the set index.
      */
     if (match_props & LookupFlag::UseMarkFilteringSet)
-      return gdef.mark_set_covers (match_props >> 16, glyph);
+      return gdef_accel.mark_set_covers (match_props >> 16, glyph);
 
     /* The second byte of match_props has the meaning
      * "ignore marks of attachment type different than
@@ -1198,6 +1172,10 @@ static inline void collect_array (hb_collect_glyphs_context_t *c HB_UNUSED,
 }
 
 
+static inline bool match_always (hb_glyph_info_t &info HB_UNUSED, unsigned value HB_UNUSED, const void *data HB_UNUSED)
+{
+  return true;
+}
 static inline bool match_glyph (hb_glyph_info_t &info, unsigned value, const void *data HB_UNUSED)
 {
   return info.codepoint == value;
@@ -1218,6 +1196,28 @@ static inline bool match_class_cached (hb_glyph_info_t &info, unsigned value, co
     info.syllable() = klass;
   return klass == value;
 }
+static inline bool match_class_cached1 (hb_glyph_info_t &info, unsigned value, const void *data)
+{
+  unsigned klass = info.syllable() & 0x0F;
+  if (klass < 15)
+    return klass == value;
+  const ClassDef &class_def = *reinterpret_cast<const ClassDef *>(data);
+  klass = class_def.get_class (info.codepoint);
+  if (likely (klass < 15))
+    info.syllable() = (info.syllable() & 0xF0) | klass;
+  return klass == value;
+}
+static inline bool match_class_cached2 (hb_glyph_info_t &info, unsigned value, const void *data)
+{
+  unsigned klass = (info.syllable() & 0xF0) >> 4;
+  if (klass < 15)
+    return klass == value;
+  const ClassDef &class_def = *reinterpret_cast<const ClassDef *>(data);
+  klass = class_def.get_class (info.codepoint);
+  if (likely (klass < 15))
+    info.syllable() = (info.syllable() & 0x0F) | (klass << 4);
+  return klass == value;
+}
 static inline bool match_coverage (hb_glyph_info_t &info, unsigned value, const void *data)
 {
   Offset16To<Coverage> coverage;
@@ -1265,7 +1265,7 @@ static bool match_input (hb_ot_apply_context_t *c,
   hb_buffer_t *buffer = c->buffer;
 
   hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_input;
-  skippy_iter.reset (buffer->idx, count - 1);
+  skippy_iter.reset (buffer->idx);
   skippy_iter.set_match_func (match_func, match_data);
   skippy_iter.set_glyph_data (input);
 
@@ -1505,7 +1505,7 @@ static bool match_backtrack (hb_ot_apply_context_t *c,
   TRACE_APPLY (nullptr);
 
   hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_context;
-  skippy_iter.reset (c->buffer->backtrack_len (), count);
+  skippy_iter.reset (c->buffer->backtrack_len ());
   skippy_iter.set_match_func (match_func, match_data);
   skippy_iter.set_glyph_data (backtrack);
 
@@ -1538,7 +1538,7 @@ static bool match_lookahead (hb_ot_apply_context_t *c,
   TRACE_APPLY (nullptr);
 
   hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_context;
-  skippy_iter.reset (start_index - 1, count);
+  skippy_iter.reset (start_index - 1);
   skippy_iter.set_match_func (match_func, match_data);
   skippy_iter.set_glyph_data (lookahead);
 
@@ -1913,12 +1913,13 @@ static inline bool context_would_apply_lookup (hb_would_apply_context_t *c,
 }
 
 template <typename HBUINT>
-static inline bool context_apply_lookup (hb_ot_apply_context_t *c,
-					 unsigned int inputCount, /* Including the first glyph (not matched) */
-					 const HBUINT input[], /* Array of input values--start with second glyph */
-					 unsigned int lookupCount,
-					 const LookupRecord lookupRecord[],
-					 const ContextApplyLookupContext &lookup_context)
+HB_ALWAYS_INLINE
+static bool context_apply_lookup (hb_ot_apply_context_t *c,
+				  unsigned int inputCount, /* Including the first glyph (not matched) */
+				  const HBUINT input[], /* Array of input values--start with second glyph */
+				  unsigned int lookupCount,
+				  const LookupRecord lookupRecord[],
+				  const ContextApplyLookupContext &lookup_context)
 {
   unsigned match_end = 0;
   unsigned match_positions[HB_MAX_CONTEXT_LENGTH];
@@ -1944,6 +1945,9 @@ static inline bool context_apply_lookup (hb_ot_apply_context_t *c,
 template <typename Types>
 struct Rule
 {
+  template <typename T>
+  friend struct RuleSet;
+
   bool intersects (const hb_set_t *glyphs, ContextClosureLookupContext &lookup_context) const
   {
     return context_intersects (glyphs,
@@ -2065,7 +2069,6 @@ struct Rule
 					 * design order */
   public:
   DEFINE_SIZE_ARRAY (4, inputZ);
-  DEFINE_SIZE_MAX (65536 * (Types::HBUINT::static_size + LookupRecord::static_size));
 };
 
 template <typename Types>
@@ -2131,13 +2134,105 @@ struct RuleSet
 	      const ContextApplyLookupContext &lookup_context) const
   {
     TRACE_APPLY (this);
-    return_trace (
-    + hb_iter (rule)
-    | hb_map (hb_add (this))
-    | hb_map ([&] (const Rule &_) { return _.apply (c, lookup_context); })
-    | hb_any
-    )
-    ;
+
+    unsigned num_rules = rule.len;
+
+#ifndef HB_NO_OT_RULESETS_FAST_PATH
+    if (HB_OPTIMIZE_SIZE_VAL || num_rules <= 4)
+#endif
+    {
+    slow:
+      return_trace (
+      + hb_iter (rule)
+      | hb_map (hb_add (this))
+      | hb_map ([&] (const Rule &_) { return _.apply (c, lookup_context); })
+      | hb_any
+      )
+      ;
+    }
+
+    /* This version is optimized for speed by matching the first & second
+     * components of the rule here, instead of calling into the matching code.
+     *
+     * Replicated from LigatureSet::apply(). */
+
+    hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_input;
+    skippy_iter.reset (c->buffer->idx);
+    skippy_iter.set_match_func (match_always, nullptr);
+    skippy_iter.set_glyph_data ((HBUINT16 *) nullptr);
+    unsigned unsafe_to = (unsigned) -1, unsafe_to1 = 0, unsafe_to2 = 0;
+    hb_glyph_info_t *first = nullptr, *second = nullptr;
+    bool matched = skippy_iter.next ();
+    if (likely (matched))
+    {
+      first = &c->buffer->info[skippy_iter.idx];
+      unsafe_to = skippy_iter.idx + 1;
+
+      if (skippy_iter.may_skip (c->buffer->info[skippy_iter.idx]))
+      {
+	/* Can't use the fast path if eg. the next char is a default-ignorable
+	 * or other skippable. */
+        goto slow;
+      }
+    }
+    else
+    {
+      /* Failed to match a next glyph. Only try applying rules that have
+       * no further input. */
+      return_trace (
+      + hb_iter (rule)
+      | hb_map (hb_add (this))
+      | hb_filter ([&] (const Rule &_) { return _.inputCount <= 1; })
+      | hb_map ([&] (const Rule &_) { return _.apply (c, lookup_context); })
+      | hb_any
+      )
+      ;
+    }
+    matched = skippy_iter.next ();
+    if (likely (matched && !skippy_iter.may_skip (c->buffer->info[skippy_iter.idx])))
+    {
+      second = &c->buffer->info[skippy_iter.idx];
+      unsafe_to2 = skippy_iter.idx + 1;
+    }
+
+    auto match_input = lookup_context.funcs.match;
+    auto *input_data = lookup_context.match_data;
+    for (unsigned int i = 0; i < num_rules; i++)
+    {
+      const auto &r = this+rule.arrayZ[i];
+
+      const auto &input = r.inputZ;
+
+      if (r.inputCount <= 1 ||
+	  (!match_input ||
+	   match_input (*first, input.arrayZ[0], input_data)))
+      {
+        if (!second ||
+	    (r.inputCount <= 2 ||
+	     (!match_input ||
+	      match_input (*second, input.arrayZ[1], input_data)))
+	   )
+	{
+	  if (r.apply (c, lookup_context))
+	  {
+	    if (unsafe_to != (unsigned) -1)
+	      c->buffer->unsafe_to_concat (c->buffer->idx, unsafe_to);
+	    return_trace (true);
+	  }
+	}
+	else
+	  unsafe_to = unsafe_to2;
+      }
+      else
+      {
+	if (unsafe_to == (unsigned) -1)
+	  unsafe_to = unsafe_to1;
+      }
+    }
+    if (likely (unsafe_to != (unsigned) -1))
+      c->buffer->unsafe_to_concat (c->buffer->idx, unsafe_to);
+
+    return_trace (false);
   }
 
   bool subset (hb_subset_context_t *c,
@@ -2516,11 +2611,7 @@ struct ContextFormat2_5
     if (cached && c->buffer->cur().syllable() < 255)
       index = c->buffer->cur().syllable ();
     else
-    {
       index = class_def.get_class (c->buffer->cur().codepoint);
-      if (cached && index < 255)
-	c->buffer->cur().syllable() = index;
-    }
     const RuleSet &rule_set = this+ruleSet[index];
     return_trace (rule_set.apply (c, lookup_context));
   }
@@ -2914,16 +3005,17 @@ static inline bool chain_context_would_apply_lookup (hb_would_apply_context_t *c
 }
 
 template <typename HBUINT>
-static inline bool chain_context_apply_lookup (hb_ot_apply_context_t *c,
-					       unsigned int backtrackCount,
-					       const HBUINT backtrack[],
-					       unsigned int inputCount, /* Including the first glyph (not matched) */
-					       const HBUINT input[], /* Array of input values--start with second glyph */
-					       unsigned int lookaheadCount,
-					       const HBUINT lookahead[],
-					       unsigned int lookupCount,
-					       const LookupRecord lookupRecord[],
-					       const ChainContextApplyLookupContext &lookup_context)
+HB_ALWAYS_INLINE
+static bool chain_context_apply_lookup (hb_ot_apply_context_t *c,
+					unsigned int backtrackCount,
+					const HBUINT backtrack[],
+					unsigned int inputCount, /* Including the first glyph (not matched) */
+					const HBUINT input[], /* Array of input values--start with second glyph */
+					unsigned int lookaheadCount,
+					const HBUINT lookahead[],
+					unsigned int lookupCount,
+					const LookupRecord lookupRecord[],
+					const ChainContextApplyLookupContext &lookup_context)
 {
   unsigned end_index = c->buffer->idx;
   unsigned match_end = 0;
@@ -2962,6 +3054,9 @@ static inline bool chain_context_apply_lookup (hb_ot_apply_context_t *c,
 template <typename Types>
 struct ChainRule
 {
+  template <typename T>
+  friend struct ChainRuleSet;
+
   bool intersects (const hb_set_t *glyphs, ChainContextClosureLookupContext &lookup_context) const
   {
     const auto &input = StructAfter<decltype (inputX)> (backtrack);
@@ -3148,7 +3243,6 @@ struct ChainRule
 					 * design order) */
   public:
   DEFINE_SIZE_MIN (8);
-  DEFINE_SIZE_MAX (65536 * (3 * Types::HBUINT::static_size + LookupRecord::static_size));
 };
 
 template <typename Types>
@@ -3211,13 +3305,119 @@ struct ChainRuleSet
 	      const ChainContextApplyLookupContext &lookup_context) const
   {
     TRACE_APPLY (this);
-    return_trace (
-    + hb_iter (rule)
-    | hb_map (hb_add (this))
-    | hb_map ([&] (const ChainRule &_) { return _.apply (c, lookup_context); })
-    | hb_any
-    )
-    ;
+
+    unsigned num_rules = rule.len;
+
+#ifndef HB_NO_OT_RULESETS_FAST_PATH
+    if (HB_OPTIMIZE_SIZE_VAL || num_rules <= 4)
+#endif
+    {
+    slow:
+      return_trace (
+      + hb_iter (rule)
+      | hb_map (hb_add (this))
+      | hb_map ([&] (const ChainRule &_) { return _.apply (c, lookup_context); })
+      | hb_any
+      )
+      ;
+    }
+
+    /* This version is optimized for speed by matching the first & second
+     * components of the rule here, instead of calling into the matching code.
+     *
+     * Replicated from LigatureSet::apply(). */
+
+    hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_input;
+    skippy_iter.reset (c->buffer->idx);
+    skippy_iter.set_match_func (match_always, nullptr);
+    skippy_iter.set_glyph_data ((HBUINT16 *) nullptr);
+    unsigned unsafe_to = (unsigned) -1, unsafe_to1 = 0, unsafe_to2 = 0;
+    hb_glyph_info_t *first = nullptr, *second = nullptr;
+    bool matched = skippy_iter.next ();
+    if (likely (matched))
+    {
+      first = &c->buffer->info[skippy_iter.idx];
+      unsafe_to1 = skippy_iter.idx + 1;
+
+      if (skippy_iter.may_skip (c->buffer->info[skippy_iter.idx]))
+      {
+	/* Can't use the fast path if eg. the next char is a default-ignorable
+	 * or other skippable. */
+        goto slow;
+      }
+    }
+    else
+    {
+      /* Failed to match a next glyph. Only try applying rules that have
+       * no further input and lookahead. */
+      return_trace (
+      + hb_iter (rule)
+      | hb_map (hb_add (this))
+      | hb_filter ([&] (const ChainRule &_)
+		   {
+		     const auto &input = StructAfter<decltype (_.inputX)> (_.backtrack);
+		     const auto &lookahead = StructAfter<decltype (_.lookaheadX)> (input);
+		     return input.lenP1 <= 1 && lookahead.len == 0;
+		   })
+      | hb_map ([&] (const ChainRule &_) { return _.apply (c, lookup_context); })
+      | hb_any
+      )
+      ;
+    }
+    matched = skippy_iter.next ();
+    if (likely (matched && !skippy_iter.may_skip (c->buffer->info[skippy_iter.idx])))
+     {
+      second = &c->buffer->info[skippy_iter.idx];
+      unsafe_to2 = skippy_iter.idx + 1;
+     }
+
+    auto match_input = lookup_context.funcs.match[1];
+    auto match_lookahead = lookup_context.funcs.match[2];
+    auto *input_data = lookup_context.match_data[1];
+    auto *lookahead_data = lookup_context.match_data[2];
+    for (unsigned int i = 0; i < num_rules; i++)
+    {
+      const auto &r = this+rule.arrayZ[i];
+
+      const auto &input = StructAfter<decltype (r.inputX)> (r.backtrack);
+      const auto &lookahead = StructAfter<decltype (r.lookaheadX)> (input);
+
+      unsigned lenP1 = hb_max ((unsigned) input.lenP1, 1u);
+      if (lenP1 > 1 ?
+	   (!match_input ||
+	    match_input (*first, input.arrayZ[0], input_data))
+	  :
+	   (!lookahead.len || !match_lookahead ||
+	    match_lookahead (*first, lookahead.arrayZ[0], lookahead_data)))
+      {
+        if (!second ||
+	    (lenP1 > 2 ?
+	     (!match_input ||
+	      match_input (*second, input.arrayZ[1], input_data))
+	     :
+	     (lookahead.len <= 2 - lenP1 || !match_lookahead ||
+	      match_lookahead (*second, lookahead.arrayZ[2 - lenP1], lookahead_data))))
+	{
+	  if (r.apply (c, lookup_context))
+	  {
+	    if (unsafe_to != (unsigned) -1)
+	      c->buffer->unsafe_to_concat (c->buffer->idx, unsafe_to);
+	    return_trace (true);
+	  }
+	}
+	else
+	  unsafe_to = unsafe_to2;
+      }
+      else
+      {
+	if (unsafe_to == (unsigned) -1)
+	  unsafe_to = unsafe_to1;
+      }
+    }
+    if (likely (unsafe_to != (unsigned) -1))
+      c->buffer->unsafe_to_concat (c->buffer->idx, unsafe_to);
+
+    return_trace (false);
   }
 
   bool subset (hb_subset_context_t *c,
@@ -3616,26 +3816,22 @@ struct ChainContextFormat2_5
     const ClassDef &input_class_def = this+inputClassDef;
     const ClassDef &lookahead_class_def = this+lookaheadClassDef;
 
-    /* For ChainContextFormat2_5 we cache the LookaheadClassDef instead of InputClassDef.
-     * The reason is that most heavy fonts want to identify a glyph in context and apply
-     * a lookup to it. In this scenario, the length of the input sequence is one, whereas
-     * the lookahead / backtrack are typically longer.  The one glyph in input sequence is
-     * looked-up below and no input glyph is looked up in individual rules, whereas the
-     * lookahead and backtrack glyphs are tried.  Since we match lookahead before backtrack,
-     * we should cache lookahead.  This decisions showed a 20% improvement in shaping of
-     * the Gulzar font.
-     */
-
+    /* match_class_caches1 is slightly faster. Use it for lookahead,
+     * which is typically longer. */
     struct ChainContextApplyLookupContext lookup_context = {
-      {{cached && &backtrack_class_def == &lookahead_class_def ? match_class_cached : match_class,
-        cached && &input_class_def == &lookahead_class_def ? match_class_cached : match_class,
-        cached ? match_class_cached : match_class}},
+      {{cached && &backtrack_class_def == &lookahead_class_def ? match_class_cached1 : match_class,
+        cached ? match_class_cached2 : match_class,
+        cached ? match_class_cached1 : match_class}},
       {&backtrack_class_def,
        &input_class_def,
        &lookahead_class_def}
     };
 
-    index = input_class_def.get_class (c->buffer->cur().codepoint);
+    // Note: Corresponds to match_class_cached2
+    if (cached && ((c->buffer->cur().syllable() & 0xF0) >> 4) < 15)
+      index = (c->buffer->cur().syllable () & 0xF0) >> 4;
+    else
+      index = input_class_def.get_class (c->buffer->cur().codepoint);
     const ChainRuleSet &rule_set = this+ruleSet[index];
     return_trace (rule_set.apply (c, lookup_context));
   }
@@ -4139,6 +4335,9 @@ struct hb_ot_layout_lookup_accelerator_t
   bool may_have (hb_codepoint_t g) const
   { return digest.may_have (g); }
 
+#ifndef HB_OPTIMIZE_SIZE
+  HB_ALWAYS_INLINE
+#endif
   bool apply (hb_ot_apply_context_t *c, unsigned subtables_count, bool use_cache) const
   {
 #ifndef HB_NO_OT_LAYOUT_LOOKUP_CACHE

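ChainContextFormat2_5::apply above now reads the cached input class straight out of the high nibble of the per-glyph syllable() byte when caching is enabled (the "Corresponds to match_class_cached2" note). A small sketch of that packing, with illustrative helper names rather than HarfBuzz's own (the write side, presumably match_class_cached2 itself, is not part of this hunk); only class values 0..14 fit, and 15 means "not cached":

    #include <cstdint>

    static inline bool     has_cached_input_class (uint8_t syllable)
    { return ((syllable & 0xF0u) >> 4) < 15; }

    static inline unsigned cached_input_class (uint8_t syllable)
    { return (syllable & 0xF0u) >> 4; }

    static inline uint8_t cache_input_class (uint8_t syllable, unsigned klass)
    { return (uint8_t) ((syllable & 0x0Fu) | ((klass < 15 ? klass : 15u) << 4)); }
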
+ 39 - 1
thirdparty/harfbuzz/src/hb-ot-layout.cc

@@ -1241,7 +1241,7 @@ script_collect_features (hb_collect_features_context_t *c,
  *   terminated by %HB_TAG_NONE
  * @features: (nullable) (array zero-terminated=1): The array of features to collect,
  *   terminated by %HB_TAG_NONE
- * @feature_indexes: (out): The array of feature indexes found for the query
+ * @feature_indexes: (out): The set of feature indexes found for the query
  *
  * Fetches a list of all feature indexes in the specified face's GSUB table
  * or GPOS table, underneath the specified scripts, languages, and features.
@@ -1282,6 +1282,44 @@ hb_ot_layout_collect_features (hb_face_t      *face,
   }
 }
 
+/**
+ * hb_ot_layout_collect_features_map:
+ * @face: #hb_face_t to work upon
+ * @table_tag: #HB_OT_TAG_GSUB or #HB_OT_TAG_GPOS
+ * @script_index: The index of the requested script tag
+ * @language_index: The index of the requested language tag
+ * @feature_map: (out): The map of feature tag to feature index.
+ *
+ * Fetches the mapping from feature tags to feature indexes for
+ * the specified script and language.
+ *
+ * Since: 8.1.0
+ **/
+void
+hb_ot_layout_collect_features_map (hb_face_t      *face,
+				   hb_tag_t        table_tag,
+				   unsigned        script_index,
+				   unsigned        language_index,
+				   hb_map_t       *feature_map /* OUT */)
+{
+  const OT::GSUBGPOS &g = get_gsubgpos_table (face, table_tag);
+  const OT::LangSys &l = g.get_script (script_index).get_lang_sys (language_index);
+
+  unsigned int count = l.get_feature_indexes (0, nullptr, nullptr);
+  feature_map->alloc (count);
+
+  for (unsigned int i = 0; i < count; i++)
+  {
+    unsigned feature_index = 0;
+    unsigned feature_count = 1;
+    l.get_feature_indexes (i, &feature_count, &feature_index);
+    if (!feature_count)
+      break;
+    hb_tag_t feature_tag = g.get_feature_tag (feature_index);
+    feature_map->set (feature_tag, feature_index);
+  }
+}
+
 
 /**
  * hb_ot_layout_collect_lookups:

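A minimal usage sketch for the new hb_ot_layout_collect_features_map() API. The face and the script/language indices are assumed to come from elsewhere (e.g. hb_ot_layout_table_find_script() / hb_ot_layout_script_select_language()), and the 'liga' tag is only an example:

    #include <hb.h>
    #include <hb-ot.h>
    #include <cstdio>

    static void
    print_liga_index (hb_face_t *face, unsigned script_index, unsigned language_index)
    {
      hb_map_t *feature_map = hb_map_create ();
      hb_ot_layout_collect_features_map (face, HB_OT_TAG_GSUB,
                                         script_index, language_index,
                                         feature_map);

      /* Look up the index of the 'liga' feature for this script/language system. */
      hb_codepoint_t feature_index = hb_map_get (feature_map, HB_TAG ('l','i','g','a'));
      if (feature_index != HB_MAP_VALUE_INVALID)
        printf ("'liga' is GSUB feature index %u\n", feature_index);

      hb_map_destroy (feature_map);
    }
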
+ 7 - 0
thirdparty/harfbuzz/src/hb-ot-layout.h

@@ -324,6 +324,13 @@ hb_ot_layout_collect_features (hb_face_t      *face,
 			       const hb_tag_t *features,
 			       hb_set_t       *feature_indexes /* OUT */);
 
+HB_EXTERN void
+hb_ot_layout_collect_features_map (hb_face_t      *face,
+				   hb_tag_t        table_tag,
+				   unsigned        script_index,
+				   unsigned        language_index,
+				   hb_map_t       *feature_map /* OUT */);
+
 HB_EXTERN void
 hb_ot_layout_collect_lookups (hb_face_t      *face,
 			      hb_tag_t        table_tag,

+ 15 - 7
thirdparty/harfbuzz/src/hb-ot-map.cc

@@ -239,6 +239,13 @@ hb_ot_map_builder_t::compile (hb_ot_map_t                  &m,
     feature_infos.shrink (j + 1);
   }
 
+  hb_map_t feature_indices[2];
+  for (unsigned int table_index = 0; table_index < 2; table_index++)
+    hb_ot_layout_collect_features_map (face,
+				       table_tags[table_index],
+				       script_index[table_index],
+				       language_index[table_index],
+				       &feature_indices[table_index]);
 
   /* Allocate bits now */
   static_assert ((!(HB_GLYPH_FLAG_DEFINED & (HB_GLYPH_FLAG_DEFINED + 1))), "");
@@ -261,7 +268,6 @@ hb_ot_map_builder_t::compile (hb_ot_map_t                  &m,
     if (!info->max_value || next_bit + bits_needed >= global_bit_shift)
       continue; /* Feature disabled, or not enough bits. */
 
-
     bool found = false;
     unsigned int feature_index[2];
     for (unsigned int table_index = 0; table_index < 2; table_index++)
@@ -269,12 +275,14 @@ hb_ot_map_builder_t::compile (hb_ot_map_t                  &m,
       if (required_feature_tag[table_index] == info->tag)
 	required_feature_stage[table_index] = info->stage[table_index];
 
-      found |= (bool) hb_ot_layout_language_find_feature (face,
-							  table_tags[table_index],
-							  script_index[table_index],
-							  language_index[table_index],
-							  info->tag,
-							  &feature_index[table_index]);
+      hb_codepoint_t *index;
+      if (feature_indices[table_index].has (info->tag, &index))
+      {
+        feature_index[table_index] = *index;
+        found = true;
+      }
+      else
+        feature_index[table_index] = HB_OT_LAYOUT_NO_FEATURE_INDEX;
     }
     if (!found && (info->flags & F_GLOBAL_SEARCH))
     {

+ 18 - 1
thirdparty/harfbuzz/src/hb-ot-shape.cc

@@ -476,9 +476,18 @@ hb_set_unicode_props (hb_buffer_t *buffer)
   {
     _hb_glyph_info_set_unicode_props (&info[i], buffer);
 
+    unsigned gen_cat = _hb_glyph_info_get_general_category (&info[i]);
+    if (FLAG_UNSAFE (gen_cat) &
+	(FLAG (HB_UNICODE_GENERAL_CATEGORY_LOWERCASE_LETTER) |
+	 FLAG (HB_UNICODE_GENERAL_CATEGORY_UPPERCASE_LETTER) |
+	 FLAG (HB_UNICODE_GENERAL_CATEGORY_TITLECASE_LETTER) |
+	 FLAG (HB_UNICODE_GENERAL_CATEGORY_OTHER_LETTER) |
+	 FLAG (HB_UNICODE_GENERAL_CATEGORY_SPACE_SEPARATOR)))
+      continue;
+
     /* Marks are already set as continuation by the above line.
      * Handle Emoji_Modifier and ZWJ-continuation. */
-    if (unlikely (_hb_glyph_info_get_general_category (&info[i]) == HB_UNICODE_GENERAL_CATEGORY_MODIFIER_SYMBOL &&
+    if (unlikely (gen_cat == HB_UNICODE_GENERAL_CATEGORY_MODIFIER_SYMBOL &&
 		  hb_in_range<hb_codepoint_t> (info[i].codepoint, 0x1F3FBu, 0x1F3FFu)))
     {
       _hb_glyph_info_set_continuation (&info[i]);
@@ -756,6 +765,14 @@ hb_ot_shape_setup_masks_fraction (const hb_ot_shape_context_t *c)
 	     _hb_glyph_info_get_general_category (&info[end]) ==
 	     HB_UNICODE_GENERAL_CATEGORY_DECIMAL_NUMBER)
 	end++;
+      if (start == i || end == i + 1)
+      {
+        if (start == i)
+	  buffer->unsafe_to_concat (start, start + 1);
+	if (end == i + 1)
+	  buffer->unsafe_to_concat (end - 1, end);
+	continue;
+      }
 
       buffer->unsafe_to_break (start, end);
 

(The diff view for this file has been truncated because it is too large.)
+ 603 - 736
thirdparty/harfbuzz/src/hb-ot-shaper-use-machine.hh


+ 164 - 0
thirdparty/harfbuzz/src/hb-ot-var-avar-table.hh

@@ -72,6 +72,65 @@ struct AxisValueMap
     return_trace (c->check_struct (this));
   }
 
+  void set_mapping (float from_coord, float to_coord)
+  {
+    coords[0].set_float (from_coord);
+    coords[1].set_float (to_coord);
+  }
+
+  bool is_outside_axis_range (const Triple& axis_range) const
+  {
+    float from_coord = coords[0].to_float ();
+    return !axis_range.contains (from_coord);
+  }
+
+  bool must_include () const
+  {
+    float from_coord = coords[0].to_float ();
+    float to_coord = coords[1].to_float ();
+    return (from_coord == -1.f && to_coord == -1.f) ||
+           (from_coord == 0.f && to_coord == 0.f) ||
+           (from_coord == 1.f && to_coord == 1.f);
+  }
+
+  void instantiate (const Triple& axis_range,
+                    const Triple& unmapped_range,
+                    const TripleDistances& triple_distances)
+  {
+    float from_coord = coords[0].to_float ();
+    float to_coord = coords[1].to_float ();
+
+    from_coord = renormalizeValue (from_coord, unmapped_range, triple_distances);
+    to_coord = renormalizeValue (to_coord, axis_range, triple_distances);
+
+    coords[0].set_float (from_coord);
+    coords[1].set_float (to_coord);
+  }
+
+  HB_INTERNAL static int cmp (const void *pa, const void *pb)
+  {
+    const AxisValueMap *a = (const AxisValueMap *) pa;
+    const AxisValueMap *b = (const AxisValueMap *) pb;
+
+    int a_from = a->coords[0].to_int ();
+    int b_from = b->coords[0].to_int ();
+    if (a_from != b_from)
+      return a_from - b_from;
+
+    /* this should never be reached. according to the spec, all of the axis
+     * value map records for a given axis must have different fromCoord values
+     * */
+    int a_to = a->coords[1].to_int ();
+    int b_to = b->coords[1].to_int ();
+    return a_to - b_to;
+  }
+
+  bool serialize (hb_serialize_context_t *c) const
+  {
+    TRACE_SERIALIZE (this);
+    return_trace (c->embed (this));
+  }
+
   public:
   F2DOT14	coords[2];
 //   F2DOT14	fromCoord;	/* A normalized coordinate value obtained using
@@ -122,6 +181,78 @@ struct SegmentMaps : Array16Of<AxisValueMap>
 
   int unmap (int value) const { return map (value, 1, 0); }
 
+  Triple unmap_axis_range (const Triple& axis_range) const
+  {
+    F2DOT14 val, unmapped_val;
+
+    val.set_float (axis_range.minimum);
+    unmapped_val.set_int (unmap (val.to_int ()));
+    float unmapped_min = unmapped_val.to_float ();
+
+    val.set_float (axis_range.middle);
+    unmapped_val.set_int (unmap (val.to_int ()));
+    float unmapped_middle = unmapped_val.to_float ();
+
+    val.set_float (axis_range.maximum);
+    unmapped_val.set_int (unmap (val.to_int ()));
+    float unmapped_max = unmapped_val.to_float ();
+
+    return Triple{unmapped_min, unmapped_middle, unmapped_max};
+  }
+
+  bool subset (hb_subset_context_t *c, hb_tag_t axis_tag) const
+  {
+    TRACE_SUBSET (this);
+    /* avar mapped normalized axis range*/
+    Triple *axis_range;
+    if (!c->plan->axes_location.has (axis_tag, &axis_range))
+      return c->serializer->embed (*this);
+
+    TripleDistances *axis_triple_distances;
+    if (!c->plan->axes_triple_distances.has (axis_tag, &axis_triple_distances))
+      return_trace (false);
+
+    auto *out = c->serializer->start_embed (this);
+    if (unlikely (!c->serializer->extend_min (out))) return_trace (false);
+
+    Triple unmapped_range = unmap_axis_range (*axis_range);
+
+    /* create a vector of retained mappings and sort */
+    hb_vector_t<AxisValueMap> value_mappings;
+    for (const auto& _ : as_array ())
+    {
+      if (_.is_outside_axis_range (unmapped_range))
+        continue;
+      AxisValueMap mapping;
+      mapping = _;
+      mapping.instantiate (*axis_range, unmapped_range, *axis_triple_distances);
+      /* (-1, -1), (0, 0), (1, 1) mappings will be added later, so avoid
+       * duplicates here */
+      if (mapping.must_include ())
+        continue;
+      value_mappings.push (std::move (mapping));
+    }
+
+    AxisValueMap m;
+    m.set_mapping (-1.f, -1.f);
+    value_mappings.push (m);
+
+    m.set_mapping (0.f, 0.f);
+    value_mappings.push (m);
+
+    m.set_mapping (1.f, 1.f);
+    value_mappings.push (m);
+
+    value_mappings.qsort ();
+
+    for (const auto& _ : value_mappings)
+    {
+      if (!_.serialize (c->serializer))
+        return_trace (false);
+    }
+    return_trace (c->serializer->check_assign (out->len, value_mappings.length, HB_SERIALIZE_ERROR_INT_OVERFLOW));
+  }
+
   public:
   DEFINE_SIZE_ARRAY (2, *this);
 };
@@ -225,6 +356,39 @@ struct avar
     }
   }
 
+  bool subset (hb_subset_context_t *c) const
+  {
+    TRACE_SUBSET (this);
+    unsigned retained_axis_count = c->plan->axes_index_map.get_population ();
+    if (!retained_axis_count) //all axes are pinned/dropped
+      return_trace (false);
+
+    avar *out = c->serializer->allocate_min<avar> ();
+    if (unlikely (!out)) return_trace (false);
+
+    out->version.major = 1;
+    out->version.minor = 0;
+    if (!c->serializer->check_assign (out->axisCount, retained_axis_count, HB_SERIALIZE_ERROR_INT_OVERFLOW))
+      return_trace (false);
+
+    const hb_map_t& axes_index_map = c->plan->axes_index_map;
+    const SegmentMaps *map = &firstAxisSegmentMaps;
+    unsigned count = axisCount;
+    for (unsigned int i = 0; i < count; i++)
+    {
+      if (axes_index_map.has (i))
+      {
+        hb_tag_t *axis_tag;
+        if (!c->plan->axes_old_index_tag_map.has (i, &axis_tag))
+          return_trace (false);
+        if (!map->subset (c, *axis_tag))
+          return_trace (false);
+      }
+      map = &StructAfter<SegmentMaps> (*map);
+    }
+    return_trace (true);
+  }
+
   protected:
   FixedVersion<>version;	/* Version of the avar table
 				 * initially set to 0x00010000u */

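For context on what SegmentMaps::map()/unmap() and the new unmap_axis_range() are working with: an avar segment map is a piecewise-linear curve through sorted (fromCoord, toCoord) pairs. A float-based sketch of the forward mapping; the real code operates on F2DOT14 fixed-point values, and this helper is illustrative only:

    #include <utility>
    #include <vector>

    /* segments: (fromCoord, toCoord) pairs sorted by fromCoord,
     * e.g. {{-1.f,-1.f}, {0.f,0.f}, {0.5f,0.25f}, {1.f,1.f}} */
    static float avar_map (float v, const std::vector<std::pair<float, float>> &segments)
    {
      if (segments.empty ()) return v;
      if (v <= segments.front ().first) return segments.front ().second;
      if (v >= segments.back ().first)  return segments.back ().second;
      for (std::size_t i = 1; i < segments.size (); i++)
        if (v <= segments[i].first)
        {
          /* Linear interpolation between the two bracketing mapping records. */
          float f0 = segments[i - 1].first,  f1 = segments[i].first;
          float t0 = segments[i - 1].second, t1 = segments[i].second;
          return t0 + (t1 - t0) * ((v - f0) / (f1 - f0));
        }
      return v; /* unreachable for well-formed maps */
    }
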
+ 49 - 56
thirdparty/harfbuzz/src/hb-ot-var-common.hh

@@ -60,18 +60,21 @@ struct DeltaSetIndexMapFormat01
 
     entryFormat = ((width-1)<<4)|(inner_bit_count-1);
     mapCount = output_map.length;
-    HBUINT8 *p = c->allocate_size<HBUINT8> (width * output_map.length, false);
+    HBUINT8 *p = c->allocate_size<HBUINT8> (width * output_map.length);
     if (unlikely (!p)) return_trace (false);
     for (unsigned int i = 0; i < output_map.length; i++)
     {
-      unsigned int v = output_map[i];
-      unsigned int outer = v >> 16;
-      unsigned int inner = v & 0xFFFF;
-      unsigned int u = (outer << inner_bit_count) | inner;
-      for (unsigned int w = width; w > 0;)
+      unsigned int v = output_map.arrayZ[i];
+      if (v)
       {
-        p[--w] = u;
-        u >>= 8;
+	unsigned int outer = v >> 16;
+	unsigned int inner = v & 0xFFFF;
+	unsigned int u = (outer << inner_bit_count) | inner;
+	for (unsigned int w = width; w > 0;)
+	{
+	  p[--w] = u;
+	  u >>= 8;
+	}
       }
       p += width;
     }
@@ -421,25 +424,6 @@ struct TupleVariationHeader
   DEFINE_SIZE_MIN (4);
 };
 
-/* not using hb_bytes_t: avoid potential build issues with some compilers */
-struct byte_data_t
-{
-  hb_bytes_t bytes;
-
-  byte_data_t () = default;
-  byte_data_t (const char *p_, unsigned len_) : bytes (hb_bytes_t (p_, len_)) {}
-
-  void fini () { bytes.fini (); }
-
-  bool operator == (const byte_data_t& o) const
-  { return bytes.arrayZ == o.bytes.arrayZ && bytes.length == o.bytes.length; }
-
-  explicit operator bool () const { return bytes.length; }
-
-  void copy (hb_serialize_context_t *c) const
-  { c->embed (bytes.arrayZ, bytes.length); }
-};
-
 enum packed_delta_flag_t
 {
   DELTAS_ARE_ZERO      = 0x80,
@@ -505,6 +489,7 @@ struct tuple_delta_t
       else
       {
         if (!o.indices.arrayZ[i]) continue;
+        indices.arrayZ[i] = true;
         deltas_x[i] = o.deltas_x[i];
         if (deltas_y && o.deltas_y)
           deltas_y[i] = o.deltas_y[i];
@@ -530,7 +515,8 @@ struct tuple_delta_t
     return *this;
   }
 
-  hb_vector_t<tuple_delta_t> change_tuple_var_axis_limit (hb_tag_t axis_tag, Triple axis_limit) const
+  hb_vector_t<tuple_delta_t> change_tuple_var_axis_limit (hb_tag_t axis_tag, Triple axis_limit,
+                                                          TripleDistances axis_triple_distances) const
   {
     hb_vector_t<tuple_delta_t> out;
     Triple *tent;
@@ -550,7 +536,7 @@ struct tuple_delta_t
       return out;
     }
 
-    result_t solutions = rebase_tent (*tent, axis_limit);
+    result_t solutions = rebase_tent (*tent, axis_limit, axis_triple_distances);
     for (auto t : solutions)
     {
       tuple_delta_t new_var = *this;
@@ -715,6 +701,8 @@ struct tuple_delta_t
       }
 
       if (j != rounded_deltas.length) return false;
+      /* reset i because we reuse rounded_deltas for deltas_y */
+      i = 0;
       encoded_len += encode_delta_run (i, compiled_deltas.as_array ().sub_array (encoded_len), rounded_deltas);
     }
     return compiled_deltas.resize (encoded_len);
@@ -755,14 +743,14 @@ struct tuple_delta_t
 
     while (run_length >= 64)
     {
-      *it++ = (DELTAS_ARE_ZERO | 63);
+      *it++ = char (DELTAS_ARE_ZERO | 63);
       run_length -= 64;
       encoded_len++;
     }
 
     if (run_length)
     {
-      *it++ = (DELTAS_ARE_ZERO | (run_length - 1));
+      *it++ = char (DELTAS_ARE_ZERO | (run_length - 1));
       encoded_len++;
     }
     return encoded_len;
@@ -870,6 +858,7 @@ struct tuple_delta_t
     if (run_length)
     {
       *it++ = (DELTAS_ARE_WORDS | (run_length - 1));
+      encoded_len++;
       while (start < i)
       {
         int16_t delta_val = deltas[start++];
@@ -917,7 +906,7 @@ struct TupleVariationData
 
     private:
     /* referenced point set->compiled point data map */
-    hb_hashmap_t<const hb_vector_t<bool>*, byte_data_t> point_data_map;
+    hb_hashmap_t<const hb_vector_t<bool>*, hb_bytes_t> point_data_map;
     /* referenced point set-> count map, used in finding shared points */
     hb_hashmap_t<const hb_vector_t<bool>*, unsigned> point_set_count_map;
 
@@ -1003,16 +992,21 @@ struct TupleVariationData
       return true;
     }
 
-    void change_tuple_variations_axis_limits (const hb_hashmap_t<hb_tag_t, Triple> *normalized_axes_location)
+    void change_tuple_variations_axis_limits (const hb_hashmap_t<hb_tag_t, Triple>& normalized_axes_location,
+                                              const hb_hashmap_t<hb_tag_t, TripleDistances>& axes_triple_distances)
     {
-      for (auto _ : *normalized_axes_location)
+      for (auto _ : normalized_axes_location)
       {
         hb_tag_t axis_tag = _.first;
         Triple axis_limit = _.second;
+        TripleDistances axis_triple_distances{1.f, 1.f};
+        if (axes_triple_distances.has (axis_tag))
+          axis_triple_distances = axes_triple_distances.get (axis_tag);
+
         hb_vector_t<tuple_delta_t> new_vars;
         for (const tuple_delta_t& var : tuple_vars)
         {
-          hb_vector_t<tuple_delta_t> out = var.change_tuple_var_axis_limit (axis_tag, axis_limit);
+          hb_vector_t<tuple_delta_t> out = var.change_tuple_var_axis_limit (axis_tag, axis_limit, axis_triple_distances);
           if (!out) continue;
           unsigned new_len = new_vars.length + out.length;
 
@@ -1054,7 +1048,7 @@ struct TupleVariationData
       tuple_vars = std::move (new_vars);
     }
 
-    byte_data_t compile_point_set (const hb_vector_t<bool> &point_indices)
+    hb_bytes_t compile_point_set (const hb_vector_t<bool> &point_indices)
     {
       unsigned num_points = 0;
       for (bool i : point_indices)
@@ -1066,15 +1060,15 @@ struct TupleVariationData
       if (num_points == indices_length)
       {
         char *p = (char *) hb_calloc (1, sizeof (char));
-        if (unlikely (!p)) return byte_data_t ();
+        if (unlikely (!p)) return hb_bytes_t ();
 
-        return byte_data_t (p, 1);
+        return hb_bytes_t (p, 1);
       }
 
       /* allocate enough memories: 2 bytes for count + 3 bytes for each point */
       unsigned num_bytes = 2 + 3 *num_points;
       char *p = (char *) hb_calloc (num_bytes, sizeof (char));
-      if (unlikely (!p)) return byte_data_t ();
+      if (unlikely (!p)) return hb_bytes_t ();
 
       unsigned pos = 0;
       /* binary data starts with the total number of reference points */
@@ -1137,10 +1131,10 @@ struct TupleVariationData
         else
           p[header_pos] = (run_length - 1) | 0x80;
       }
-      return byte_data_t (p, pos);
+      return hb_bytes_t (p, pos);
     }
 
-    /* compile all point set and store byte data in a point_set->byte_data_t hashmap,
+    /* compile all point set and store byte data in a point_set->hb_bytes_t hashmap,
      * also update point_set->count map, which will be used in finding shared
      * point set*/
     bool compile_all_point_sets ()
@@ -1157,8 +1151,8 @@ struct TupleVariationData
           continue;
         }
         
-        byte_data_t compiled_data = compile_point_set (*points_set);
-        if (unlikely (compiled_data == byte_data_t ()))
+        hb_bytes_t compiled_data = compile_point_set (*points_set);
+        if (unlikely (compiled_data == hb_bytes_t ()))
           return false;
         
         if (!point_data_map.set (points_set, compiled_data) ||
@@ -1169,19 +1163,19 @@ struct TupleVariationData
     }
 
     /* find shared points set which saves most bytes */
-    byte_data_t find_shared_points ()
+    hb_bytes_t find_shared_points ()
     {
       unsigned max_saved_bytes = 0;
-      byte_data_t res{};
+      hb_bytes_t res{};
 
       for (const auto& _ : point_data_map.iter ())
       {
         const hb_vector_t<bool>* points_set = _.first;
-        unsigned data_length = _.second.bytes.length;
+        unsigned data_length = _.second.length;
         unsigned *count;
         if (unlikely (!point_set_count_map.has (points_set, &count) ||
                       *count <= 1))
-          return byte_data_t ();
+          return hb_bytes_t ();
 
         unsigned saved_bytes = data_length * ((*count) -1);
         if (saved_bytes > max_saved_bytes)
@@ -1193,9 +1187,10 @@ struct TupleVariationData
       return res;
     }
 
-    void instantiate (const hb_hashmap_t<hb_tag_t, Triple>& normalized_axes_location)
+    void instantiate (const hb_hashmap_t<hb_tag_t, Triple>& normalized_axes_location,
+                      const hb_hashmap_t<hb_tag_t, TripleDistances>& axes_triple_distances)
     {
-      change_tuple_variations_axis_limits (&normalized_axes_location);
+      change_tuple_variations_axis_limits (normalized_axes_location, axes_triple_distances);
       merge_tuple_variations ();
     }
 
@@ -1209,14 +1204,14 @@ struct TupleVariationData
       for (auto& tuple: tuple_vars)
       {
         const hb_vector_t<bool>* points_set = &(tuple.indices);
-        byte_data_t *points_data;
+        hb_bytes_t *points_data;
         if (unlikely (!point_data_map.has (points_set, &points_data)))
           return false;
 
         if (!tuple.compile_deltas ())
           return false;
 
-        if (!tuple.compile_tuple_var_header (axes_index_map, points_data->bytes.length, axes_old_index_tag_map))
+        if (!tuple.compile_tuple_var_header (axes_index_map, points_data->length, axes_old_index_tag_map))
           return false;
       }
       return true;
@@ -1227,8 +1222,7 @@ struct TupleVariationData
       TRACE_SERIALIZE (this);
       for (const auto& tuple: tuple_vars)
       {
-        byte_data_t compiled_bytes {tuple.compiled_tuple_header.arrayZ, tuple.compiled_tuple_header.length};
-        compiled_bytes.copy (c);
+        tuple.compiled_tuple_header.as_array ().copy (c);
         if (c->in_error ()) return_trace (false);
         total_header_len += tuple.compiled_tuple_header.length;
       }
@@ -1241,13 +1235,12 @@ struct TupleVariationData
       for (const auto& tuple: tuple_vars)
       {
         const hb_vector_t<bool>* points_set = &(tuple.indices);
-        byte_data_t *point_data;
+        hb_bytes_t *point_data;
         if (!point_data_map.has (points_set, &point_data))
           return_trace (false);
 
         point_data->copy (c);
-        byte_data_t compiled_bytes {tuple.compiled_deltas.arrayZ, tuple.compiled_deltas.length};
-        compiled_bytes.copy (c);
+        tuple.compiled_deltas.as_array ().copy (c);
         if (c->in_error ()) return_trace (false);
       }
       return_trace (true);

+ 2 - 1
thirdparty/harfbuzz/src/hb-ot-var-cvar-table.hh

@@ -161,13 +161,14 @@ struct cvar
     const hb_tag_t cvt = HB_TAG('c','v','t',' ');
     hb_blob_t *cvt_blob = hb_face_reference_table (c->plan->source, cvt);
     unsigned point_count = hb_blob_get_length (cvt_blob) / FWORD::static_size;
+    hb_blob_destroy (cvt_blob);
 
     if (!decompile_tuple_variations (axis_count, point_count, false,
                                      &(c->plan->axes_old_index_tag_map),
                                      tuple_variations))
       return_trace (false);
 
-    tuple_variations.instantiate (c->plan->axes_location);
+    tuple_variations.instantiate (c->plan->axes_location, c->plan->axes_triple_distances);
     if (!tuple_variations.compile_bytes (c->plan->axes_index_map, c->plan->axes_old_index_tag_map))
       return_trace (false);
 

+ 35 - 7
thirdparty/harfbuzz/src/hb-ot-var-fvar-table.hh

@@ -99,16 +99,16 @@ struct InstanceRecord
     for (unsigned i = 0 ; i < axis_count; i++)
     {
       uint32_t *axis_tag;
+      Triple *axis_limit;
       // only keep instances whose coordinates == pinned axis location
-      if (!c->plan->axes_old_index_tag_map.has (i, &axis_tag)) continue;
-      if (axes_location->has (*axis_tag))
+      if (!c->plan->axes_old_index_tag_map.has (i, &axis_tag)) return_trace (false);
+      if (axes_location->has (*axis_tag, &axis_limit))
       {
-        Triple axis_limit = axes_location->get (*axis_tag);
-        if (!axis_coord_pinned_or_within_axis_range (coords, i, axis_limit))
+        if (!axis_coord_pinned_or_within_axis_range (coords, i, *axis_limit))
           return_trace (false);
         
         //skip pinned axis
-        if (axis_limit.is_point ())
+        if (axis_limit->is_point ())
           continue;
       }
 
@@ -228,6 +228,30 @@ struct AxisRecord
     return defaultValue.to_float ();
   }
 
+  TripleDistances get_triple_distances () const
+  {
+    float min, default_, max;
+    get_coordinates (min, default_, max);
+    return TripleDistances (min, default_, max);
+  }
+
+  bool subset (hb_subset_context_t *c) const
+  {
+    TRACE_SUBSET (this);
+    auto *out = c->serializer->embed (this);
+    if (unlikely (!out)) return_trace (false);
+
+    const hb_hashmap_t<hb_tag_t, Triple>& user_axes_location = c->plan->user_axes_location;
+    Triple *axis_limit;
+    if (user_axes_location.has (axisTag, &axis_limit))
+    {
+      out->minValue.set_float (axis_limit->minimum);
+      out->defaultValue.set_float (axis_limit->middle);
+      out->maxValue.set_float (axis_limit->maximum);
+    }
+    return_trace (true);
+  }
+
   public:
   Tag		axisTag;	/* Tag identifying the design variation for the axis. */
   protected:
@@ -416,21 +440,25 @@ struct fvar
     for (unsigned i = 0 ; i < (unsigned)axisCount; i++)
     {
       if (!c->plan->axes_index_map.has (i)) continue;
-      if (unlikely (!c->serializer->embed (axes_records[i])))
+      if (unlikely (!axes_records[i].subset (c)))
         return_trace (false);
     }
 
     if (!c->serializer->check_assign (out->firstAxis, get_size (), HB_SERIALIZE_ERROR_INT_OVERFLOW))
       return_trace (false);
 
+    unsigned num_retained_instances = 0;
     for (unsigned i = 0 ; i < (unsigned)instanceCount; i++)
     {
       const InstanceRecord *instance = get_instance (i);
       auto snap = c->serializer->snapshot ();
       if (!instance->subset (c, axisCount, has_postscript_nameid))
         c->serializer->revert (snap);
+      else
+        num_retained_instances++;
     }
-    return_trace (true);
+
+    return_trace (c->serializer->check_assign (out->instanceCount, num_retained_instances, HB_SERIALIZE_ERROR_INT_OVERFLOW));
   }
 
   public:

+ 20 - 33
thirdparty/harfbuzz/src/hb-ot-var-hvar-table.hh

@@ -45,7 +45,8 @@ struct index_map_subset_plan_t
   void init (const DeltaSetIndexMap  &index_map,
 	     hb_inc_bimap_t	     &outer_map,
 	     hb_vector_t<hb_set_t *> &inner_sets,
-	     const hb_subset_plan_t  *plan)
+	     const hb_subset_plan_t  *plan,
+	     bool bypass_empty = true)
   {
     map_count = 0;
     outer_bit_count = 0;
@@ -53,11 +54,10 @@ struct index_map_subset_plan_t
     max_inners.init ();
     output_map.init ();
 
-    if (&index_map == &Null (DeltaSetIndexMap)) return;
+    if (bypass_empty && !index_map.get_map_count ()) return;
 
     unsigned int	last_val = (unsigned int)-1;
     hb_codepoint_t	last_gid = HB_CODEPOINT_INVALID;
-    hb_codepoint_t	num_gid = (hb_codepoint_t) hb_min (index_map.get_map_count (), plan->num_output_glyphs ());
 
     outer_bit_count = (index_map.get_width () * 8) - index_map.get_inner_bit_count ();
     max_inners.resize (inner_sets.length);
@@ -68,24 +68,17 @@ struct index_map_subset_plan_t
     unsigned count = new_to_old_gid_list.length;
     for (unsigned j = count; j; j--)
     {
-      hb_codepoint_t gid = new_to_old_gid_list[j - 1].first;
-      if (gid >= num_gid) continue;
-
-      hb_codepoint_t old_gid = new_to_old_gid_list[j - 1].second;
+      hb_codepoint_t gid = new_to_old_gid_list.arrayZ[j - 1].first;
+      hb_codepoint_t old_gid = new_to_old_gid_list.arrayZ[j - 1].second;
 
       unsigned int v = index_map.map (old_gid);
       if (last_gid == HB_CODEPOINT_INVALID)
       {
-	if (gid + 1 != num_gid)
-	{
-	  last_gid = gid + 1;
-	  break;
-	}
 	last_val = v;
 	last_gid = gid;
 	continue;
       }
-      if (v != last_val || gid + 1 != last_gid)
+      if (v != last_val)
 	break;
 
       last_gid = gid;
@@ -120,8 +113,6 @@ struct index_map_subset_plan_t
 	      const hb_vector_t<hb_inc_bimap_t> &inner_maps,
 	      const hb_subset_plan_t *plan)
   {
-    if (input_map == &Null (DeltaSetIndexMap)) return;
-
     for (unsigned int i = 0; i < max_inners.length; i++)
     {
       if (inner_maps[i].get_population () == 0) continue;
@@ -129,18 +120,17 @@ struct index_map_subset_plan_t
       if (bit_count > inner_bit_count) inner_bit_count = bit_count;
     }
 
-    output_map.resize (map_count);
-    for (hb_codepoint_t gid = 0; gid < output_map.length; gid++)
+    if (unlikely (!output_map.resize (map_count))) return;
+    for (const auto &_ : plan->new_to_old_gid_list)
     {
-      hb_codepoint_t	old_gid;
-      if (plan->old_gid_for_new_gid (gid, &old_gid))
-      {
-	uint32_t v = input_map->map (old_gid);
-	unsigned int outer = v >> 16;
-	output_map[gid] = (outer_map[outer] << 16) | (inner_maps[outer][v & 0xFFFF]);
-      }
-      else
-	output_map[gid] = 0;	/* Map unused glyph to outer/inner=0/0 */
+      hb_codepoint_t new_gid = _.first;
+      hb_codepoint_t old_gid = _.second;
+
+      if (unlikely (new_gid >= map_count)) break;
+
+      uint32_t v = input_map->map (old_gid);
+      unsigned int outer = v >> 16;
+      output_map.arrayZ[new_gid] = (outer_map[outer] << 16) | (inner_maps[outer][v & 0xFFFF]);
     }
   }
 
@@ -184,7 +174,7 @@ struct hvarvvar_subset_plan_t
     if (unlikely (!index_map_plans.length || !inner_sets.length || !inner_maps.length)) return;
 
     bool retain_adv_map = false;
-    index_map_plans[0].init (*index_maps[0], outer_map, inner_sets, plan);
+    index_map_plans[0].init (*index_maps[0], outer_map, inner_sets, plan, false);
     if (index_maps[0] == &Null (DeltaSetIndexMap))
     {
       retain_adv_map = plan->flags & HB_SUBSET_FLAGS_RETAIN_GIDS;
@@ -201,13 +191,10 @@ struct hvarvvar_subset_plan_t
 
     if (retain_adv_map)
     {
-      unsigned num_glyphs = plan->num_output_glyphs ();
-      for (hb_codepoint_t gid = 0; gid < num_glyphs; gid++)
+      for (const auto &_ : plan->new_to_old_gid_list)
       {
-	if (inner_sets[0]->has (gid))
-	  inner_maps[0].add (gid);
-	else
-	  inner_maps[0].skip ();
+        hb_codepoint_t old_gid = _.second;
+	inner_maps[0].add (old_gid);
       }
     }
     else

+ 19 - 8
thirdparty/harfbuzz/src/hb-priority-queue.hh

@@ -54,6 +54,9 @@ struct hb_priority_queue_t
 
   bool in_error () const { return heap.in_error (); }
 
+#ifndef HB_OPTIMIZE_SIZE
+  HB_ALWAYS_INLINE
+#endif
   void insert (int64_t priority, unsigned value)
   {
     heap.push (item_t (priority, value));
@@ -61,6 +64,9 @@ struct hb_priority_queue_t
     bubble_up (heap.length - 1);
   }
 
+#ifndef HB_OPTIMIZE_SIZE
+  HB_ALWAYS_INLINE
+#endif
   item_t pop_minimum ()
   {
     assert (!is_empty ());
@@ -106,8 +112,10 @@ struct hb_priority_queue_t
     return 2 * index + 2;
   }
 
+  HB_ALWAYS_INLINE
   void bubble_down (unsigned index)
   {
+    repeat:
     assert (index < heap.length);
 
     unsigned left = left_child (index);
@@ -123,19 +131,21 @@ struct hb_priority_queue_t
         && (!has_right || heap.arrayZ[index].first <= heap.arrayZ[right].first))
       return;
 
+    unsigned child;
     if (!has_right || heap.arrayZ[left].first < heap.arrayZ[right].first)
-    {
-      swap (index, left);
-      bubble_down (left);
-      return;
-    }
+      child = left;
+    else
+      child = right;
 
-    swap (index, right);
-    bubble_down (right);
+    swap (index, child);
+    index = child;
+    goto repeat;
   }
 
+  HB_ALWAYS_INLINE
   void bubble_up (unsigned index)
   {
+    repeat:
     assert (index < heap.length);
 
     if (index == 0) return;
@@ -145,7 +155,8 @@ struct hb_priority_queue_t
       return;
 
     swap (index, parent_index);
-    bubble_up (parent_index);
+    index = parent_index;
+    goto repeat;
   }
 
   void swap (unsigned a, unsigned b)

+ 5 - 1
thirdparty/harfbuzz/src/hb-sanitize.hh

@@ -258,7 +258,8 @@ struct hb_sanitize_context_t :
       this->max_ops = -1;
       return false;
     }
-    return (this->max_ops -= (int) count) > 0;
+    this->max_ops -= (int) count;
+    return true;
   }
 
 #ifndef HB_OPTIMIZE_SIZE
@@ -381,6 +382,9 @@ struct hb_sanitize_context_t :
   }
 
   template <typename Type>
+#ifndef HB_OPTIMIZE_SIZE
+  HB_ALWAYS_INLINE
+#endif
   bool check_struct (const Type *obj) const
   {
     if (sizeof (uintptr_t) == sizeof (uint32_t))

+ 1 - 1
thirdparty/harfbuzz/src/hb-serialize.hh

@@ -266,7 +266,7 @@ struct hb_serialize_context_t
 	   propagate_error (std::forward<Ts> (os)...); }
 
   /* To be called around main operation. */
-  template <typename Type>
+  template <typename Type=char>
   __attribute__((returns_nonnull))
   Type *start_serialize ()
   {

+ 1 - 1
thirdparty/harfbuzz/src/hb-subset-cff-common.hh

@@ -773,7 +773,7 @@ struct subr_subsetter_t
 	}
       }
 
-      /* Doing this here one by one instead of compacting all at the en
+      /* Doing this here one by one instead of compacting all at the end
        * has massive peak-memory saving.
        *
        * The compacting both saves memory and makes further operations

+ 4 - 6
thirdparty/harfbuzz/src/hb-subset-cff1.cc

@@ -551,14 +551,12 @@ struct cff1_subset_plan
 	sid = sidmap.add (sid);
 
       if (sid != last_sid + 1)
-      {
 	subset_charset_ranges.push (code_pair_t {sid, glyph});
 
-	if (glyph == old_glyph && skip)
-	{
-	  glyph = hb_min (_.first - 1, glyph_to_sid_map->arrayZ[old_glyph].glyph);
-	  sid += glyph - old_glyph;
-	}
+      if (glyph == old_glyph && skip)
+      {
+	glyph = hb_min (_.first - 1, glyph_to_sid_map->arrayZ[old_glyph].glyph);
+	sid += glyph - old_glyph;
       }
       last_sid = sid;
     }

+ 0 - 37
thirdparty/harfbuzz/src/hb-subset-cff1.hh

@@ -1,37 +0,0 @@
-/*
- * Copyright © 2018 Adobe Inc.
- *
- *  This is part of HarfBuzz, a text shaping library.
- *
- * Permission is hereby granted, without written agreement and without
- * license or royalty fees, to use, copy, modify, and distribute this
- * software and its documentation for any purpose, provided that the
- * above copyright notice and the following two paragraphs appear in
- * all copies of this software.
- *
- * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
- * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
- * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
- * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
- * DAMAGE.
- *
- * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
- * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
- * FITNESS FOR A PARTICULAR PURPOSE.  THE SOFTWARE PROVIDED HEREUNDER IS
- * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
- * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
- *
- * Adobe Author(s): Michiharu Ariza
- */
-
-#ifndef HB_SUBSET_CFF1_HH
-#define HB_SUBSET_CFF1_HH
-
-#include "hb.hh"
-
-#include "hb-subset-plan.hh"
-
-HB_INTERNAL bool
-hb_subset_cff1 (hb_subset_context_t *c);
-
-#endif /* HB_SUBSET_CFF1_HH */

+ 0 - 37
thirdparty/harfbuzz/src/hb-subset-cff2.hh

@@ -1,37 +0,0 @@
-/*
- * Copyright © 2018 Adobe Inc.
- *
- *  This is part of HarfBuzz, a text shaping library.
- *
- * Permission is hereby granted, without written agreement and without
- * license or royalty fees, to use, copy, modify, and distribute this
- * software and its documentation for any purpose, provided that the
- * above copyright notice and the following two paragraphs appear in
- * all copies of this software.
- *
- * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
- * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
- * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
- * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
- * DAMAGE.
- *
- * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
- * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
- * FITNESS FOR A PARTICULAR PURPOSE.  THE SOFTWARE PROVIDED HEREUNDER IS
- * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
- * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
- *
- * Adobe Author(s): Michiharu Ariza
- */
-
-#ifndef HB_SUBSET_CFF2_HH
-#define HB_SUBSET_CFF2_HH
-
-#include "hb.hh"
-
-#include "hb-subset-plan.hh"
-
-HB_INTERNAL bool
-hb_subset_cff2 (hb_subset_context_t *c);
-
-#endif /* HB_SUBSET_CFF2_HH */

+ 0 - 1
thirdparty/harfbuzz/src/hb-subset-input.cc

@@ -69,7 +69,6 @@ hb_subset_input_t::hb_subset_input_t ()
   sets.drop_tables->add_array (default_drop_tables, ARRAY_LENGTH (default_drop_tables));
 
   hb_tag_t default_no_subset_tables[] = {
-    HB_TAG ('a', 'v', 'a', 'r'),
     HB_TAG ('g', 'a', 's', 'p'),
     HB_TAG ('f', 'p', 'g', 'm'),
     HB_TAG ('p', 'r', 'e', 'p'),

+ 30 - 22
thirdparty/harfbuzz/src/hb-subset-instancer-solver.cc

@@ -253,9 +253,8 @@ _solve (Triple tent, Triple axisLimit, bool negative = false)
      *              axisDef      axisMax
      */
     float newUpper = peak + (1 - gain) * (upper - peak);
-    // I feel like the first condition is always true because
-    // outGain >= gain.
-    if (axisMax <= newUpper && newUpper <= axisDef + (axisMax - axisDef) * 2)
+    assert (axisMax <= newUpper);  // Because outGain >= gain
+    if (newUpper <= axisDef + (axisMax - axisDef) * 2)
     {
       upper = newUpper;
       if (!negative && axisDef + (axisMax - axisDef) * MAX_F2DOT14 < upper)
@@ -362,38 +361,47 @@ _solve (Triple tent, Triple axisLimit, bool negative = false)
   return out;
 }
 
-/* Normalizes value based on a min/default/max triple. */
-static inline float normalizeValue (float v, const Triple &triple, bool extrapolate = false)
+static inline TripleDistances _reverse_triple_distances (const TripleDistances &v)
+{ return TripleDistances (v.positive, v.negative); }
+
+float renormalizeValue (float v, const Triple &triple,
+                        const TripleDistances &triple_distances, bool extrapolate)
 {
-  /*
-  >>> normalizeValue(400, (100, 400, 900))
-  0.0
-  >>> normalizeValue(100, (100, 400, 900))
-  -1.0
-  >>> normalizeValue(650, (100, 400, 900))
-  0.5
-  */
   float lower = triple.minimum, def = triple.middle, upper = triple.maximum;
   assert (lower <= def && def <= upper);
 
   if (!extrapolate)
       v = hb_max (hb_min (v, upper), lower);
 
-  if ((v == def) || (lower == upper))
+  if (v == def)
     return 0.f;
 
-  if ((v < def && lower != def) || (v > def && upper == def))
+  if (def < 0.f)
+    return -renormalizeValue (-v, _reverse_negate (triple),
+                              _reverse_triple_distances (triple_distances), extrapolate);
+
+  /* default >= 0 and v != default */
+  if (v > def)
+    return (v - def) / (upper - def);
+
+  /* v < def */
+  if (lower >= 0.f)
     return (v - def) / (def - lower);
+
+  /* lower < 0 and v < default */
+  float total_distance = triple_distances.negative * (-lower) + triple_distances.positive * def;
+
+  float v_distance;
+  if (v >= 0.f)
+    v_distance = (def - v) * triple_distances.positive;
   else
-  {
-    assert ((v > def && upper != def) ||
-	    (v < def && lower == def));
-    return (v - def) / (upper - def);
-  }
+    v_distance = (-v) * triple_distances.negative + triple_distances.positive * def;
+
+  return (-v_distance) /total_distance;
 }
 
 result_t
-rebase_tent (Triple tent, Triple axisLimit)
+rebase_tent (Triple tent, Triple axisLimit, TripleDistances axis_triple_distances)
 {
   assert (-1.f <= axisLimit.minimum && axisLimit.minimum <= axisLimit.middle && axisLimit.middle <= axisLimit.maximum && axisLimit.maximum <= +1.f);
   assert (-2.f <= tent.minimum && tent.minimum <= tent.middle && tent.middle <= tent.maximum && tent.maximum <= +2.f);
@@ -401,7 +409,7 @@ rebase_tent (Triple tent, Triple axisLimit)
 
   result_t sols = _solve (tent, axisLimit);
 
-  auto n = [&axisLimit] (float v) { return normalizeValue (v, axisLimit, true); };
+  auto n = [&axisLimit, &axis_triple_distances] (float v) { return renormalizeValue (v, axisLimit, axis_triple_distances); };
 
   result_t out;
   for (auto &p : sols)

+ 23 - 1
thirdparty/harfbuzz/src/hb-subset-instancer-solver.hh

@@ -27,6 +27,21 @@
 
 #include "hb.hh"
 
+/* pre-normalized distances */
+struct TripleDistances
+{
+  TripleDistances (): negative (1.f), positive (1.f) {}
+  TripleDistances (float neg_, float pos_): negative (neg_), positive (pos_) {}
+  TripleDistances (float min, float default_, float max)
+  {
+    negative = default_ - min;
+    positive = max - default_;
+  }
+
+  float negative;
+  float positive;
+};
+
 struct Triple {
 
   Triple () :
@@ -66,6 +81,7 @@ struct Triple {
     return current;
   }
 
+
   float minimum;
   float middle;
   float maximum;
@@ -74,6 +90,12 @@ struct Triple {
 using result_item_t = hb_pair_t<float, Triple>;
 using result_t = hb_vector_t<result_item_t>;
 
+/* renormalize a normalized value v to the range of an axis,
+ * considering the prenormalized distances as well as the new axis limits.
+ * Ported from fonttools */
+HB_INTERNAL float renormalizeValue (float v, const Triple &triple,
+                                    const TripleDistances &triple_distances,
+                                    bool extrapolate = true);
 /* Given a tuple (lower,peak,upper) "tent" and new axis limits
  * (axisMin,axisDefault,axisMax), solves how to represent the tent
  * under the new axis configuration.  All values are in normalized
@@ -85,6 +107,6 @@ using result_t = hb_vector_t<result_item_t>;
  * If tent value is Triple{}, that is a special deltaset that should
  * be always-enabled (called "gain").
  */
-HB_INTERNAL result_t rebase_tent (Triple tent, Triple axisLimit);
+HB_INTERNAL result_t rebase_tent (Triple tent, Triple axisLimit, TripleDistances axis_triple_distances);
 
 #endif /* HB_SUBSET_INSTANCER_SOLVER_HH */
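A worked example of the asymmetric weighting renormalizeValue() applies, using illustrative values: a wght axis with min/default/max 100/400/900 gives pre-normalized distances negative = 400 - 100 = 300 and positive = 900 - 400 = 500.

    /* Fragment, assuming the internal types and functions declared in this header. */
    static float renormalize_example ()
    {
      TripleDistances wght_distances (100.f, 400.f, 900.f); /* negative = 300, positive = 500 */
      Triple new_limits {-1.f, 0.2f, 1.f};                  /* new normalized axis limits */
      float r = renormalizeValue (-0.4f, new_limits, wght_distances);
      /* lower < 0 and v < default, so:
       *   total_distance = 300 * 1   + 500 * 0.2 = 400
       *   v_distance     = 0.4 * 300 + 500 * 0.2 = 220
       *   r              = -220 / 400 = -0.55
       * (the removed normalizeValue() would have returned (v - def) / (def - lower) = -0.5) */
      return r;
    }
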

+ 2 - 0
thirdparty/harfbuzz/src/hb-subset-plan-member-list.hh

@@ -105,6 +105,8 @@ HB_SUBSET_PLAN_MEMBER (hb_vector_t<int>, normalized_coords)
 
 //user specified axes range map
 HB_SUBSET_PLAN_MEMBER (hb_hashmap_t E(<hb_tag_t, Triple>), user_axes_location)
+//axis->TripleDistances map (distances in the pre-normalized space)
+HB_SUBSET_PLAN_MEMBER (hb_hashmap_t E(<hb_tag_t, TripleDistances>), axes_triple_distances)
 
 //retained old axis index -> new axis index mapping in fvar axis array
 HB_SUBSET_PLAN_MEMBER (hb_map_t, axes_index_map)

+ 16 - 11
thirdparty/harfbuzz/src/hb-subset-plan.cc

@@ -605,11 +605,14 @@ _populate_unicodes_to_retain (const hb_set_t *unicodes,
 
     /* Add gids which where requested, but not mapped in cmap */
     unsigned num_glyphs = plan->source->get_num_glyphs ();
-    for (hb_codepoint_t gid : *glyphs)
+    hb_codepoint_t first = HB_SET_VALUE_INVALID, last = HB_SET_VALUE_INVALID;
+    for (; glyphs->next_range (&first, &last); )
     {
-      if (gid >= num_glyphs)
+      if (first >= num_glyphs)
 	break;
-      plan->_glyphset_gsub.add (gid);
+      if (last >= num_glyphs)
+        last = num_glyphs - 1;
+      plan->_glyphset_gsub.add_range (first, last);
     }
   }
 
@@ -927,12 +930,14 @@ _normalize_axes_location (hb_face_t *face, hb_subset_plan_t *plan)
       new_axis_idx++;
     }
 
-    if (plan->user_axes_location.has (axis_tag))
+    Triple *axis_range;
+    if (plan->user_axes_location.has (axis_tag, &axis_range))
     {
-      Triple axis_range = plan->user_axes_location.get (axis_tag);
-      int normalized_min = axis.normalize_axis_value (axis_range.minimum);
-      int normalized_default = axis.normalize_axis_value (axis_range.middle);
-      int normalized_max = axis.normalize_axis_value (axis_range.maximum);
+      plan->axes_triple_distances.set (axis_tag, axis.get_triple_distances ());
+
+      int normalized_min = axis.normalize_axis_value (axis_range->minimum);
+      int normalized_default = axis.normalize_axis_value (axis_range->middle);
+      int normalized_max = axis.normalize_axis_value (axis_range->maximum);
 
       if (has_avar && old_axis_idx < avar_axis_count)
       {
@@ -940,9 +945,9 @@ _normalize_axes_location (hb_face_t *face, hb_subset_plan_t *plan)
         normalized_default = seg_maps->map (normalized_default);
         normalized_max = seg_maps->map (normalized_max);
       }
-      plan->axes_location.set (axis_tag, Triple (static_cast<float> (normalized_min),
-                                                 static_cast<float> (normalized_default),
-                                                 static_cast<float> (normalized_max)));
+      plan->axes_location.set (axis_tag, Triple (static_cast<float> (normalized_min / 16384.f),
+                                                 static_cast<float> (normalized_default / 16384.f),
+                                                 static_cast<float> (normalized_max / 16384.f)));
 
       if (normalized_default != 0)
         plan->pinned_at_default = false;

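The new division by 16384.f reflects that normalize_axis_value() (and the avar seg_maps->map() calls) return 2.14 fixed-point integers, which the instancing plan now stores as floats in the normalized [-1, 1] space. A quick refresher on the F2DOT14 encoding; the helper names are illustrative:

    #include <cmath>
    #include <cstdint>

    static inline float   f2dot14_to_float (int16_t v) { return v / 16384.f; } /* 16384 == 1.0 */
    static inline int16_t float_to_f2dot14 (float v)   { return (int16_t) std::lround (v * 16384.f); }
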
+ 6 - 4
thirdparty/harfbuzz/src/hb-subset.cc

@@ -50,6 +50,7 @@
 #include "hb-ot-name-table.hh"
 #include "hb-ot-layout-gsub-table.hh"
 #include "hb-ot-layout-gpos-table.hh"
+#include "hb-ot-var-avar-table.hh"
 #include "hb-ot-var-cvar-table.hh"
 #include "hb-ot-var-fvar-table.hh"
 #include "hb-ot-var-gvar-table.hh"
@@ -273,7 +274,7 @@ _try_subset (const TableType *table,
              hb_vector_t<char>* buf,
              hb_subset_context_t* c /* OUT */)
 {
-  c->serializer->start_serialize<TableType> ();
+  c->serializer->start_serialize ();
   if (c->serializer->in_error ()) return false;
 
   bool needed = table->subset (c);
@@ -516,10 +517,11 @@ _subset_table (hb_subset_plan_t *plan,
   case HB_OT_TAG_fvar:
     if (plan->user_axes_location.is_empty ()) return _passthrough (plan, tag);
     return _subset<const OT::fvar> (plan, buf);
+  case HB_OT_TAG_avar:
+    if (plan->user_axes_location.is_empty ()) return _passthrough (plan, tag);
+    return _subset<const OT::avar> (plan, buf);
   case HB_OT_TAG_STAT:
-    /*TODO(qxliu): change the condition as we support more complex
-     * instancing operation*/
-    if (plan->all_axes_pinned) return _subset<const OT::STAT> (plan, buf);
+    if (!plan->user_axes_location.is_empty ()) return _subset<const OT::STAT> (plan, buf);
     else return _passthrough (plan, tag);
 
   case HB_TAG ('c', 'v', 't', ' '):

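With 'avar' dropped from the default no-subset list (hb-subset-input.cc above) and given a subset() implementation, a subset request that limits or pins an axis now runs avar through OT::avar::subset() instead of passing it through verbatim. A minimal caller-side sketch; the font path, axis, and pinned value are placeholders:

    #include <hb-subset.h>

    static hb_face_t *
    make_wght_700_instance (const char *path)
    {
      hb_blob_t *blob = hb_blob_create_from_file (path);
      hb_face_t *face = hb_face_create (blob, 0);
      hb_blob_destroy (blob);

      hb_subset_input_t *input = hb_subset_input_create_or_fail ();
      if (!input) { hb_face_destroy (face); return nullptr; }

      hb_subset_input_keep_everything (input);
      hb_subset_input_pin_axis_location (input, face, HB_TAG ('w','g','h','t'), 700.f);

      hb_face_t *instance = hb_subset_or_fail (face, input); /* avar/fvar/STAT take the subset paths above */
      hb_subset_input_destroy (input);
      hb_face_destroy (face);
      return instance;
    }
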
+ 3 - 3
thirdparty/harfbuzz/src/hb-uniscribe.cc

@@ -699,7 +699,7 @@ retry:
 				     script_tags,
 				     &item_count);
   if (unlikely (FAILED (hr)))
-    FAIL ("ScriptItemizeOpenType() failed: 0x%08lx", hr);
+    FAIL ("ScriptItemizeOpenType() failed: 0x%08lx", (unsigned long) hr);
 
 #undef MAX_ITEMS
 
@@ -785,7 +785,7 @@ retry:
     }
     if (unlikely (FAILED (hr)))
     {
-      FAIL ("ScriptShapeOpenType() failed: 0x%08lx", hr);
+      FAIL ("ScriptShapeOpenType() failed: 0x%08lx", (unsigned long) hr);
     }
 
     for (unsigned int j = chars_offset; j < chars_offset + item_chars_len; j++)
@@ -811,7 +811,7 @@ retry:
 				     offsets + glyphs_offset,
 				     nullptr);
     if (unlikely (FAILED (hr)))
-      FAIL ("ScriptPlaceOpenType() failed: 0x%08lx", hr);
+      FAIL ("ScriptPlaceOpenType() failed: 0x%08lx", (unsigned long) hr);
 
     if (DEBUG_ENABLED (UNISCRIBE))
       fprintf (stderr, "Item %d RTL %d LayoutRTL %d LogicalOrder %d ScriptTag %c%c%c%c\n",

+ 3 - 3
thirdparty/harfbuzz/src/hb-version.h

@@ -47,20 +47,20 @@ HB_BEGIN_DECLS
  *
  * The minor component of the library version available at compile-time.
  */
-#define HB_VERSION_MINOR 0
+#define HB_VERSION_MINOR 1
 /**
  * HB_VERSION_MICRO:
  *
  * The micro component of the library version available at compile-time.
  */
-#define HB_VERSION_MICRO 0
+#define HB_VERSION_MICRO 1
 
 /**
  * HB_VERSION_STRING:
  *
  * A string literal containing the library version available at compile-time.
  */
-#define HB_VERSION_STRING "8.0.0"
+#define HB_VERSION_STRING "8.1.1"
 
 /**
  * HB_VERSION_ATLEAST:

+ 1 - 0
thirdparty/harfbuzz/src/hb.hh

@@ -64,6 +64,7 @@
 #pragma GCC diagnostic error   "-Wbitwise-instead-of-logical"
 #pragma GCC diagnostic error   "-Wcast-align"
 #pragma GCC diagnostic error   "-Wcast-function-type"
+#pragma GCC diagnostic error   "-Wconstant-conversion"
 #pragma GCC diagnostic error   "-Wcomma"
 #pragma GCC diagnostic error   "-Wdelete-non-virtual-dtor"
 #pragma GCC diagnostic error   "-Wembedded-directive"

Not all files are shown because too many files changed in this diff