Diffstat (limited to 'src/3rdparty/freetype/src/tools')
-rw-r--r--  src/3rdparty/freetype/src/tools/Jamfile  5
-rw-r--r--  src/3rdparty/freetype/src/tools/afblue.pl  2
-rw-r--r--  src/3rdparty/freetype/src/tools/apinames.c  52
-rwxr-xr-x[-rw-r--r--]  src/3rdparty/freetype/src/tools/chktrcmp.py  153
-rw-r--r--  src/3rdparty/freetype/src/tools/cordic.py  41
-rw-r--r--  src/3rdparty/freetype/src/tools/docmaker/content.py  672
-rw-r--r--  src/3rdparty/freetype/src/tools/docmaker/docbeauty.py  111
-rw-r--r--  src/3rdparty/freetype/src/tools/docmaker/docmaker.py  115
-rw-r--r--  src/3rdparty/freetype/src/tools/docmaker/formatter.py  228
-rw-r--r--  src/3rdparty/freetype/src/tools/docmaker/sources.py  410
-rw-r--r--  src/3rdparty/freetype/src/tools/docmaker/tohtml.py  725
-rw-r--r--  src/3rdparty/freetype/src/tools/docmaker/utils.py  127
-rw-r--r--  src/3rdparty/freetype/src/tools/ftfuzzer/README  81
-rw-r--r--  src/3rdparty/freetype/src/tools/ftfuzzer/ftfuzzer.cc  428
-rw-r--r--  src/3rdparty/freetype/src/tools/ftfuzzer/ftmutator.cc  314
-rw-r--r--  src/3rdparty/freetype/src/tools/ftfuzzer/rasterfuzzer.cc  129
-rw-r--r--  src/3rdparty/freetype/src/tools/ftfuzzer/runinput.cc  58
-rw-r--r--  src/3rdparty/freetype/src/tools/ftrandom/Makefile  45
-rw-r--r--  src/3rdparty/freetype/src/tools/ftrandom/ftrandom.c  8
-rw-r--r--  src/3rdparty/freetype/src/tools/glnames.py  1423
-rwxr-xr-x  src/3rdparty/freetype/src/tools/make_distribution_archives.py  208
-rw-r--r--  src/3rdparty/freetype/src/tools/no-copyright  13
-rw-r--r--  src/3rdparty/freetype/src/tools/test_afm.c  7
-rw-r--r--  src/3rdparty/freetype/src/tools/test_bbox.c  5
-rw-r--r--  src/3rdparty/freetype/src/tools/test_trig.c  5
-rwxr-xr-x[-rw-r--r--]  src/3rdparty/freetype/src/tools/update-copyright  4
-rwxr-xr-x[-rw-r--r--]  src/3rdparty/freetype/src/tools/update-copyright-year  65
-rw-r--r--  src/3rdparty/freetype/src/tools/vms_shorten_symbol.c  250
28 files changed, 1375 insertions, 4309 deletions
diff --git a/src/3rdparty/freetype/src/tools/Jamfile b/src/3rdparty/freetype/src/tools/Jamfile
deleted file mode 100644
index 475161e07f..0000000000
--- a/src/3rdparty/freetype/src/tools/Jamfile
+++ /dev/null
@@ -1,5 +0,0 @@
-# Jamfile for src/tools
-#
-SubDir FT2_TOP src tools ;
-
-Main apinames : apinames.c ;
diff --git a/src/3rdparty/freetype/src/tools/afblue.pl b/src/3rdparty/freetype/src/tools/afblue.pl
index 937d4ecf6e..1098e30abc 100644
--- a/src/3rdparty/freetype/src/tools/afblue.pl
+++ b/src/3rdparty/freetype/src/tools/afblue.pl
@@ -5,7 +5,7 @@
#
# Process a blue zone character data file.
#
-# Copyright (C) 2013-2019 by
+# Copyright (C) 2013-2023 by
# David Turner, Robert Wilhelm, and Werner Lemberg.
#
# This file is part of the FreeType project, and may only be used,
diff --git a/src/3rdparty/freetype/src/tools/apinames.c b/src/3rdparty/freetype/src/tools/apinames.c
index aeecf88d22..dfa258fd7d 100644
--- a/src/3rdparty/freetype/src/tools/apinames.c
+++ b/src/3rdparty/freetype/src/tools/apinames.c
@@ -18,11 +18,14 @@
#include <stdio.h>
#include <stdlib.h>
+#include <stdarg.h>
#include <string.h>
#include <ctype.h>
+#include "vms_shorten_symbol.c"
+
#define PROGRAM_NAME "apinames"
-#define PROGRAM_VERSION "0.3"
+#define PROGRAM_VERSION "0.5"
#define LINEBUFF_SIZE 1024
@@ -33,6 +36,7 @@ typedef enum OutputFormat_
OUTPUT_WINDOWS_DEF, /* output a Windows .DEF file for Visual C++ or Mingw */
OUTPUT_BORLAND_DEF, /* output a Windows .DEF file for Borland C++ */
OUTPUT_WATCOM_LBC, /* output a Watcom Linker Command File */
+ OUTPUT_VMS_OPT, /* output an OpenVMS Linker Option File */
OUTPUT_NETWARE_IMP, /* output a NetWare ImportFile */
OUTPUT_GNU_VERMAP /* output a version map for GNU or Solaris linker */
@@ -40,9 +44,20 @@ typedef enum OutputFormat_
static void
-panic( const char* message )
+panic( const char* fmt,
+ ... )
{
- fprintf( stderr, "PANIC: %s\n", message );
+ va_list ap;
+
+
+ fprintf( stderr, "PANIC: " );
+
+ va_start( ap, fmt );
+ vfprintf( stderr, fmt, ap );
+ va_end( ap );
+
+ fprintf( stderr, "\n" );
+
exit(2);
}
@@ -167,7 +182,6 @@ names_dump( FILE* out,
case OUTPUT_WATCOM_LBC:
{
const char* dot;
- char temp[512];
if ( !dll_name )
@@ -181,7 +195,8 @@ names_dump( FILE* out,
dot = strchr( dll_name, '.' );
if ( dot )
{
- int len = dot - dll_name;
+ char temp[512];
+ int len = dot - dll_name;
if ( len > (int)( sizeof ( temp ) - 1 ) )
@@ -200,6 +215,28 @@ names_dump( FILE* out,
break;
+ case OUTPUT_VMS_OPT:
+ fprintf( out, "case_sensitive=YES\n" );
+
+ for ( nn = 0; nn < num_names; nn++ )
+ {
+ char short_symbol[32];
+
+
+ if ( vms_shorten_symbol( the_names[nn].name, short_symbol, 1 ) == -1 )
+ panic( "could not shorten name '%s'", the_names[nn].name );
+ fprintf( out, "symbol_vector = ( %s = PROCEDURE)\n", short_symbol );
+
+ /* Also emit a 64-bit symbol, as created by the `vms_auto64` tool. */
+ /* It has the string '64__' appended to its name. */
+ strcat( the_names[nn].name , "64__" );
+ if ( vms_shorten_symbol( the_names[nn].name, short_symbol, 1 ) == -1 )
+ panic( "could not shorten name '%s'", the_names[nn].name );
+ fprintf( out, "symbol_vector = ( %s = PROCEDURE)\n", short_symbol );
+ }
+
+ break;
+
case OUTPUT_NETWARE_IMP:
if ( dll_name )
fprintf( out, " (%s)\n", dll_name );
@@ -352,6 +389,7 @@ usage( void )
" -w output .DEF file for Visual C++ and Mingw\n"
" -wB output .DEF file for Borland C++\n"
" -wW output Watcom Linker Response File\n"
+ " -wV output OpenVMS Linker Options File\n"
" -wN output NetWare Import File\n"
" -wL output version map for GNU or Solaris linker\n"
"\n";
@@ -445,6 +483,10 @@ main( int argc,
format = OUTPUT_WATCOM_LBC;
break;
+ case 'V':
+ format = OUTPUT_VMS_OPT;
+ break;
+
case 'N':
format = OUTPUT_NETWARE_IMP;
break;
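
Note: the new OUTPUT_VMS_OPT branch above emits one `symbol_vector = ( NAME = PROCEDURE)' line per exported function, plus a second entry for the `64__'-suffixed variant produced by the `vms_auto64' tool; both names pass through `vms_shorten_symbol()' first because OpenVMS restricts symbol length. A minimal Python sketch of that emission loop, with a hypothetical stand-in for the shortener (the real logic lives in vms_shorten_symbol.c):

# Sketch only: `shorten' stands in for vms_shorten_symbol(), which
# compresses over-long names rather than blindly truncating them.
def shorten(name, limit=31):
    return name if len(name) <= limit else name[:limit]

def dump_vms_opt(names):
    print("case_sensitive=YES")
    for name in names:
        # plain symbol first, then the `vms_auto64'-style 64__ variant
        for symbol in (name, name + "64__"):
            print("symbol_vector = ( %s = PROCEDURE)" % shorten(symbol))

dump_vms_opt(["FT_Init_FreeType", "FT_Done_FreeType"])
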
diff --git a/src/3rdparty/freetype/src/tools/chktrcmp.py b/src/3rdparty/freetype/src/tools/chktrcmp.py
index 4c40bdafdb..d072a87866 100644..100755
--- a/src/3rdparty/freetype/src/tools/chktrcmp.py
+++ b/src/3rdparty/freetype/src/tools/chktrcmp.py
@@ -1,114 +1,119 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
#
# Check trace components in FreeType 2 source.
-# Author: suzuki toshiya, 2009, 2013
+# Author: suzuki toshiya, 2009, 2013, 2020
#
# This code is explicitly into the public domain.
-
import sys
import os
import re
-SRC_FILE_LIST = []
-USED_COMPONENT = {}
+SRC_FILE_LIST = []
+USED_COMPONENT = {}
KNOWN_COMPONENT = {}
-SRC_FILE_DIRS = [ "src" ]
-TRACE_DEF_FILES = [ "include/freetype/internal/fttrace.h" ]
+SRC_FILE_DIRS = ["src"]
+TRACE_DEF_FILES = ["include/freetype/internal/fttrace.h"]
+
+
+def usage():
+ print("Usage: %s [option]" % sys.argv[0])
+ print("Search used-but-defined and defined-but-not-used trace_XXX macros")
+ print("")
+ print(" --help:")
+ print(" Show this help")
+ print("")
+ print(" --src-dirs=dir1:dir2:...")
+ print(" Specify the directories of C source files to be checked")
+ print(" Default is %s" % ":".join(SRC_FILE_DIRS))
+ print("")
+ print(" --def-files=file1:file2:...")
+ print(" Specify the header files including FT_TRACE_DEF()")
+ print(" Default is %s" % ":".join(TRACE_DEF_FILES))
+ print("")
# --------------------------------------------------------------
# Parse command line options
#
-
-for i in range( 1, len( sys.argv ) ):
- if sys.argv[i].startswith( "--help" ):
- print "Usage: %s [option]" % sys.argv[0]
- print "Search used-but-defined and defined-but-not-used trace_XXX macros"
- print ""
- print " --help:"
- print " Show this help"
- print ""
- print " --src-dirs=dir1:dir2:..."
- print " Specify the directories of C source files to be checked"
- print " Default is %s" % ":".join( SRC_FILE_DIRS )
- print ""
- print " --def-files=file1:file2:..."
- print " Specify the header files including FT_TRACE_DEF()"
- print " Default is %s" % ":".join( TRACE_DEF_FILES )
- print ""
- exit(0)
- if sys.argv[i].startswith( "--src-dirs=" ):
- SRC_FILE_DIRS = sys.argv[i].replace( "--src-dirs=", "", 1 ).split( ":" )
- elif sys.argv[i].startswith( "--def-files=" ):
- TRACE_DEF_FILES = sys.argv[i].replace( "--def-files=", "", 1 ).split( ":" )
-
+for i in range(1, len(sys.argv)):
+ if sys.argv[i].startswith("--help"):
+ usage()
+ exit(0)
+ if sys.argv[i].startswith("--src-dirs="):
+ SRC_FILE_DIRS = sys.argv[i].replace("--src-dirs=", "", 1).split(":")
+ elif sys.argv[i].startswith("--def-files="):
+ TRACE_DEF_FILES = sys.argv[i].replace("--def-files=", "", 1).split(":")
# --------------------------------------------------------------
# Scan C source and header files using trace macros.
#
-c_pathname_pat = re.compile( '^.*\.[ch]$', re.IGNORECASE )
-trace_use_pat = re.compile( '^[ \t]*#define[ \t]+FT_COMPONENT[ \t]+trace_' )
+c_pathname_pat = re.compile('^.*\.[ch]$', re.IGNORECASE)
+trace_use_pat = re.compile('^[ \t]*#define[ \t]+FT_COMPONENT[ \t]+')
for d in SRC_FILE_DIRS:
- for ( p, dlst, flst ) in os.walk( d ):
- for f in flst:
- if c_pathname_pat.match( f ) != None:
- src_pathname = os.path.join( p, f )
-
- line_num = 0
- for src_line in open( src_pathname, 'r' ):
- line_num = line_num + 1
- src_line = src_line.strip()
- if trace_use_pat.match( src_line ) != None:
- component_name = trace_use_pat.sub( '', src_line )
- if component_name in USED_COMPONENT:
- USED_COMPONENT[component_name].append( "%s:%d" % ( src_pathname, line_num ) )
- else:
- USED_COMPONENT[component_name] = [ "%s:%d" % ( src_pathname, line_num ) ]
-
+ for (p, dlst, flst) in os.walk(d):
+ for f in flst:
+ if c_pathname_pat.match(f) is not None:
+ src_pathname = os.path.join(p, f)
+
+ line_num = 0
+ for src_line in open(src_pathname, 'r'):
+ line_num = line_num + 1
+ src_line = src_line.strip()
+ if trace_use_pat.match(src_line) is not None:
+ component_name = trace_use_pat.sub('', src_line)
+ if component_name in USED_COMPONENT:
+ USED_COMPONENT[component_name]\
+ .append("%s:%d" % (src_pathname, line_num))
+ else:
+ USED_COMPONENT[component_name] =\
+ ["%s:%d" % (src_pathname, line_num)]
# --------------------------------------------------------------
# Scan header file(s) defining trace macros.
#
-trace_def_pat_opn = re.compile( '^.*FT_TRACE_DEF[ \t]*\([ \t]*' )
-trace_def_pat_cls = re.compile( '[ \t\)].*$' )
+trace_def_pat_opn = re.compile('^.*FT_TRACE_DEF[ \t]*\([ \t]*')
+trace_def_pat_cls = re.compile('[ \t\)].*$')
for f in TRACE_DEF_FILES:
- line_num = 0
- for hdr_line in open( f, 'r' ):
- line_num = line_num + 1
- hdr_line = hdr_line.strip()
- if trace_def_pat_opn.match( hdr_line ) != None:
- component_name = trace_def_pat_opn.sub( '', hdr_line )
- component_name = trace_def_pat_cls.sub( '', component_name )
- if component_name in KNOWN_COMPONENT:
- print "trace component %s is defined twice, see %s and fttrace.h:%d" % \
- ( component_name, KNOWN_COMPONENT[component_name], line_num )
- else:
- KNOWN_COMPONENT[component_name] = "%s:%d" % \
- ( os.path.basename( f ), line_num )
-
+ line_num = 0
+ for hdr_line in open(f, 'r'):
+ line_num = line_num + 1
+ hdr_line = hdr_line.strip()
+ if trace_def_pat_opn.match(hdr_line) is not None:
+ component_name = trace_def_pat_opn.sub('', hdr_line)
+ component_name = trace_def_pat_cls.sub('', component_name)
+ if component_name in KNOWN_COMPONENT:
+ print("trace component %s is defined twice,"
+ " see %s and fttrace.h:%d" %
+ (component_name, KNOWN_COMPONENT[component_name],
+ line_num))
+ else:
+ KNOWN_COMPONENT[component_name] =\
+ "%s:%d" % (os.path.basename(f), line_num)
# --------------------------------------------------------------
# Compare the used and defined trace macros.
#
-print "# Trace component used in the implementations but not defined in fttrace.h."
-cmpnt = USED_COMPONENT.keys()
+print("# Trace component used in the implementations but not defined in "
+ "fttrace.h.")
+cmpnt = list(USED_COMPONENT.keys())
cmpnt.sort()
for c in cmpnt:
- if c not in KNOWN_COMPONENT:
- print "Trace component %s (used in %s) is not defined." % ( c, ", ".join( USED_COMPONENT[c] ) )
+ if c not in KNOWN_COMPONENT:
+ print("Trace component %s (used in %s) is not defined." %
+ (c, ", ".join(USED_COMPONENT[c])))
-print "# Trace component is defined but not used in the implementations."
-cmpnt = KNOWN_COMPONENT.keys()
+print("# Trace component is defined but not used in the implementations.")
+cmpnt = list(KNOWN_COMPONENT.keys())
cmpnt.sort()
for c in cmpnt:
- if c not in USED_COMPONENT:
- if c != "any":
- print "Trace component %s (defined in %s) is not used." % ( c, KNOWN_COMPONENT[c] )
-
+ if c not in USED_COMPONENT:
+ if c != "any":
+ print("Trace component %s (defined in %s) is not used." %
+ (c, KNOWN_COMPONENT[c]))
diff --git a/src/3rdparty/freetype/src/tools/cordic.py b/src/3rdparty/freetype/src/tools/cordic.py
index 6742c90dfe..6511429889 100644
--- a/src/3rdparty/freetype/src/tools/cordic.py
+++ b/src/3rdparty/freetype/src/tools/cordic.py
@@ -1,33 +1,32 @@
+#!/usr/bin/env python3
+
# compute arctangent table for CORDIC computations in fttrigon.c
-import sys, math
+import math
-#units = 64*65536.0 # don't change !!
-units = 180 * 2**16
-scale = units/math.pi
+# units = 64*65536.0 # don't change !!
+units = 180 * 2 ** 16
+scale = units / math.pi
shrink = 1.0
-comma = ""
+angles2 = []
-print ""
-print "table of arctan( 1/2^n ) for PI = " + repr(units/65536.0) + " units"
+print("")
+print("table of arctan( 1/2^n ) for PI = " + repr(units / 65536.0) + " units")
-for n in range(1,32):
+for n in range(1, 32):
- x = 0.5**n # tangent value
+ x = 0.5 ** n # tangent value
- angle = math.atan(x) # arctangent
- angle2 = round(angle*scale) # arctangent in FT_Angle units
+ angle = math.atan(x) # arctangent
+ angle2 = round(angle * scale) # arctangent in FT_Angle units
if angle2 <= 0:
break
- sys.stdout.write( comma + repr( int(angle2) ) )
- comma = ", "
-
- shrink /= math.sqrt( 1 + x*x )
-
-print
-print "shrink factor = " + repr( shrink )
-print "shrink factor 2 = " + repr( int( shrink * (2**32) ) )
-print "expansion factor = " + repr( 1/shrink )
-print ""
+ angles2.append(repr(int(angle2)))
+ shrink /= math.sqrt(1 + x * x)
+print(", ".join(angles2))
+print("shrink factor = " + repr(shrink))
+print("shrink factor 2 = " + repr(int(shrink * (2 ** 32))))
+print("expansion factor = " + repr(1 / shrink))
+print("")
diff --git a/src/3rdparty/freetype/src/tools/docmaker/content.py b/src/3rdparty/freetype/src/tools/docmaker/content.py
deleted file mode 100644
index 198780aee4..0000000000
--- a/src/3rdparty/freetype/src/tools/docmaker/content.py
+++ /dev/null
@@ -1,672 +0,0 @@
-#
-# content.py
-#
-# Parse comment blocks to build content blocks (library file).
-#
-# Copyright 2002-2018 by
-# David Turner.
-#
-# This file is part of the FreeType project, and may only be used,
-# modified, and distributed under the terms of the FreeType project
-# license, LICENSE.TXT. By continuing to use, modify, or distribute
-# this file you indicate that you have read the license and
-# understand and accept it fully.
-
-#
-# This file contains routines to parse documentation comment blocks,
-# building more structured objects out of them.
-#
-
-
-from sources import *
-from utils import *
-
-import string, re
-
-
-#
-# Regular expressions to detect code sequences. `Code sequences' are simply
-# code fragments embedded in '{' and '}', as demonstrated in the following
-# example.
-#
-# {
-# x = y + z;
-# if ( zookoo == 2 )
-# {
-# foobar();
-# }
-# }
-#
-# Note that the indentation of the first opening brace and the last closing
-# brace must be exactly the same. The code sequence itself should have a
-# larger indentation than the surrounding braces.
-#
-re_code_start = re.compile( r"(\s*){\s*$" )
-re_code_end = re.compile( r"(\s*)}\s*$" )
-
-
-#
-# A regular expression to isolate identifiers from other text. Two syntax
-# forms are supported:
-#
-# <name>
-# <name>[<id>]
-#
-# where both `<name>' and `<id>' consist of alphanumeric characters, `_',
-# and `-'. Use `<id>' if there are multiple, valid `<name>' entries; in the
-# index, `<id>' will be appended in parentheses.
-#
-# For example,
-#
-# stem_darkening[autofit]
-#
-# becomes `stem_darkening (autofit)' in the index.
-#
-re_identifier = re.compile( r"""
- ((?:\w|-)+
- (?:\[(?:\w|-)+\])?)
- """, re.VERBOSE )
-
-
-#
-# We collect macro names ending in `_H' (group 1), as defined in
-# `freetype/config/ftheader.h'. While outputting the object data, we use
-# this info together with the object's file location (group 2) to emit the
-# appropriate header file macro and its associated file name before the
-# object itself.
-#
-# Example:
-#
-# #define FT_FREETYPE_H <freetype.h>
-#
-re_header_macro = re.compile( r'^#define\s{1,}(\w{1,}_H)\s{1,}<(.*)>' )
-
-
-################################################################
-##
-## DOC CODE CLASS
-##
-## The `DocCode' class is used to store source code lines.
-##
-## `self.lines' contains a set of source code lines that will be dumped as
-## HTML in a <PRE> tag.
-##
-## The object is filled line by line by the parser; it strips the leading
-## `margin' space from each input line before storing it in `self.lines'.
-##
-class DocCode:
-
- def __init__( self, margin, lines ):
- self.lines = []
- self.words = None
-
- # remove margin spaces
- for l in lines:
- if string.strip( l[:margin] ) == "":
- l = l[margin:]
- self.lines.append( l )
-
- def dump( self, prefix = "", width = 60 ):
- lines = self.dump_lines( 0, width )
- for l in lines:
- print( prefix + l )
-
- def dump_lines( self, margin = 0, width = 60 ):
- result = []
- for l in self.lines:
- result.append( " " * margin + l )
- return result
-
-
-
-################################################################
-##
-## DOC PARA CLASS
-##
-## `Normal' text paragraphs are stored in the `DocPara' class.
-##
-## `self.words' contains the list of words that make up the paragraph.
-##
-class DocPara:
-
- def __init__( self, lines ):
- self.lines = None
- self.words = []
- for l in lines:
- l = string.strip( l )
- self.words.extend( string.split( l ) )
-
- def dump( self, prefix = "", width = 60 ):
- lines = self.dump_lines( 0, width )
- for l in lines:
- print( prefix + l )
-
- def dump_lines( self, margin = 0, width = 60 ):
- cur = "" # current line
- col = 0 # current width
- result = []
-
- for word in self.words:
- ln = len( word )
- if col > 0:
- ln = ln + 1
-
- if col + ln > width:
- result.append( " " * margin + cur )
- cur = word
- col = len( word )
- else:
- if col > 0:
- cur = cur + " "
- cur = cur + word
- col = col + ln
-
- if col > 0:
- result.append( " " * margin + cur )
-
- return result
-
-
-################################################################
-##
-## DOC FIELD CLASS
-##
-## The `DocField' class stores a list containing either `DocPara' or
-## `DocCode' objects. Each DocField object also has an optional `name'
-## that is used when the object corresponds to a field or value definition.
-##
-class DocField:
-
- def __init__( self, name, lines ):
- self.name = name # can be `None' for normal paragraphs/sources
- self.items = [] # list of items
-
- mode_none = 0 # start parsing mode
- mode_code = 1 # parsing code sequences
- mode_para = 3 # parsing normal paragraph
-
- margin = -1 # current code sequence indentation
- cur_lines = []
-
- # analyze the markup lines to check whether they contain paragraphs,
- # code sequences, or fields definitions
- #
- start = 0
- mode = mode_none
-
- for l in lines:
- # are we parsing a code sequence?
- if mode == mode_code:
- m = re_code_end.match( l )
- if m and len( m.group( 1 ) ) <= margin:
- # that's it, we finished the code sequence
- code = DocCode( 0, cur_lines )
- self.items.append( code )
- margin = -1
- cur_lines = []
- mode = mode_none
- else:
- # otherwise continue the code sequence
- cur_lines.append( l[margin:] )
- else:
- # start of code sequence?
- m = re_code_start.match( l )
- if m:
- # save current lines
- if cur_lines:
- para = DocPara( cur_lines )
- self.items.append( para )
- cur_lines = []
-
- # switch to code extraction mode
- margin = len( m.group( 1 ) )
- mode = mode_code
- else:
- if not string.split( l ) and cur_lines:
- # if the line is empty, we end the current paragraph,
- # if any
- para = DocPara( cur_lines )
- self.items.append( para )
- cur_lines = []
- else:
- # otherwise, simply add the line to the current
- # paragraph
- cur_lines.append( l )
-
- if mode == mode_code:
- # unexpected end of code sequence
- code = DocCode( margin, cur_lines )
- self.items.append( code )
- elif cur_lines:
- para = DocPara( cur_lines )
- self.items.append( para )
-
- def dump( self, prefix = "" ):
- if self.field:
- print( prefix + self.field + " ::" )
- prefix = prefix + "----"
-
- first = 1
- for p in self.items:
- if not first:
- print( "" )
- p.dump( prefix )
- first = 0
-
- def dump_lines( self, margin = 0, width = 60 ):
- result = []
- nl = None
-
- for p in self.items:
- if nl:
- result.append( "" )
-
- result.extend( p.dump_lines( margin, width ) )
- nl = 1
-
- return result
-
-
-#
-# A regular expression to detect field definitions.
-#
-# Examples:
-#
-# foo ::
-# foo.bar ::
-#
-re_field = re.compile( r"""
- \s*
- (
- \w*
- |
- \w (\w | \.)* \w
- )
- \s* ::
- """, re.VERBOSE )
-
-
-################################################################
-##
-## DOC MARKUP CLASS
-##
-class DocMarkup:
-
- def __init__( self, tag, lines ):
- self.tag = string.lower( tag )
- self.fields = []
-
- cur_lines = []
- field = None
- mode = 0
-
- for l in lines:
- m = re_field.match( l )
- if m:
- # We detected the start of a new field definition.
-
- # first, save the current one
- if cur_lines:
- f = DocField( field, cur_lines )
- self.fields.append( f )
- cur_lines = []
- field = None
-
- field = m.group( 1 ) # record field name
- ln = len( m.group( 0 ) )
- l = " " * ln + l[ln:]
- cur_lines = [l]
- else:
- cur_lines.append( l )
-
- if field or cur_lines:
- f = DocField( field, cur_lines )
- self.fields.append( f )
-
- def get_name( self ):
- try:
- return self.fields[0].items[0].words[0]
- except:
- return None
-
- def dump( self, margin ):
- print( " " * margin + "<" + self.tag + ">" )
- for f in self.fields:
- f.dump( " " )
- print( " " * margin + "</" + self.tag + ">" )
-
-
-################################################################
-##
-## DOC CHAPTER CLASS
-##
-class DocChapter:
-
- def __init__( self, block ):
- self.block = block
- self.sections = []
- if block:
- self.name = block.name
- self.title = block.get_markup_words( "title" )
- self.order = block.get_markup_words( "sections" )
- else:
- self.name = "Other"
- self.title = string.split( "Miscellaneous" )
- self.order = []
-
-
-################################################################
-##
-## DOC SECTION CLASS
-##
-class DocSection:
-
- def __init__( self, name = "Other" ):
- self.name = name
- self.blocks = {}
- self.block_names = [] # ordered block names in section
- self.defs = []
- self.abstract = ""
- self.description = ""
- self.order = []
- self.title = "ERROR"
- self.chapter = None
-
- def add_def( self, block ):
- self.defs.append( block )
-
- def add_block( self, block ):
- self.block_names.append( block.name )
- self.blocks[block.name] = block
-
- def process( self ):
- # look up one block that contains a valid section description
- for block in self.defs:
- title = block.get_markup_text( "title" )
- if title:
- self.title = title
- self.abstract = block.get_markup_words( "abstract" )
- self.description = block.get_markup_items( "description" )
- self.order = block.get_markup_words_all( "order" )
- return
-
- def reorder( self ):
- self.block_names = sort_order_list( self.block_names, self.order )
-
-
-################################################################
-##
-## CONTENT PROCESSOR CLASS
-##
-class ContentProcessor:
-
- def __init__( self ):
- """Initialize a block content processor."""
- self.reset()
-
- self.sections = {} # dictionary of documentation sections
- self.section = None # current documentation section
-
- self.chapters = [] # list of chapters
-
- self.headers = {} # dictionary of header macros
-
- def set_section( self, section_name ):
- """Set current section during parsing."""
- if not section_name in self.sections:
- section = DocSection( section_name )
- self.sections[section_name] = section
- self.section = section
- else:
- self.section = self.sections[section_name]
-
- def add_chapter( self, block ):
- chapter = DocChapter( block )
- self.chapters.append( chapter )
-
- def reset( self ):
- """Reset the content processor for a new block."""
- self.markups = []
- self.markup = None
- self.markup_lines = []
-
- def add_markup( self ):
- """Add a new markup section."""
- if self.markup and self.markup_lines:
-
- # get rid of last line of markup if it's empty
- marks = self.markup_lines
- if len( marks ) > 0 and not string.strip( marks[-1] ):
- self.markup_lines = marks[:-1]
-
- m = DocMarkup( self.markup, self.markup_lines )
-
- self.markups.append( m )
-
- self.markup = None
- self.markup_lines = []
-
- def process_content( self, content ):
- """Process a block content and return a list of DocMarkup objects
- corresponding to it."""
- markup = None
- markup_lines = []
- first = 1
-
- margin = -1
- in_code = 0
-
- for line in content:
- if in_code:
- m = re_code_end.match( line )
- if m and len( m.group( 1 ) ) <= margin:
- in_code = 0
- margin = -1
- else:
- m = re_code_start.match( line )
- if m:
- in_code = 1
- margin = len( m.group( 1 ) )
-
- found = None
-
- if not in_code:
- for t in re_markup_tags:
- m = t.match( line )
- if m:
- found = string.lower( m.group( 1 ) )
- prefix = len( m.group( 0 ) )
- # remove markup from line
- line = " " * prefix + line[prefix:]
- break
-
- # is it the start of a new markup section ?
- if found:
- first = 0
- self.add_markup() # add current markup content
- self.markup = found
- if len( string.strip( line ) ) > 0:
- self.markup_lines.append( line )
- elif first == 0:
- self.markup_lines.append( line )
-
- self.add_markup()
-
- return self.markups
-
- def parse_sources( self, source_processor ):
- blocks = source_processor.blocks
- count = len( blocks )
-
- for n in range( count ):
- source = blocks[n]
- if source.content:
- # this is a documentation comment, we need to catch
- # all following normal blocks in the "follow" list
- #
- follow = []
- m = n + 1
- while m < count and not blocks[m].content:
- follow.append( blocks[m] )
- m = m + 1
-
- doc_block = DocBlock( source, follow, self )
-
- def finish( self ):
- # process all sections to extract their abstract, description
- # and ordered list of items
- #
- for sec in self.sections.values():
- sec.process()
-
- # process chapters to check that all sections are correctly
- # listed there
- for chap in self.chapters:
- for sec in chap.order:
- if sec in self.sections:
- section = self.sections[sec]
- section.chapter = chap
- section.reorder()
- chap.sections.append( section )
- else:
- sys.stderr.write( "WARNING: chapter '" + \
- chap.name + "' in " + chap.block.location() + \
- " lists unknown section '" + sec + "'\n" )
-
- # check that all sections are in a chapter
- #
- others = []
- for sec in self.sections.values():
- if not sec.chapter:
- sec.reorder()
- others.append( sec )
-
- # create a new special chapter for all remaining sections
- # when necessary
- #
- if others:
- chap = DocChapter( None )
- chap.sections = others
- self.chapters.append( chap )
-
-
-################################################################
-##
-## DOC BLOCK CLASS
-##
-class DocBlock:
-
- def __init__( self, source, follow, processor ):
- processor.reset()
-
- self.source = source
- self.code = []
- self.type = "ERRTYPE"
- self.name = "ERRNAME"
- self.section = processor.section
- self.markups = processor.process_content( source.content )
-
- # compute block type from first markup tag
- try:
- self.type = self.markups[0].tag
- except:
- pass
-
- # compute block name from first markup paragraph
- try:
- markup = self.markups[0]
- para = markup.fields[0].items[0]
- name = para.words[0]
- m = re_identifier.match( name )
- if m:
- name = m.group( 1 )
- self.name = name
- except:
- pass
-
- if self.type == "section":
- # detect new section starts
- processor.set_section( self.name )
- processor.section.add_def( self )
- elif self.type == "chapter":
- # detect new chapter
- processor.add_chapter( self )
- else:
- processor.section.add_block( self )
-
- # now, compute the source lines relevant to this documentation
- # block. We keep normal comments in for obvious reasons (??)
- source = []
- for b in follow:
- if b.format:
- break
- for l in b.lines:
- # collect header macro definitions
- m = re_header_macro.match( l )
- if m:
- processor.headers[m.group( 2 )] = m.group( 1 );
-
- # we use "/* */" as a separator
- if re_source_sep.match( l ):
- break
- source.append( l )
-
- # now strip the leading and trailing empty lines from the sources
- start = 0
- end = len( source ) - 1
-
- while start < end and not string.strip( source[start] ):
- start = start + 1
-
- while start < end and not string.strip( source[end] ):
- end = end - 1
-
- if start == end and not string.strip( source[start] ):
- self.code = []
- else:
- self.code = source[start:end + 1]
-
- def location( self ):
- return self.source.location()
-
- def get_markup( self, tag_name ):
- """Return the DocMarkup corresponding to a given tag in a block."""
- for m in self.markups:
- if m.tag == string.lower( tag_name ):
- return m
- return None
-
- def get_markup_words( self, tag_name ):
- try:
- m = self.get_markup( tag_name )
- return m.fields[0].items[0].words
- except:
- return []
-
- def get_markup_words_all( self, tag_name ):
- try:
- m = self.get_markup( tag_name )
- words = []
- for item in m.fields[0].items:
- # We honour empty lines in an `<Order>' section element by
- # adding the sentinel `/empty/'. The formatter should then
- # convert it to an appropriate representation in the
- # `section_enter' function.
- words += item.words
- words.append( "/empty/" )
- return words
- except:
- return []
-
- def get_markup_text( self, tag_name ):
- result = self.get_markup_words( tag_name )
- return string.join( result )
-
- def get_markup_items( self, tag_name ):
- try:
- m = self.get_markup( tag_name )
- return m.fields[0].items
- except:
- return None
-
-# eof
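
Note: the parser in the deleted content.py hinges on a few small regular expressions; `re_field' above, for instance, recognizes field definitions of the form `foo ::'. A minimal sketch of how it splits a field name from the following description (the sample line is hypothetical):

import re

re_field = re.compile(r"""
    \s*
    (
      \w*
    |
      \w (\w | \.)* \w
    )
    \s* ::
    """, re.VERBOSE)

m = re_field.match("  error :: The resulting error code.")
print(m.group(1))                            # -> "error"
print(m.string[len(m.group(0)):].strip())    # -> "The resulting error code."
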
diff --git a/src/3rdparty/freetype/src/tools/docmaker/docbeauty.py b/src/3rdparty/freetype/src/tools/docmaker/docbeauty.py
deleted file mode 100644
index 0b021fa6c9..0000000000
--- a/src/3rdparty/freetype/src/tools/docmaker/docbeauty.py
+++ /dev/null
@@ -1,111 +0,0 @@
-#!/usr/bin/env python
-#
-# DocBeauty (c) 2003, 2004, 2008 David Turner <david@freetype.org>
-#
-# This program is used to beautify the documentation comments used
-# in the FreeType 2 public headers.
-#
-
-from sources import *
-from content import *
-from utils import *
-
-import sys, os, string, getopt
-
-
-content_processor = ContentProcessor()
-
-
-def beautify_block( block ):
- if block.content:
- content_processor.reset()
-
- markups = content_processor.process_content( block.content )
- text = []
- first = 1
-
- for markup in markups:
- text.extend( markup.beautify( first ) )
- first = 0
-
- # now beautify the documentation "borders" themselves
- lines = [" /*************************************************************************"]
- for l in text:
- lines.append( " *" + l )
- lines.append( " */" )
-
- block.lines = lines
-
-
-def usage():
- print( "\nDocBeauty 0.1 Usage information\n" )
- print( " docbeauty [options] file1 [file2 ...]\n" )
- print( "using the following options:\n" )
- print( " -h : print this page" )
- print( " -b : backup original files with the 'orig' extension" )
- print( "" )
- print( " --backup : same as -b" )
-
-
-def main( argv ):
- """main program loop"""
-
- global output_dir
-
- try:
- opts, args = getopt.getopt( sys.argv[1:], \
- "hb", \
- ["help", "backup"] )
- except getopt.GetoptError:
- usage()
- sys.exit( 2 )
-
- if args == []:
- usage()
- sys.exit( 1 )
-
- # process options
- #
- output_dir = None
- do_backup = None
-
- for opt in opts:
- if opt[0] in ( "-h", "--help" ):
- usage()
- sys.exit( 0 )
-
- if opt[0] in ( "-b", "--backup" ):
- do_backup = 1
-
- # create context and processor
- source_processor = SourceProcessor()
-
- # retrieve the list of files to process
- file_list = make_file_list( args )
- for filename in file_list:
- source_processor.parse_file( filename )
-
- for block in source_processor.blocks:
- beautify_block( block )
-
- new_name = filename + ".new"
- ok = None
-
- try:
- file = open( new_name, "wt" )
- for block in source_processor.blocks:
- for line in block.lines:
- file.write( line )
- file.write( "\n" )
- file.close()
- except:
- ok = 0
-
-
-# if called from the command line
-#
-if __name__ == '__main__':
- main( sys.argv )
-
-
-# eof
diff --git a/src/3rdparty/freetype/src/tools/docmaker/docmaker.py b/src/3rdparty/freetype/src/tools/docmaker/docmaker.py
deleted file mode 100644
index eb49afb0a0..0000000000
--- a/src/3rdparty/freetype/src/tools/docmaker/docmaker.py
+++ /dev/null
@@ -1,115 +0,0 @@
-#!/usr/bin/env python
-#
-# docmaker.py
-#
-# Convert source code markup to HTML documentation.
-#
-# Copyright 2002-2018 by
-# David Turner.
-#
-# This file is part of the FreeType project, and may only be used,
-# modified, and distributed under the terms of the FreeType project
-# license, LICENSE.TXT. By continuing to use, modify, or distribute
-# this file you indicate that you have read the license and
-# understand and accept it fully.
-
-#
-# This program is a re-write of the original DocMaker tool used to generate
-# the API Reference of the FreeType font rendering engine by converting
-# in-source comments into structured HTML.
-#
-# This new version is capable of outputting XML data as well as accepting
-# more liberal formatting options. It also uses regular expression matching
-# and substitution to speed up operation significantly.
-#
-
-from sources import *
-from content import *
-from utils import *
-from formatter import *
-from tohtml import *
-
-import utils
-
-import sys, glob, getopt
-
-
-def usage():
- print( "\nDocMaker Usage information\n" )
- print( " docmaker [options] file1 [file2 ...]\n" )
- print( "using the following options:\n" )
- print( " -h : print this page" )
- print( " -t : set project title, as in '-t \"My Project\"'" )
- print( " -o : set output directory, as in '-o mydir'" )
- print( " -p : set documentation prefix, as in '-p ft2'" )
- print( "" )
- print( " --title : same as -t, as in '--title=\"My Project\"'" )
- print( " --output : same as -o, as in '--output=mydir'" )
- print( " --prefix : same as -p, as in '--prefix=ft2'" )
-
-
-def main( argv ):
- """Main program loop."""
-
- global output_dir
-
- try:
- opts, args = getopt.getopt( sys.argv[1:],
- "ht:o:p:",
- ["help", "title=", "output=", "prefix="] )
- except getopt.GetoptError:
- usage()
- sys.exit( 2 )
-
- if args == []:
- usage()
- sys.exit( 1 )
-
- # process options
- project_title = "Project"
- project_prefix = None
- output_dir = None
-
- for opt in opts:
- if opt[0] in ( "-h", "--help" ):
- usage()
- sys.exit( 0 )
-
- if opt[0] in ( "-t", "--title" ):
- project_title = opt[1]
-
- if opt[0] in ( "-o", "--output" ):
- utils.output_dir = opt[1]
-
- if opt[0] in ( "-p", "--prefix" ):
- project_prefix = opt[1]
-
- check_output()
-
- # create context and processor
- source_processor = SourceProcessor()
- content_processor = ContentProcessor()
-
- # retrieve the list of files to process
- file_list = make_file_list( args )
- for filename in file_list:
- source_processor.parse_file( filename )
- content_processor.parse_sources( source_processor )
-
- # process sections
- content_processor.finish()
-
- formatter = HtmlFormatter( content_processor,
- project_title,
- project_prefix )
-
- formatter.toc_dump()
- formatter.index_dump()
- formatter.section_dump_all()
-
-
-# if called from the command line
-if __name__ == '__main__':
- main( sys.argv )
-
-# eof
diff --git a/src/3rdparty/freetype/src/tools/docmaker/formatter.py b/src/3rdparty/freetype/src/tools/docmaker/formatter.py
deleted file mode 100644
index 2708fd40d6..0000000000
--- a/src/3rdparty/freetype/src/tools/docmaker/formatter.py
+++ /dev/null
@@ -1,228 +0,0 @@
-#
-# formatter.py
-#
-# Convert parsed content blocks to a structured document (library file).
-#
-# Copyright 2002-2018 by
-# David Turner.
-#
-# This file is part of the FreeType project, and may only be used,
-# modified, and distributed under the terms of the FreeType project
-# license, LICENSE.TXT. By continuing to use, modify, or distribute
-# this file you indicate that you have read the license and
-# understand and accept it fully.
-
-#
-# This is the base Formatter class. Its purpose is to convert a content
-# processor's data into specific documents (i.e., table of contents, global
-# index, and individual API reference indices).
-#
-# You need to sub-class it to output anything sensible. For example, the
-# file `tohtml.py' contains the definition of the `HtmlFormatter' sub-class
-# to output HTML.
-#
-
-
-from sources import *
-from content import *
-from utils import *
-
-
-################################################################
-##
-## FORMATTER CLASS
-##
-class Formatter:
-
- def __init__( self, processor ):
- self.processor = processor
- self.identifiers = {}
- self.chapters = processor.chapters
- self.sections = processor.sections.values()
- self.block_index = []
-
- # store all blocks in a dictionary
- self.blocks = []
- for section in self.sections:
- for block in section.blocks.values():
- self.add_identifier( block.name, block )
-
- # add enumeration values to the index, since this is useful
- for markup in block.markups:
- if markup.tag == 'values':
- for field in markup.fields:
- self.add_identifier( field.name, block )
-
- self.block_index = self.identifiers.keys()
- self.block_index.sort( key = index_key )
-
- # also add section names to dictionary (without making them appear
- # in the index)
- for section in self.sections:
- self.add_identifier( section.name, section )
-
- def add_identifier( self, name, block ):
- if name in self.identifiers:
- # duplicate name!
- sys.stderr.write( "WARNING: duplicate definition for"
- + " '" + name + "' "
- + "in " + block.location() + ", "
- + "previous definition in "
- + self.identifiers[name].location()
- + "\n" )
- else:
- self.identifiers[name] = block
-
- #
- # formatting the table of contents
- #
- def toc_enter( self ):
- pass
-
- def toc_chapter_enter( self, chapter ):
- pass
-
- def toc_section_enter( self, section ):
- pass
-
- def toc_section_exit( self, section ):
- pass
-
- def toc_chapter_exit( self, chapter ):
- pass
-
- def toc_index( self, index_filename ):
- pass
-
- def toc_exit( self ):
- pass
-
- def toc_dump( self, toc_filename = None, index_filename = None ):
- output = None
- if toc_filename:
- output = open_output( toc_filename )
-
- self.toc_enter()
-
- for chap in self.processor.chapters:
-
- self.toc_chapter_enter( chap )
-
- for section in chap.sections:
- self.toc_section_enter( section )
- self.toc_section_exit( section )
-
- self.toc_chapter_exit( chap )
-
- self.toc_index( index_filename )
-
- self.toc_exit()
-
- if output:
- close_output( output )
-
- #
- # formatting the index
- #
- def index_enter( self ):
- pass
-
- def index_name_enter( self, name ):
- pass
-
- def index_name_exit( self, name ):
- pass
-
- def index_exit( self ):
- pass
-
- def index_dump( self, index_filename = None ):
- output = None
- if index_filename:
- output = open_output( index_filename )
-
- self.index_enter()
-
- for name in self.block_index:
- self.index_name_enter( name )
- self.index_name_exit( name )
-
- self.index_exit()
-
- if output:
- close_output( output )
-
- #
- # formatting a section
- #
- def section_enter( self, section ):
- pass
-
- def block_enter( self, block ):
- pass
-
- def markup_enter( self, markup, block = None ):
- pass
-
- def field_enter( self, field, markup = None, block = None ):
- pass
-
- def field_exit( self, field, markup = None, block = None ):
- pass
-
- def markup_exit( self, markup, block = None ):
- pass
-
- def block_exit( self, block ):
- pass
-
- def section_exit( self, section ):
- pass
-
- def section_dump( self, section, section_filename = None ):
- output = None
- if section_filename:
- output = open_output( section_filename )
-
- self.section_enter( section )
-
- for name in section.block_names:
- skip_entry = 0
- try:
- block = self.identifiers[name]
- # `block_names' can contain field names also,
- # which we filter out
- for markup in block.markups:
- if markup.tag == 'values':
- for field in markup.fields:
- if field.name == name:
- skip_entry = 1
- except:
- skip_entry = 1 # this happens e.g. for `/empty/' entries
-
- if skip_entry:
- continue
-
- self.block_enter( block )
-
- for markup in block.markups[1:]: # always ignore first markup!
- self.markup_enter( markup, block )
-
- for field in markup.fields:
- self.field_enter( field, markup, block )
- self.field_exit( field, markup, block )
-
- self.markup_exit( markup, block )
-
- self.block_exit( block )
-
- self.section_exit( section )
-
- if output:
- close_output( output )
-
- def section_dump_all( self ):
- for section in self.sections:
- self.section_dump( section )
-
-# eof
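
Note: the deleted Formatter is a pure enter/exit visitor; every hook is a no-op, and subclasses such as HtmlFormatter override only what they need while the base class owns the traversal. A stripped-down sketch of the pattern (TextFormatter is hypothetical, not part of DocMaker):

class Formatter:
    def toc_chapter_enter(self, chapter):
        pass                          # default hooks do nothing

    def toc_chapter_exit(self, chapter):
        pass

    def toc_dump(self, chapters):
        for chap in chapters:         # traversal lives in the base class
            self.toc_chapter_enter(chap)
            self.toc_chapter_exit(chap)

class TextFormatter(Formatter):
    def toc_chapter_enter(self, chapter):
        print("Chapter:", chapter)

TextFormatter().toc_dump(["Core API", "Support API"])
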
diff --git a/src/3rdparty/freetype/src/tools/docmaker/sources.py b/src/3rdparty/freetype/src/tools/docmaker/sources.py
deleted file mode 100644
index e3b95e0faa..0000000000
--- a/src/3rdparty/freetype/src/tools/docmaker/sources.py
+++ /dev/null
@@ -1,410 +0,0 @@
-#
-# sources.py
-#
-# Convert source code comments to multi-line blocks (library file).
-#
-# Copyright 2002-2018 by
-# David Turner.
-#
-# This file is part of the FreeType project, and may only be used,
-# modified, and distributed under the terms of the FreeType project
-# license, LICENSE.TXT. By continuing to use, modify, or distribute
-# this file you indicate that you have read the license and
-# understand and accept it fully.
-
-#
-# This library file contains definitions of classes needed to decompose C
-# source code files into a series of multi-line `blocks'. There are two
-# kinds of blocks.
-#
-# - Normal blocks, which contain source code or ordinary comments.
-#
-# - Documentation blocks, which have restricted formatting, and whose text
-# always start with a documentation markup tag like `<Function>',
-# `<Type>', etc.
-#
-# The routines to process the content of documentation blocks are contained
-# in file `content.py'; the classes and methods found here only deal with
-# text parsing and basic documentation block extraction.
-#
-
-
-import fileinput, re, string
-
-
-################################################################
-##
-## SOURCE BLOCK FORMAT CLASS
-##
-## A simple class containing compiled regular expressions to detect
-## potential documentation format block comments within C source code.
-##
-## The `column' pattern must contain a group to `unbox' the content of
-## documentation comment blocks.
-##
-## Later on, paragraphs are converted to long lines, which simplifies the
-## regular expressions that act upon the text.
-##
-class SourceBlockFormat:
-
- def __init__( self, id, start, column, end ):
- """Create a block pattern, used to recognize special documentation
- blocks."""
- self.id = id
- self.start = re.compile( start, re.VERBOSE )
- self.column = re.compile( column, re.VERBOSE )
- self.end = re.compile( end, re.VERBOSE )
-
-
-#
-# Format 1 documentation comment blocks.
-#
-# /************************************/ (at least 2 asterisks)
-# /* */
-# /* */
-# /* */
-# /************************************/ (at least 2 asterisks)
-#
-start = r'''
- \s* # any number of whitespace
- /\*{2,}/ # followed by '/' and at least two asterisks then '/'
- \s*$ # probably followed by whitespace
-'''
-
-column = r'''
- \s* # any number of whitespace
- /\*{1} # followed by '/' and precisely one asterisk
- ([^*].*) # followed by anything (group 1)
- \*{1}/ # followed by one asterisk and a '/'
- \s*$ # probably followed by whitespace
-'''
-
-re_source_block_format1 = SourceBlockFormat( 1, start, column, start )
-
-
-#
-# Format 2 documentation comment blocks.
-#
-# /************************************ (at least 2 asterisks)
-# *
-# * (1 asterisk)
-# *
-# */ (1 or more asterisks)
-#
-start = r'''
- \s* # any number of whitespace
- /\*{2,} # followed by '/' and at least two asterisks
- \s*$ # probably followed by whitespace
-'''
-
-column = r'''
- \s* # any number of whitespace
- \*{1}(?![*/]) # followed by precisely one asterisk not followed by `/'
- (.*) # then anything (group1)
-'''
-
-end = r'''
- \s* # any number of whitespace
- \*+/ # followed by at least one asterisk, then '/'
-'''
-
-re_source_block_format2 = SourceBlockFormat( 2, start, column, end )
-
-
-#
-# The list of supported documentation block formats. We could add new ones
-# quite easily.
-#
-re_source_block_formats = [re_source_block_format1, re_source_block_format2]
-
-
-#
-# The following regular expressions correspond to markup tags within the
-# documentation comment blocks. They are equivalent despite their different
-# syntax.
-#
-# A markup tag consists of letters or character `-', to be found in group 1.
-#
-# Notice that a markup tag _must_ begin a new paragraph.
-#
-re_markup_tag1 = re.compile( r'''\s*<((?:\w|-)*)>''' ) # <xxxx> format
-re_markup_tag2 = re.compile( r'''\s*@((?:\w|-)*):''' ) # @xxxx: format
-
-#
-# The list of supported markup tags. We could add new ones quite easily.
-#
-re_markup_tags = [re_markup_tag1, re_markup_tag2]
-
-
-#
-# A regular expression to detect a cross reference, after markup tags have
-# been stripped off.
-#
-# Two syntax forms are supported:
-#
-# @<name>
-# @<name>[<id>]
-#
-# where both `<name>' and `<id>' consist of alphanumeric characters, `_',
-# and `-'. Use `<id>' if there are multiple, valid `<name>' entries.
-#
-# Example: @foo[bar]
-#
-re_crossref = re.compile( r"""
- @
- (?P<name>(?:\w|-)+
- (?:\[(?:\w|-)+\])?)
- (?P<rest>.*)
- """, re.VERBOSE )
-
-#
-# Two regular expressions to detect italic and bold markup, respectively.
-# Group 1 is the markup, group 2 the rest of the line.
-#
-# Note that the markup is limited to words consisting of letters, digits,
-# the characters `_' and `-', or an apostrophe (but not as the first
-# character).
-#
-re_italic = re.compile( r"_((?:\w|-)(?:\w|'|-)*)_(.*)" ) # _italic_
-re_bold = re.compile( r"\*((?:\w|-)(?:\w|'|-)*)\*(.*)" ) # *bold*
-
-#
-# This regular expression code to identify an URL has been taken from
-#
-# https://mail.python.org/pipermail/tutor/2002-September/017228.html
-#
-# (with slight modifications).
-#
-urls = r'(?:https?|telnet|gopher|file|wais|ftp)'
-ltrs = r'\w'
-gunk = r'/#~:.?+=&%@!\-'
-punc = r'.:?\-'
-any = "%(ltrs)s%(gunk)s%(punc)s" % { 'ltrs' : ltrs,
- 'gunk' : gunk,
- 'punc' : punc }
-url = r"""
- (
- \b # start at word boundary
- %(urls)s : # need resource and a colon
- [%(any)s] +? # followed by one or more of any valid
- # character, but be conservative and
- # take only what you need to...
- (?= # [look-ahead non-consumptive assertion]
- [%(punc)s]* # either 0 or more punctuation
- (?: # [non-grouping parentheses]
- [^%(any)s] | $ # followed by a non-url char
- # or end of the string
- )
- )
- )
- """ % {'urls' : urls,
- 'any' : any,
- 'punc' : punc }
-
-re_url = re.compile( url, re.VERBOSE | re.MULTILINE )
-
-#
-# A regular expression that stops collection of comments for the current
-# block.
-#
-re_source_sep = re.compile( r'\s*/\*\s*\*/' ) # /* */
-
-#
-# A regular expression to find possible C identifiers while outputting
-# source code verbatim, covering things like `*foo' or `(bar'. Group 1 is
-# the prefix, group 2 the identifier -- since we scan lines from left to
-# right, sequentially splitting the source code into prefix and identifier
-# is fully sufficient for our purposes.
-#
-re_source_crossref = re.compile( r'(\W*)(\w*)' )
-
-#
-# A regular expression that matches a list of reserved C source keywords.
-#
-re_source_keywords = re.compile( '''\\b ( typedef |
- struct |
- enum |
- union |
- const |
- char |
- int |
- short |
- long |
- void |
- signed |
- unsigned |
- \#include |
- \#define |
- \#undef |
- \#if |
- \#ifdef |
- \#ifndef |
- \#else |
- \#endif ) \\b''', re.VERBOSE )
-
-
-################################################################
-##
-## SOURCE BLOCK CLASS
-##
-## There are two important fields in a `SourceBlock' object.
-##
-## self.lines
-## A list of text lines for the corresponding block.
-##
-## self.content
-## For documentation comment blocks only, this is the block content
-## that has been `unboxed' from its decoration. This is `None' for all
-## other blocks (i.e., sources or ordinary comments with no starting
-## markup tag)
-##
-class SourceBlock:
-
- def __init__( self, processor, filename, lineno, lines ):
- self.processor = processor
- self.filename = filename
- self.lineno = lineno
- self.lines = lines[:]
- self.format = processor.format
- self.content = []
-
- if self.format == None:
- return
-
- words = []
-
- # extract comment lines
- lines = []
-
- for line0 in self.lines:
- m = self.format.column.match( line0 )
- if m:
- lines.append( m.group( 1 ) )
-
- # now, look for a markup tag
- for l in lines:
- l = string.strip( l )
- if len( l ) > 0:
- for tag in re_markup_tags:
- if tag.match( l ):
- self.content = lines
- return
-
- def location( self ):
- return "(" + self.filename + ":" + repr( self.lineno ) + ")"
-
- # debugging only -- not used in normal operations
- def dump( self ):
- if self.content:
- print( "{{{content start---" )
- for l in self.content:
- print( l )
- print( "---content end}}}" )
- return
-
- fmt = ""
- if self.format:
- fmt = repr( self.format.id ) + " "
-
- for line in self.lines:
- print( line )
-
-
-################################################################
-##
-## SOURCE PROCESSOR CLASS
-##
-## The `SourceProcessor' is in charge of reading a C source file and
-## decomposing it into a series of different `SourceBlock' objects.
-##
-## A SourceBlock object consists of the following data.
-##
-## - A documentation comment block using one of the layouts above. Its
-## exact format will be discussed later.
-##
-## - Normal sources lines, including comments.
-##
-##
-class SourceProcessor:
-
- def __init__( self ):
- """Initialize a source processor."""
- self.blocks = []
- self.filename = None
- self.format = None
- self.lines = []
-
- def reset( self ):
- """Reset a block processor and clean up all its blocks."""
- self.blocks = []
- self.format = None
-
- def parse_file( self, filename ):
- """Parse a C source file and add its blocks to the processor's
- list."""
- self.reset()
-
- self.filename = filename
-
- fileinput.close()
- self.format = None
- self.lineno = 0
- self.lines = []
-
- for line in fileinput.input( filename ):
- # strip trailing newlines, important on Windows machines!
- if line[-1] == '\012':
- line = line[0:-1]
-
- if self.format == None:
- self.process_normal_line( line )
- else:
- if self.format.end.match( line ):
- # A normal block end. Add it to `lines' and create a
- # new block
- self.lines.append( line )
- self.add_block_lines()
- elif self.format.column.match( line ):
- # A normal column line. Add it to `lines'.
- self.lines.append( line )
- else:
- # An unexpected block end. Create a new block, but
- # don't process the line.
- self.add_block_lines()
-
- # we need to process the line again
- self.process_normal_line( line )
-
- # record the last lines
- self.add_block_lines()
-
- def process_normal_line( self, line ):
- """Process a normal line and check whether it is the start of a new
- block."""
- for f in re_source_block_formats:
- if f.start.match( line ):
- self.add_block_lines()
- self.format = f
- self.lineno = fileinput.filelineno()
-
- self.lines.append( line )
-
- def add_block_lines( self ):
- """Add the current accumulated lines and create a new block."""
- if self.lines != []:
- block = SourceBlock( self,
- self.filename,
- self.lineno,
- self.lines )
-
- self.blocks.append( block )
- self.format = None
- self.lines = []
-
- # debugging only, not used in normal operations
- def dump( self ):
- """Print all blocks in a processor."""
- for b in self.blocks:
- b.dump()
-
-# eof
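
Note: the deleted sources.py accepts two interchangeable markup-tag syntaxes, `<Function>' and `@function:'. A small sketch of the detection, mirroring re_markup_tags above:

import re

re_markup_tag1 = re.compile(r'\s*<((?:\w|-)*)>')   # <xxxx> format
re_markup_tag2 = re.compile(r'\s*@((?:\w|-)*):')   # @xxxx: format

for line in ("<Function>", "@function:"):
    for tag in (re_markup_tag1, re_markup_tag2):
        m = tag.match(line)
        if m:
            print(m.group(1))   # -> "Function", then "function"
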
diff --git a/src/3rdparty/freetype/src/tools/docmaker/tohtml.py b/src/3rdparty/freetype/src/tools/docmaker/tohtml.py
deleted file mode 100644
index 9f318a2a49..0000000000
--- a/src/3rdparty/freetype/src/tools/docmaker/tohtml.py
+++ /dev/null
@@ -1,725 +0,0 @@
-#
-# tohtml.py
-#
-# A sub-class container of the `Formatter' class to produce HTML.
-#
-# Copyright 2002-2018 by
-# David Turner.
-#
-# This file is part of the FreeType project, and may only be used,
-# modified, and distributed under the terms of the FreeType project
-# license, LICENSE.TXT. By continuing to use, modify, or distribute
-# this file you indicate that you have read the license and
-# understand and accept it fully.
-
-# The parent class is contained in file `formatter.py'.
-
-
-from sources import *
-from content import *
-from formatter import *
-
-import time
-
-
-# The following strings define the HTML header used by all generated pages.
-html_header_1 = """\
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
-"https://www.w3.org/TR/html4/loose.dtd">
-<html>
-<head>
-<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
-<title>\
-"""
-
-html_header_2 = """\
- API Reference</title>
-<style type="text/css">
- a:link { color: #0000EF; }
- a:visited { color: #51188E; }
- a:hover { color: #FF0000; }
-
- body { font-family: Verdana, Geneva, Arial, Helvetica, serif;
- color: #000000;
- background: #FFFFFF;
- width: 87%;
- margin: auto; }
-
- div.section { width: 75%;
- margin: auto; }
- div.section hr { margin: 4ex 0 1ex 0; }
- div.section h4 { background-color: #EEEEFF;
- font-size: medium;
- font-style: oblique;
- font-weight: bold;
- margin: 3ex 0 1.5ex 9%;
- padding: 0.3ex 0 0.3ex 1%; }
- div.section p { margin: 1.5ex 0 1.5ex 10%; }
- div.section pre { margin: 3ex 0 3ex 9%;
- background-color: #D6E8FF;
- padding: 2ex 0 2ex 1%; }
- div.section table.fields { width: 90%;
- margin: 1.5ex 0 1.5ex 10%; }
- div.section table.toc { width: 95%;
- margin: 1.5ex 0 1.5ex 5%; }
- div.timestamp { text-align: center;
- font-size: 69%;
- margin: 1.5ex 0 1.5ex 0; }
-
- h1 { text-align: center; }
- h3 { font-size: medium;
- margin: 4ex 0 1.5ex 0; }
-
- p { text-align: justify; }
-
- pre.colored { color: blue; }
-
- span.keyword { font-family: monospace;
- text-align: left;
- white-space: pre;
- color: darkblue; }
-
- table.fields td.val { font-weight: bold;
- text-align: right;
- width: 30%;
- vertical-align: baseline;
- padding: 1ex 1em 1ex 0; }
- table.fields td.desc { vertical-align: baseline;
- padding: 1ex 0 1ex 1em; }
- table.fields td.desc p:first-child { margin: 0; }
- table.fields td.desc p { margin: 1.5ex 0 0 0; }
- table.index { margin: 6ex auto 6ex auto;
- border: 0;
- border-collapse: separate;
- border-spacing: 1em 0.3ex; }
- table.index tr { padding: 0; }
- table.index td { padding: 0; }
- table.index-toc-link { width: 100%;
- border: 0;
- border-spacing: 0;
- margin: 1ex 0 1ex 0; }
- table.index-toc-link td.left { padding: 0 0.5em 0 0.5em;
- font-size: 83%;
- text-align: left; }
- table.index-toc-link td.middle { padding: 0 0.5em 0 0.5em;
- font-size: 83%;
- text-align: center; }
- table.index-toc-link td.right { padding: 0 0.5em 0 0.5em;
- font-size: 83%;
- text-align: right; }
- table.synopsis { margin: 6ex auto 6ex auto;
- border: 0;
- border-collapse: separate;
- border-spacing: 2em 0.6ex; }
- table.synopsis tr { padding: 0; }
- table.synopsis td { padding: 0; }
- table.toc td.link { width: 30%;
- text-align: right;
- vertical-align: baseline;
- padding: 1ex 1em 1ex 0; }
- table.toc td.desc { vertical-align: baseline;
- padding: 1ex 0 1ex 1em;
- text-align: left; }
- table.toc td.desc p:first-child { margin: 0;
- text-align: left; }
- table.toc td.desc p { margin: 1.5ex 0 0 0;
- text-align: left; }
-
-</style>
-</head>
-<body>
-"""
-
-html_header_3l = """
-<table class="index-toc-link"><tr><td class="left">[<a href="\
-"""
-
-html_header_3r = """
-<table class="index-toc-link"><tr><td class="right">[<a href="\
-"""
-
-html_header_4 = """\
-">Index</a>]</td><td class="right">[<a href="\
-"""
-
-html_header_5t = """\
-">TOC</a>]</td></tr></table>
-<h1>\
-"""
-
-html_header_5i = """\
-">Index</a>]</td></tr></table>
-<h1>\
-"""
-
-html_header_6 = """\
- API Reference</h1>
-"""
-
-
-# The HTML footer used by all generated pages.
-html_footer = """\
-</body>
-</html>\
-"""
-
-# The header and footer used for each section.
-section_title_header1 = '<h1 id="'
-section_title_header2 = '">'
-section_title_footer = "</h1>"
-
-# The header and footer used for code segments.
-code_header = '<pre class="colored">'
-code_footer = '</pre>'
-
-# Paragraph header and footer.
-para_header = "<p>"
-para_footer = "</p>"
-
-# Block header and footer.
-block_header = '<div class="section">'
-block_footer_start = """\
-<hr>
-<table class="index-toc-link"><tr><td class="left">[<a href="\
-"""
-block_footer_middle = """\
-">Index</a>]</td>\
-<td class="middle">[<a href="#">Top</a>]</td>\
-<td class="right">[<a href="\
-"""
-block_footer_end = """\
-">TOC</a>]</td></tr></table></div>
-"""
-
-# Description header/footer.
-description_header = ""
-description_footer = ""
-
-# Marker header/inter/footer combination.
-marker_header = "<h4>"
-marker_inter = "</h4>"
-marker_footer = ""
-
-# Header location header/footer.
-header_location_header = "<p>"
-header_location_footer = "</p>"
-
-# Source code extracts header/footer.
-source_header = "<pre>"
-source_footer = "</pre>"
-
-# Chapter header/inter/footer.
-chapter_header = """\
-<div class="section">
-<h2>\
-"""
-chapter_inter = '</h2>'
-chapter_footer = '</div>'
-
-# Index footer.
-index_footer_start = """\
-<hr>
-<table class="index-toc-link"><tr><td class="right">[<a href="\
-"""
-index_footer_end = """\
-">TOC</a>]</td></tr></table>
-"""
-
-# TOC footer.
-toc_footer_start = """\
-<hr>
-<table class="index-toc-link"><tr><td class="left">[<a href="\
-"""
-toc_footer_end = """\
-">Index</a>]</td></tr></table>
-"""
-
-
-# Source language keyword coloration and styling.
-keyword_prefix = '<span class="keyword">'
-keyword_suffix = '</span>'
-
-section_synopsis_header = '<h2>Synopsis</h2>'
-section_synopsis_footer = ''
-
-
-# Translate a single line of source to HTML. This converts `<', `>', and
-# `&' into `&lt;', `&gt;', and `&amp;'.
-#
-def html_quote( line ):
- result = string.replace( line, "&", "&amp;" )
- result = string.replace( result, "<", "&lt;" )
- result = string.replace( result, ">", "&gt;" )
- return result
-
-
-################################################################
-##
-## HTML FORMATTER CLASS
-##
-class HtmlFormatter( Formatter ):
-
- def __init__( self, processor, project_title, file_prefix ):
- Formatter.__init__( self, processor )
-
- global html_header_1
- global html_header_2
- global html_header_3l, html_header_3r
- global html_header_4
- global html_header_5t, html_header_5i
- global html_header_6
- global html_footer
-
- if file_prefix:
- file_prefix = file_prefix + "-"
- else:
- file_prefix = ""
-
- self.headers = processor.headers
- self.project_title = project_title
- self.file_prefix = file_prefix
- self.html_header = (
- html_header_1 + project_title
- + html_header_2
- + html_header_3l + file_prefix + "index.html"
- + html_header_4 + file_prefix + "toc.html"
- + html_header_5t + project_title
- + html_header_6 )
- self.html_index_header = (
- html_header_1 + project_title
- + html_header_2
- + html_header_3r + file_prefix + "toc.html"
- + html_header_5t + project_title
- + html_header_6 )
- self.html_toc_header = (
- html_header_1 + project_title
- + html_header_2
- + html_header_3l + file_prefix + "index.html"
- + html_header_5i + project_title
- + html_header_6 )
- self.html_footer = (
- '<div class="timestamp">generated on '
- + time.asctime( time.localtime( time.time() ) )
- + "</div>" + html_footer )
-
- self.columns = 3
-
- def make_section_url( self, section ):
- return self.file_prefix + section.name + ".html"
-
- def make_block_url( self, block, name = None ):
- if name == None:
- name = block.name
-
- try:
- section_url = self.make_section_url( block.section )
- except:
- # we already have a section
- section_url = self.make_section_url( block )
-
- return section_url + "#" + name
-
- def make_html_word( self, word ):
- """Analyze a simple word to detect cross-references and markup."""
- # handle cross-references
- m = re_crossref.match( word )
- if m:
- try:
- name = m.group( 'name' )
- rest = m.group( 'rest' )
- block = self.identifiers[name]
- url = self.make_block_url( block )
- # display `foo[bar]' as `foo'
- name = re.sub( r'\[.*\]', '', name )
- # normalize url, following RFC 3986
- url = string.replace( url, "[", "(" )
- url = string.replace( url, "]", ")" )
-
- try:
- # for sections, display title
- url = ( '&lsquo;<a href="' + url + '">'
- + block.title + '</a>&rsquo;'
- + rest )
- except:
- url = ( '<a href="' + url + '">'
- + name + '</a>'
- + rest )
-
- return url
- except:
- # we detected a cross-reference to an unknown item
- sys.stderr.write( "WARNING: undefined cross reference"
- + " '" + name + "'.\n" )
- return '?' + name + '?' + rest
-
- # handle markup for italic and bold
- m = re_italic.match( word )
- if m:
- name = m.group( 1 )
- rest = m.group( 2 )
- return '<i>' + name + '</i>' + rest
-
- m = re_bold.match( word )
- if m:
- name = m.group( 1 )
- rest = m.group( 2 )
- return '<b>' + name + '</b>' + rest
-
- return html_quote( word )
-
- def make_html_para( self, words ):
- """Convert words of a paragraph into tagged HTML text. Also handle
- cross references."""
- line = ""
- if words:
- line = self.make_html_word( words[0] )
- for word in words[1:]:
- line = line + " " + self.make_html_word( word )
- # handle hyperlinks
- line = re_url.sub( r'<a href="\1">\1</a>', line )
- # convert `...' quotations into real left and right single quotes
- line = re.sub( r"(^|\W)`(.*?)'(\W|$)",
- r'\1&lsquo;\2&rsquo;\3',
- line )
- # convert tilde into non-breakable space
- line = string.replace( line, "~", "&nbsp;" )
-
- return para_header + line + para_footer
-
- def make_html_code( self, lines ):
- """Convert a code sequence to HTML."""
- line = code_header + '\n'
- for l in lines:
- line = line + html_quote( l ).rstrip() + '\n'
-
- return line + code_footer
-
- def make_html_items( self, items ):
- """Convert a field's content into HTML."""
- lines = []
- for item in items:
- if item.lines:
- lines.append( self.make_html_code( item.lines ) )
- else:
- lines.append( self.make_html_para( item.words ) )
-
- return string.join( lines, '\n' )
-
- def print_html_items( self, items ):
- print( self.make_html_items( items ) )
-
- def print_html_field( self, field ):
- if field.name:
- print( '<table><tr valign="top"><td><b>'
- + field.name
- + "</b></td><td>" )
-
- print( self.make_html_items( field.items ) )
-
- if field.name:
- print( "</td></tr></table>" )
-
- def html_source_quote( self, line, block_name = None ):
- result = ""
- while line:
- m = re_source_crossref.match( line )
- if m:
- name = m.group( 2 )
- prefix = html_quote( m.group( 1 ) )
- length = len( m.group( 0 ) )
-
- if name == block_name:
- # this is the current block name, if any
- result = result + prefix + '<b>' + name + '</b>'
- elif re_source_keywords.match( name ):
- # this is a C keyword
- result = ( result + prefix
- + keyword_prefix + name + keyword_suffix )
- elif name in self.identifiers:
- # this is a known identifier
- block = self.identifiers[name]
- id = block.name
-
- # link to a field ID if possible
- try:
- for markup in block.markups:
- if markup.tag == 'values':
- for field in markup.fields:
- if field.name:
- id = name
-
- result = ( result + prefix
- + '<a href="'
- + self.make_block_url( block, id )
- + '">' + name + '</a>' )
- except:
- # sections don't have `markups'; however, we don't
- # want references to sections here anyway
- result = result + html_quote( line[:length] )
-
- else:
- result = result + html_quote( line[:length] )
-
- line = line[length:]
- else:
- result = result + html_quote( line )
- line = []
-
- return result
-
- def print_html_field_list( self, fields ):
- print( '<table class="fields">' )
- for field in fields:
- print( '<tr><td class="val" id="' + field.name + '">'
- + field.name
- + '</td><td class="desc">' )
- self.print_html_items( field.items )
- print( "</td></tr>" )
- print( "</table>" )
-
- def print_html_markup( self, markup ):
- table_fields = []
- for field in markup.fields:
- if field.name:
- # We begin a new series of field or value definitions. We
- # record them in the `table_fields' list before outputting
- # all of them as a single table.
- table_fields.append( field )
- else:
- if table_fields:
- self.print_html_field_list( table_fields )
- table_fields = []
-
- self.print_html_items( field.items )
-
- if table_fields:
- self.print_html_field_list( table_fields )
-
- #
- # formatting the index
- #
- def index_enter( self ):
- print( self.html_index_header )
- self.index_items = {}
-
- def index_name_enter( self, name ):
- block = self.identifiers[name]
- url = self.make_block_url( block )
- self.index_items[name] = url
-
- def index_exit( self ):
- # `block_index' already contains the sorted list of index names
- count = len( self.block_index )
- rows = ( count + self.columns - 1 ) // self.columns
-
- print( '<table class="index">' )
- for r in range( rows ):
- line = "<tr>"
- for c in range( self.columns ):
- i = r + c * rows
- if i < count:
-          bname = self.block_index[i]
- url = self.index_items[bname]
- # display `foo[bar]' as `foo (bar)'
- bname = string.replace( bname, "[", " (" )
- bname = string.replace( bname, "]", ")" )
- # normalize url, following RFC 3986
- url = string.replace( url, "[", "(" )
- url = string.replace( url, "]", ")" )
- line = ( line + '<td><a href="' + url + '">'
- + bname + '</a></td>' )
- else:
- line = line + '<td></td>'
- line = line + "</tr>"
- print( line )
-
- print( "</table>" )
-
- print( index_footer_start
- + self.file_prefix + "toc.html"
- + index_footer_end )
-
- print( self.html_footer )
-
- self.index_items = {}
-
- def index_dump( self, index_filename = None ):
- if index_filename == None:
- index_filename = self.file_prefix + "index.html"
-
- Formatter.index_dump( self, index_filename )
-
- #
- # formatting the table of contents
- #
- def toc_enter( self ):
- print( self.html_toc_header )
- print( "<h1>Table of Contents</h1>" )
-
- def toc_chapter_enter( self, chapter ):
- print( chapter_header + string.join( chapter.title ) + chapter_inter )
- print( '<table class="toc">' )
-
- def toc_section_enter( self, section ):
- print( '<tr><td class="link">'
- + '<a href="' + self.make_section_url( section ) + '">'
- + section.title + '</a></td><td class="desc">' )
- print( self.make_html_para( section.abstract ) )
-
- def toc_section_exit( self, section ):
- print( "</td></tr>" )
-
- def toc_chapter_exit( self, chapter ):
- print( "</table>" )
- print( chapter_footer )
-
- def toc_index( self, index_filename ):
- print( chapter_header
- + '<a href="' + index_filename + '">Global Index</a>'
- + chapter_inter + chapter_footer )
-
- def toc_exit( self ):
- print( toc_footer_start
- + self.file_prefix + "index.html"
- + toc_footer_end )
-
- print( self.html_footer )
-
- def toc_dump( self, toc_filename = None, index_filename = None ):
- if toc_filename == None:
- toc_filename = self.file_prefix + "toc.html"
-
- if index_filename == None:
- index_filename = self.file_prefix + "index.html"
-
- Formatter.toc_dump( self, toc_filename, index_filename )
-
- #
- # formatting sections
- #
- def section_enter( self, section ):
- print( self.html_header )
-
- print( section_title_header1 + section.name + section_title_header2
- + section.title
- + section_title_footer )
-
- maxwidth = 0
- for b in section.blocks.values():
- if len( b.name ) > maxwidth:
- maxwidth = len( b.name )
-
- width = 70 # XXX magic number
- if maxwidth > 0:
- # print section synopsis
- print( section_synopsis_header )
- print( '<table class="synopsis">' )
-
- columns = width // maxwidth
- if columns < 1:
- columns = 1
-
- count = len( section.block_names )
- # don't handle last entry if it is empty
- if section.block_names[-1] == "/empty/":
- count -= 1
- rows = ( count + columns - 1 ) // columns
-
- for r in range( rows ):
- line = "<tr>"
- for c in range( columns ):
- i = r + c * rows
- line = line + '<td>'
- if i < count:
- name = section.block_names[i]
- if name == "/empty/":
- # it can happen that a complete row is empty, and
- # without a proper `filler' the browser might
- # collapse the row to a much smaller height (or
- # even omit it completely)
- line = line + "&nbsp;"
- else:
- url = name
- # display `foo[bar]' as `foo'
- name = re.sub( r'\[.*\]', '', name )
- # normalize url, following RFC 3986
- url = string.replace( url, "[", "(" )
- url = string.replace( url, "]", ")" )
- line = ( line + '<a href="#' + url + '">'
- + name + '</a>' )
-
- line = line + '</td>'
- line = line + "</tr>"
- print( line )
-
- print( "</table>" )
- print( section_synopsis_footer )
-
- print( description_header )
- print( self.make_html_items( section.description ) )
- print( description_footer )
-
- def block_enter( self, block ):
- print( block_header )
-
- # place html anchor if needed
- if block.name:
- url = block.name
- # display `foo[bar]' as `foo'
- name = re.sub( r'\[.*\]', '', block.name )
- # normalize url, following RFC 3986
- url = string.replace( url, "[", "(" )
- url = string.replace( url, "]", ")" )
- print( '<h3 id="' + url + '">' + name + '</h3>' )
-
- # dump the block C source lines now
- if block.code:
- header = ''
- for f in self.headers.keys():
- if block.source.filename.find( f ) >= 0:
- header = self.headers[f] + ' (' + f + ')'
- break
-
-# if not header:
-# sys.stderr.write(
-# "WARNING: No header macro for"
-# + " '" + block.source.filename + "'.\n" )
-
- if header:
- print( header_location_header
- + 'Defined in ' + header + '.'
- + header_location_footer )
-
- print( source_header )
- for l in block.code:
- print( self.html_source_quote( l, block.name ) )
- print( source_footer )
-
- def markup_enter( self, markup, block ):
- if markup.tag == "description":
- print( description_header )
- else:
- print( marker_header + markup.tag + marker_inter )
-
- self.print_html_markup( markup )
-
- def markup_exit( self, markup, block ):
- if markup.tag == "description":
- print( description_footer )
- else:
- print( marker_footer )
-
- def block_exit( self, block ):
- print( block_footer_start + self.file_prefix + "index.html"
- + block_footer_middle + self.file_prefix + "toc.html"
- + block_footer_end )
-
- def section_exit( self, section ):
- print( html_footer )
-
- def section_dump_all( self ):
- for section in self.sections:
- self.section_dump( section,
- self.file_prefix + section.name + '.html' )
-
-# eof
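
Note: the synopsis and index tables in the deleted formatter above are
filled column by column -- with `rows = ceil(count / columns)', cell
(r, c) maps to entry `r + c * rows', so each column reads alphabetically
from top to bottom.  A minimal Python 3 sketch of that layout logic
(illustrative names, not part of docmaker):

  def column_major_rows( names, columns = 3 ):
      # Lay out `names' in a table that is read down each column,
      # mirroring `index_exit': cell (r, c) -> names[r + c * rows].
      count = len( names )
      rows = ( count + columns - 1 ) // columns   # ceiling division
      table = []
      for r in range( rows ):
          row = []
          for c in range( columns ):
              i = r + c * rows
              row.append( names[i] if i < count else "" )  # pad the tail
          table.append( row )
      return table

  # column_major_rows( ["a", "b", "c", "d", "e"], 2 )
  # -> [["a", "d"], ["b", "e"], ["c", ""]]
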
diff --git a/src/3rdparty/freetype/src/tools/docmaker/utils.py b/src/3rdparty/freetype/src/tools/docmaker/utils.py
deleted file mode 100644
index f40f1674a0..0000000000
--- a/src/3rdparty/freetype/src/tools/docmaker/utils.py
+++ /dev/null
@@ -1,127 +0,0 @@
-#
-# utils.py
-#
-# Auxiliary functions for the `docmaker' tool (library file).
-#
-# Copyright 2002-2018 by
-# David Turner.
-#
-# This file is part of the FreeType project, and may only be used,
-# modified, and distributed under the terms of the FreeType project
-# license, LICENSE.TXT. By continuing to use, modify, or distribute
-# this file you indicate that you have read the license and
-# understand and accept it fully.
-
-
-import string, sys, os, glob, itertools
-
-
-# current output directory
-#
-output_dir = None
-
-
-# A function that generates a sorting key. We want lexicographical order
-# (primary key) except that capital letters are sorted before lowercase
-# ones (secondary key).
-#
-# The primary key is implemented by lowercasing the input. The secondary
-# key is simply the original data appended, character by character. For
-# example, the sort key for `FT_x' is `fFtT__xx', while the sort key for
-# `ft_X' is `fftt__xX'. Since ASCII codes of uppercase letters are
-# numerically smaller than the codes of lowercase letters, `fFtT__xx' gets
-# sorted before `fftt__xX'.
-#
-def index_key( s ):
- return string.join( itertools.chain( *zip( s.lower(), s ) ) )
-
-
-# Sort `input_list', placing the elements of `order_list' in front.
-#
-def sort_order_list( input_list, order_list ):
- new_list = order_list[:]
- for id in input_list:
- if not id in order_list:
- new_list.append( id )
- return new_list
-
-
-# Divert standard output to a given project documentation file. Use
-# `output_dir' to determine the filename location if necessary and save the
-# old stdout handle in a tuple that is returned by this function.
-#
-def open_output( filename ):
- global output_dir
-
- if output_dir and output_dir != "":
- filename = output_dir + os.sep + filename
-
- old_stdout = sys.stdout
- new_file = open( filename, "w" )
- sys.stdout = new_file
-
- return ( new_file, old_stdout )
-
-
-# Close the output that was returned by `open_output'.
-#
-def close_output( output ):
- output[0].close()
- sys.stdout = output[1]
-
-
-# Check output directory.
-#
-def check_output():
- global output_dir
- if output_dir:
- if output_dir != "":
- if not os.path.isdir( output_dir ):
- sys.stderr.write( "argument"
- + " '" + output_dir + "' "
- + "is not a valid directory\n" )
- sys.exit( 2 )
- else:
- output_dir = None
-
-
-def file_exists( pathname ):
- """Check that a given file exists."""
- result = 1
- try:
- file = open( pathname, "r" )
- file.close()
- except:
- result = None
- sys.stderr.write( pathname + " couldn't be accessed\n" )
-
- return result
-
-
-def make_file_list( args = None ):
- """Build a list of input files from command-line arguments."""
- file_list = []
- # sys.stderr.write( repr( sys.argv[1 :] ) + '\n' )
-
- if not args:
- args = sys.argv[1:]
-
- for pathname in args:
- if string.find( pathname, '*' ) >= 0:
- newpath = glob.glob( pathname )
-      newpath.sort()  # sort files -- glob returns filenames in
-                      # arbitrary order, but the processing order matters
- else:
- newpath = [pathname]
-
- file_list.extend( newpath )
-
- if len( file_list ) == 0:
- file_list = None
- else:
- # now filter the file list to remove non-existing ones
- file_list = filter( file_exists, file_list )
-
- return file_list
-
-# eof
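
Note: `string.join' defaults to a space separator, so the keys built by
`index_key' above actually contain blanks between the interleaved
characters; the `fFtT__xx' form described in the comment is the
separator-free variant.  A rough Python 3 port that matches the
documented key (a sketch, not part of the deleted module):

  import itertools

  def index_key( s ):
      # Interleave the lowercased string with the original: "FT_x" ->
      # "fFtT__xx".  Uppercase letters sort before lowercase in ASCII,
      # which yields the case-sensitive secondary ordering.
      return "".join( itertools.chain( *zip( s.lower(), s ) ) )

  assert index_key( "FT_x" ) == "fFtT__xx"
  assert sorted( ["ft_X", "FT_x"], key = index_key ) == ["FT_x", "ft_X"]
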
diff --git a/src/3rdparty/freetype/src/tools/ftfuzzer/README b/src/3rdparty/freetype/src/tools/ftfuzzer/README
deleted file mode 100644
index 09d8e9f325..0000000000
--- a/src/3rdparty/freetype/src/tools/ftfuzzer/README
+++ /dev/null
@@ -1,81 +0,0 @@
-ftfuzzer
-========
-
-
-ftfuzzer.cc
------------
-
-This file contains a target function for FreeType fuzzing. It can be
-used with libFuzzer (https://llvm.org/docs/LibFuzzer.html) or
-potentially any other similar fuzzer.
-
-Usage:
-
- 1. Build `libfreetype.a' and `ftfuzzer.cc' using the most recent
- clang compiler with these flags:
-
- # for fuzzer coverage feedback
- -fsanitize-coverage=edge,8bit-counters
- # for bug checking
- -fsanitize=address,signed-integer-overflow,shift
-
- You also need the header files from the `libarchive' library
- (https://www.libarchive.org/) for handling tar files (see file
- `ftmutator.cc' below for more).
-
- 2. Link with `libFuzzer' (it contains `main') and `libarchive'.
-
- 3. Run the fuzzer on some test corpus.
-
-The exact flags and commands may vary; see the oss-fuzz build recipe
-for a current, working example.
-
- https://github.com/google/oss-fuzz/tree/master/projects/freetype2
-
-There is a continuous fuzzing bot that runs ftfuzzer.
-
- https://oss-fuzz.com
-
-(You need an account to be able to see coverage reports and the like
-on oss-fuzz.com.)
-
-Check the bot configuration for the most current settings.
-
-
-ftmutator.cc
-------------
-
-FreeType has the ability to `attach' auxiliary files to a font file,
-providing additional information. The main usage is to load AFM files
-for PostScript Type 1 fonts.
-
-However, libFuzzer currently only supports mutation of a single input
-file.  For this reason, `ftmutator.cc' contains a custom fuzzer
-mutator that uses an uncompressed tar archive as the input.  The
-first file in such a tarball gets opened by FreeType as a font; all
-other files are treated as input for `FT_Attach_Stream'.
-
-Compilation is similar to `ftfuzzer.cc'.
-
-
-runinput.cc
------------
-
-To run the target function on a set of input files, this file contains
-a convenience `main' function. Link it with `ftfuzzer.cc',
-`libfreetype.a', and `libarchive' and run like
-
- ./a.out my_tests_inputs/*
-
-----------------------------------------------------------------------
-
-Copyright 2015-2018 by
-David Turner, Robert Wilhelm, and Werner Lemberg.
-
-This file is part of the FreeType project, and may only be used,
-modified, and distributed under the terms of the FreeType project
-license, LICENSE.TXT. By continuing to use, modify, or distribute
-this file you indicate that you have read the license and understand
-and accept it fully.
-
-
---- end of README ---
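
Note: because the first tarball member is loaded as the font and later
members feed `FT_Attach_Stream', seed corpus entries for the attachment
path are just uncompressed tar files.  A hypothetical Python helper for
building one (not part of the tools above):

  import io
  import tarfile

  def make_seed( out_path, font_bytes, attachments ):
      # Uncompressed tar: the first member is the font, the rest become
      # `FT_Attach_Stream' inputs, in archive order.
      with tarfile.open( out_path, "w" ) as tar:   # "w" = no compression
          for i, data in enumerate( [font_bytes] + list( attachments ) ):
              info = tarfile.TarInfo( name = "file%d" % i )
              info.size = len( data )
              tar.addfile( info, io.BytesIO( data ) )

  # make_seed( "seed.tar", open( "font.pfb", "rb" ).read(),
  #            [open( "font.afm", "rb" ).read()] )
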
diff --git a/src/3rdparty/freetype/src/tools/ftfuzzer/ftfuzzer.cc b/src/3rdparty/freetype/src/tools/ftfuzzer/ftfuzzer.cc
deleted file mode 100644
index acf2bc9820..0000000000
--- a/src/3rdparty/freetype/src/tools/ftfuzzer/ftfuzzer.cc
+++ /dev/null
@@ -1,428 +0,0 @@
-// ftfuzzer.cc
-//
-// A fuzzing function to test FreeType with libFuzzer.
-//
-// Copyright 2015-2018 by
-// David Turner, Robert Wilhelm, and Werner Lemberg.
-//
-// This file is part of the FreeType project, and may only be used,
-// modified, and distributed under the terms of the FreeType project
-// license, LICENSE.TXT. By continuing to use, modify, or distribute
-// this file you indicate that you have read the license and
-// understand and accept it fully.
-
-
-// we use `unique_ptr', `decltype', and other gimmicks defined since C++11
-#if __cplusplus < 201103L
-# error "a C++11 compiler is needed"
-#endif
-
-#include <archive.h>
-#include <archive_entry.h>
-
-#include <assert.h>
-#include <stdint.h>
-
-#include <memory>
-#include <vector>
-
-
- using namespace std;
-
-
-#include <ft2build.h>
-
-#include FT_FREETYPE_H
-#include FT_GLYPH_H
-#include FT_CACHE_H
-#include FT_CACHE_CHARMAP_H
-#include FT_CACHE_IMAGE_H
-#include FT_CACHE_SMALL_BITMAPS_H
-#include FT_SYNTHESIS_H
-#include FT_ADVANCES_H
-#include FT_OUTLINE_H
-#include FT_BBOX_H
-#include FT_MODULE_H
-#include FT_DRIVER_H
-#include FT_MULTIPLE_MASTERS_H
-
-
- static FT_Library library;
- static int InitResult;
-
-
- struct FT_Global
- {
- FT_Global()
- {
- InitResult = FT_Init_FreeType( &library );
- if ( InitResult )
- return;
-
- // try to activate Adobe's CFF engine; it might not be the default
- unsigned int cff_hinting_engine = FT_HINTING_ADOBE;
- FT_Property_Set( library,
- "cff",
- "hinting-engine", &cff_hinting_engine );
- }
-
- ~FT_Global()
- {
- FT_Done_FreeType( library );
- }
- };
-
- FT_Global global_ft;
-
-
- // We want to select n values at random (without repetition),
- // with 0 < n <= N. The algorithm is taken from TAoCP, Vol. 2
- // (Algorithm S, selection sampling technique)
- struct Random
- {
- int n;
- int N;
-
- int t; // total number of values so far
- int m; // number of selected values so far
-
- uint32_t r; // the current pseudo-random number
-
- Random( int n_,
- int N_ )
- : n( n_ ),
- N( N_ )
- {
- t = 0;
- m = 0;
-
- // Ideally, this should depend on the input file,
- // for example, taking the sha256 as input;
-      // however, this is overkill for fuzzing tests.
- r = 12345;
- }
-
- int get()
- {
- if ( m >= n )
- return -1;
-
- Redo:
- // We can't use `rand': different C libraries might provide
- // different implementations of this function. As a replacement,
- // we use a 32bit version of the `xorshift' algorithm.
- r ^= r << 13;
- r ^= r >> 17;
- r ^= r << 5;
-
- double U = double( r ) / UINT32_MAX;
-
- if ( ( N - t ) * U >= ( n - m ) )
- {
- t++;
- goto Redo;
- }
-
- t++;
- m++;
-
- return t;
- }
- };
-
-
- static int
- archive_read_entry_data( struct archive *ar,
- vector<FT_Byte> *vw )
- {
- int r;
- const FT_Byte* buff;
- size_t size;
- int64_t offset;
-
- for (;;)
- {
- r = archive_read_data_block( ar,
- reinterpret_cast<const void**>( &buff ),
- &size,
- &offset );
- if ( r == ARCHIVE_EOF )
- return ARCHIVE_OK;
- if ( r != ARCHIVE_OK )
- return r;
-
- vw->insert( vw->end(), buff, buff + size );
- }
- }
-
-
- static vector<vector<FT_Byte>>
- parse_data( const uint8_t* data,
- size_t size )
- {
- struct archive_entry* entry;
- int r;
- vector<vector<FT_Byte>> files;
-
- unique_ptr<struct archive,
- decltype ( archive_read_free )*> a( archive_read_new(),
- archive_read_free );
-
- // activate reading of uncompressed tar archives
- archive_read_support_format_tar( a.get() );
-
- // the need for `const_cast' was removed with libarchive commit be4d4dd
- if ( !( r = archive_read_open_memory(
- a.get(),
-          const_cast<void*>( static_cast<const void*>( data ) ),
- size ) ) )
- {
- unique_ptr<struct archive,
- decltype ( archive_read_close )*> a_open( a.get(),
- archive_read_close );
-
- // read files contained in archive
- for (;;)
- {
- r = archive_read_next_header( a_open.get(), &entry );
- if ( r == ARCHIVE_EOF )
- break;
- if ( r != ARCHIVE_OK )
- break;
-
- vector<FT_Byte> entry_data;
- r = archive_read_entry_data( a.get(), &entry_data );
- if ( r != ARCHIVE_OK )
- break;
-
- files.push_back( move( entry_data ) );
- }
- }
-
- if ( files.size() == 0 )
- files.emplace_back( data, data + size );
-
- return files;
- }
-
-
- static void
- setIntermediateAxis( FT_Face face )
- {
- // only handle Multiple Masters and GX variation fonts
- if ( !FT_HAS_MULTIPLE_MASTERS( face ) )
- return;
-
- // get variation data for current instance
- FT_MM_Var* variations_ptr = nullptr;
- if ( FT_Get_MM_Var( face, &variations_ptr ) )
- return;
-
- unique_ptr<FT_MM_Var,
- decltype ( free )*> variations( variations_ptr, free );
- vector<FT_Fixed> coords( variations->num_axis );
-
- // select an arbitrary instance
- for ( unsigned int i = 0; i < variations->num_axis; i++ )
- coords[i] = ( variations->axis[i].minimum +
- variations->axis[i].def ) / 2;
-
- if ( FT_Set_Var_Design_Coordinates( face,
- FT_UInt( coords.size() ),
- coords.data() ) )
- return;
- }
-
-
- // the interface function to the libFuzzer library
- extern "C" int
- LLVMFuzzerTestOneInput( const uint8_t* data,
- size_t size_ )
- {
- assert( !InitResult );
-
- if ( size_ < 1 )
- return 0;
-
- const vector<vector<FT_Byte>>& files = parse_data( data, size_ );
-
- FT_Face face;
- FT_Int32 load_flags = FT_LOAD_DEFAULT;
-#if 0
- FT_Render_Mode render_mode = FT_RENDER_MODE_NORMAL;
-#endif
-
- // We use a conservative approach here, at the cost of calling
- // `FT_New_Face' quite often. The idea is that the fuzzer should be
- // able to try all faces and named instances of a font, expecting that
- // some faces don't work for various reasons, e.g., a broken subfont, or
- // an unsupported NFNT bitmap font in a Mac dfont resource that holds
- // more than a single font.
-
- // get number of faces
- if ( FT_New_Memory_Face( library,
- files[0].data(),
- (FT_Long)files[0].size(),
- -1,
- &face ) )
- return 0;
- long num_faces = face->num_faces;
- FT_Done_Face( face );
-
- // loop over up to 20 arbitrarily selected faces
-    // from index range [0;num_faces-1]
- long max_face_cnt = num_faces < 20
- ? num_faces
- : 20;
-
- Random faces_pool( (int)max_face_cnt, (int)num_faces );
-
- for ( long face_cnt = 0;
- face_cnt < max_face_cnt;
- face_cnt++ )
- {
- long face_index = faces_pool.get() - 1;
-
- // get number of instances
- if ( FT_New_Memory_Face( library,
- files[0].data(),
- (FT_Long)files[0].size(),
- -( face_index + 1 ),
- &face ) )
- continue;
- long num_instances = face->style_flags >> 16;
- FT_Done_Face( face );
-
- // loop over the face without instance (index 0)
- // and up to 20 arbitrarily selected instances
- // from index range [1;num_instances]
- long max_instance_cnt = num_instances < 20
- ? num_instances
- : 20;
-
- Random instances_pool( (int)max_instance_cnt, (int)num_instances );
-
- for ( long instance_cnt = 0;
- instance_cnt <= max_instance_cnt;
- instance_cnt++ )
- {
- long instance_index = 0;
-
- if ( !instance_cnt )
- {
- if ( FT_New_Memory_Face( library,
- files[0].data(),
- (FT_Long)files[0].size(),
- face_index,
- &face ) )
- continue;
- }
- else
- {
- instance_index = instances_pool.get();
-
- if ( FT_New_Memory_Face( library,
- files[0].data(),
- (FT_Long)files[0].size(),
- ( instance_index << 16 ) + face_index,
- &face ) )
- continue;
- }
-
- // if we have more than a single input file coming from an archive,
- // attach them (starting with the second file) using the order given
- // in the archive
- for ( size_t files_index = 1;
- files_index < files.size();
- files_index++ )
- {
- FT_Open_Args open_args = {};
- open_args.flags = FT_OPEN_MEMORY;
- open_args.memory_base = files[files_index].data();
- open_args.memory_size = (FT_Long)files[files_index].size();
-
-        // the last archive element will eventually be used as the
- // attachment
- FT_Attach_Stream( face, &open_args );
- }
-
- // loop over an arbitrary size for outlines
- // and up to ten arbitrarily selected bitmap strike sizes
- // from the range [0;num_fixed_sizes - 1]
- int max_size_cnt = face->num_fixed_sizes < 10
- ? face->num_fixed_sizes
- : 10;
-
- Random sizes_pool( max_size_cnt, face->num_fixed_sizes );
-
- for ( int size_cnt = 0;
- size_cnt <= max_size_cnt;
- size_cnt++ )
- {
- FT_Int32 flags = load_flags;
-
- int size_index = 0;
-
- if ( !size_cnt )
- {
- // set up 20pt at 72dpi as an arbitrary size
- if ( FT_Set_Char_Size( face, 20 * 64, 20 * 64, 72, 72 ) )
- continue;
- flags |= FT_LOAD_NO_BITMAP;
- }
- else
- {
- // bitmap strikes are not active for font variations
- if ( instance_index )
- continue;
-
- size_index = sizes_pool.get() - 1;
-
- if ( FT_Select_Size( face, size_index ) )
- continue;
- flags |= FT_LOAD_COLOR;
- }
-
- // test MM interface only for a face without a selected instance
- // and without a selected bitmap strike
- if ( !instance_index && !size_cnt )
- setIntermediateAxis( face );
-
- // loop over all glyphs
- for ( unsigned int glyph_index = 0;
- glyph_index < (unsigned int)face->num_glyphs;
- glyph_index++ )
- {
- if ( FT_Load_Glyph( face, glyph_index, flags ) )
- continue;
-
- // Rendering is the most expensive and the least interesting part.
- //
- // if ( FT_Render_Glyph( face->glyph, render_mode) )
- // continue;
- // FT_GlyphSlot_Embolden( face->glyph );
-
-#if 0
- FT_Glyph glyph;
- if ( !FT_Get_Glyph( face->glyph, &glyph ) )
- FT_Done_Glyph( glyph );
-
- FT_Outline* outline = &face->glyph->outline;
- FT_Matrix rot30 = { 0xDDB4, -0x8000, 0x8000, 0xDDB4 };
-
- FT_Outline_Transform( outline, &rot30 );
-
- FT_BBox bbox;
- FT_Outline_Get_BBox( outline, &bbox );
-#endif
- }
- }
- FT_Done_Face( face );
- }
- }
-
- return 0;
- }
-
-
-// END
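
Note: the `Random' helper above implements Knuth's selection sampling
(Algorithm S) on top of a 32-bit xorshift generator: each `get' call
returns the next selected candidate as a 1-based index, visiting exactly
`n' of the `N' candidates in increasing order.  A small Python
transcription for reference (same constants, illustrative only):

  def select_indices( n, N, seed = 12345 ):
      # TAoCP Vol. 2, Algorithm S: pick n of N indices (1-based,
      # increasing, without repetition); xorshift32 replaces `rand'
      # so results do not depend on the C library.
      r = seed
      t = m = 0                # candidates seen / candidates selected
      out = []
      while m < n:
          r ^= ( r << 13 ) & 0xFFFFFFFF
          r ^= r >> 17
          r ^= ( r << 5 ) & 0xFFFFFFFF
          U = r / 0xFFFFFFFF   # uniform in [0, 1]
          if ( N - t ) * U >= ( n - m ):
              t += 1           # skip this candidate
          else:
              t += 1
              m += 1
              out.append( t )  # select candidate t
      return out
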
diff --git a/src/3rdparty/freetype/src/tools/ftfuzzer/ftmutator.cc b/src/3rdparty/freetype/src/tools/ftfuzzer/ftmutator.cc
deleted file mode 100644
index ae4b140404..0000000000
--- a/src/3rdparty/freetype/src/tools/ftfuzzer/ftmutator.cc
+++ /dev/null
@@ -1,314 +0,0 @@
-// ftmutator.cc
-//
-// A custom fuzzer mutator to test FreeType with libFuzzer.
-//
-// Copyright 2015-2018 by
-// David Turner, Robert Wilhelm, and Werner Lemberg.
-//
-// This file is part of the FreeType project, and may only be used,
-// modified, and distributed under the terms of the FreeType project
-// license, LICENSE.TXT. By continuing to use, modify, or distribute
-// this file you indicate that you have read the license and
-// understand and accept it fully.
-
-
-// Since `tar' is not a valid format for input to FreeType, treat any input
-// that looks like `tar' as multiple files and mutate them separately.
-//
-// In the future, a variation of this may be used to guide mutation on a
-// logically higher level.
-
-
-// we use `unique_ptr', `decltype', and other gimmicks defined since C++11
-#if __cplusplus < 201103L
-# error "a C++11 compiler is needed"
-#endif
-
-#include <cstdint>
-#include <cassert>
-#include <cstdio>
-#include <cstdlib>
-#include <cstddef>
-#include <cstring>
-#include <iostream>
-
-#include <memory>
-#include <vector>
-
-#include <archive.h>
-#include <archive_entry.h>
-
-#include "FuzzerInterface.h"
-
-
- using namespace std;
-
-
- // This function should be defined by `ftfuzzer.cc'.
- extern "C" int
- LLVMFuzzerTestOneInput( const uint8_t* Data,
- size_t Size );
-
-
- static void
- check_result( struct archive* a,
- int r )
- {
- if ( r == ARCHIVE_OK )
- return;
-
- const char* m = archive_error_string( a );
-    fwrite( m, 1, strlen( m ), stdout );
- exit( 1 );
- }
-
-
- static int
- archive_read_entry_data( struct archive *ar,
- vector<uint8_t> *vw )
- {
- int r;
- const uint8_t* buff;
- size_t size;
- int64_t offset;
-
- for (;;)
- {
- r = archive_read_data_block( ar,
- reinterpret_cast<const void**>( &buff ),
- &size,
- &offset );
- if ( r == ARCHIVE_EOF )
- return ARCHIVE_OK;
- if ( r != ARCHIVE_OK )
- return r;
-
- vw->insert( vw->end(), buff, buff + size );
- }
- }
-
-
- static vector<vector<uint8_t>>
- parse_data( const uint8_t* data,
- size_t size )
- {
- struct archive_entry* entry;
- int r;
- vector<vector<uint8_t>> files;
-
- unique_ptr<struct archive,
- decltype ( archive_read_free )*> a( archive_read_new(),
- archive_read_free );
-
- // activate reading of uncompressed tar archives
- archive_read_support_format_tar( a.get() );
-
- // the need for `const_cast' was removed with libarchive commit be4d4dd
- if ( !( r = archive_read_open_memory(
- a.get(),
-          const_cast<void*>( static_cast<const void*>( data ) ),
- size ) ) )
- {
- unique_ptr<struct archive,
- decltype ( archive_read_close )*> a_open( a.get(),
- archive_read_close );
-
- // read files contained in archive
- for (;;)
- {
- r = archive_read_next_header( a_open.get(), &entry );
- if ( r == ARCHIVE_EOF )
- break;
- if ( r != ARCHIVE_OK )
- break;
-
- vector<uint8_t> entry_data;
- r = archive_read_entry_data( a.get(), &entry_data );
- if ( entry_data.size() == 0 )
- continue;
-
- files.push_back( move( entry_data ) );
- if ( r != ARCHIVE_OK )
- break;
- }
- }
-
- return files;
- }
-
-
- class FTFuzzer
- : public fuzzer::UserSuppliedFuzzer
- {
-
- public:
- FTFuzzer( fuzzer::FuzzerRandomBase* Rand )
- : fuzzer::UserSuppliedFuzzer( Rand ) {}
-
-
- int
- TargetFunction( const uint8_t* Data,
- size_t Size )
- {
- return LLVMFuzzerTestOneInput( Data, Size );
- }
-
-
- // Custom mutator.
- virtual size_t
- Mutate( uint8_t* Data,
- size_t Size,
- size_t MaxSize )
- {
- vector<vector<uint8_t>> files = parse_data( Data, Size );
-
-    // If the input was not recognized as a tar file, fall back to the
-    // default single-file mutator.
- if ( files.size() == 0 )
- return fuzzer::UserSuppliedFuzzer::Mutate( Data, Size, MaxSize );
-
- // This is somewhat `white box' on tar. The tar format uses 512 byte
- // blocks. One block as header for each file, two empty blocks of 0's
- // at the end. File data is padded to fill its last block.
- size_t used_blocks = files.size() + 2;
- for ( const auto& file : files )
- used_blocks += ( file.size() + 511 ) / 512;
-
- size_t max_blocks = MaxSize / 512;
-
- // If the input is big, it will need to be downsized. If the original
- // tar file was too big, it may have been clipped to fit. In this
- // case it may not be possible to properly write out the data, as
- // there may not be enough space for the trailing two blocks. Start
- // dropping file data or files from the end.
- for ( size_t i = files.size();
- i-- > 1 && used_blocks > max_blocks; )
- {
- size_t blocks_to_free = used_blocks - max_blocks;
- size_t blocks_currently_used_by_file_data =
- ( files[i].size() + 511 ) / 512;
-
- if ( blocks_currently_used_by_file_data >= blocks_to_free )
- {
- files[i].resize( ( blocks_currently_used_by_file_data -
- blocks_to_free ) * 512 );
- used_blocks -= blocks_to_free;
- continue;
- }
-
- files.pop_back();
- used_blocks -= blocks_currently_used_by_file_data + 1;
- }
-
- // If we get down to one file, don't use tar.
- if ( files.size() == 1 )
- {
- memcpy( Data, files[0].data(), files[0].size() );
- return fuzzer::UserSuppliedFuzzer::Mutate( Data,
- files[0].size(),
- MaxSize );
- }
-
- size_t free_blocks = max_blocks - used_blocks;
-
-    // Allow each file to use up as much of the currently available space
-    // as it can.  If it takes or gives up blocks, remove them from or
-    // return them to the pool.
- for ( auto&& file : files )
- {
- size_t blocks_currently_used_by_file = ( file.size() + 511 ) / 512;
- size_t blocks_available = blocks_currently_used_by_file +
- free_blocks;
- size_t max_size = blocks_available * 512;
- size_t data_size = file.size();
-
- file.resize( max_size );
- file.resize( fuzzer::UserSuppliedFuzzer::Mutate( file.data(),
- data_size,
- max_size ) );
-
- size_t blocks_now_used_by_file = ( file.size() + 511 ) / 512;
- free_blocks = free_blocks +
- blocks_currently_used_by_file -
- blocks_now_used_by_file;
- }
-
- unique_ptr<struct archive,
- decltype ( archive_write_free )*> a( archive_write_new(),
- archive_write_free );
-
- check_result( a.get(), archive_write_add_filter_none( a.get() ) );
- check_result( a.get(), archive_write_set_format_ustar( a.get() ) );
-
- // `used' may not be correct until after the archive is closed.
- size_t used = 0xbadbeef;
- check_result( a.get(), archive_write_open_memory( a.get(),
- Data,
- MaxSize,
- &used ) );
-
- {
- unique_ptr<struct archive,
- decltype ( archive_write_close )*> a_open( a.get(),
- archive_write_close );
-
- int file_index = 0;
- for ( const auto& file : files )
- {
- unique_ptr<struct archive_entry,
- decltype ( archive_entry_free )*>
- e( archive_entry_new2( a_open.get() ),
- archive_entry_free );
-
- char name_buffer[100];
- snprintf( name_buffer, 100, "file%d", file_index++ );
-
- archive_entry_set_pathname( e.get(), name_buffer );
- archive_entry_set_size( e.get(), file.size() );
- archive_entry_set_filetype( e.get(), AE_IFREG );
- archive_entry_set_perm( e.get(), 0644 );
-
- check_result( a_open.get(),
- archive_write_header( a_open.get(), e.get() ) );
- archive_write_data( a_open.get(), file.data(), file.size() );
- check_result( a_open.get(),
- archive_write_finish_entry( a_open.get() ) );
- }
- }
-
- return used;
- }
-
-
- // Cross `Data1' and `Data2', write up to `MaxOutSize' bytes into `Out',
- // return the number of bytes written, which should be positive.
- virtual size_t
- CrossOver( const uint8_t* Data1,
- size_t Size1,
- const uint8_t* Data2,
- size_t Size2,
- uint8_t* Out,
- size_t MaxOutSize )
- {
- return fuzzer::UserSuppliedFuzzer::CrossOver( Data1,
- Size1,
- Data2,
- Size2,
- Out,
- MaxOutSize );
- }
-
- }; // end of FTFuzzer class
-
-
- int
- main( int argc,
- char* *argv )
- {
- fuzzer::FuzzerRandomLibc Rand( 0 );
- FTFuzzer F( &Rand );
-
- fuzzer::FuzzerDriver( argc, argv, F );
- }
-
-
-// END
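
Note: the block arithmetic in `Mutate' above follows the ustar layout --
one 512-byte header block per member, file data padded up to whole
blocks, and two zero blocks terminating the archive.  A tiny Python model
of that accounting (illustrative only):

  BLOCK = 512

  def tar_blocks( file_sizes ):
      # One header block per member plus two trailer blocks; data is
      # padded to a multiple of 512 bytes.
      used = len( file_sizes ) + 2
      for size in file_sizes:
          used += ( size + BLOCK - 1 ) // BLOCK
      return used

  # tar_blocks( [1, 512, 513] ) -> 3 + 2 + ( 1 + 1 + 2 ) = 9
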
diff --git a/src/3rdparty/freetype/src/tools/ftfuzzer/rasterfuzzer.cc b/src/3rdparty/freetype/src/tools/ftfuzzer/rasterfuzzer.cc
deleted file mode 100644
index c69b95ea0f..0000000000
--- a/src/3rdparty/freetype/src/tools/ftfuzzer/rasterfuzzer.cc
+++ /dev/null
@@ -1,129 +0,0 @@
-// rasterfuzzer.cc
-//
-// A fuzzing function to test FreeType's rasterizers with libFuzzer.
-//
-// Copyright 2016-2018 by
-// David Turner, Robert Wilhelm, and Werner Lemberg.
-//
-// This file is part of the FreeType project, and may only be used,
-// modified, and distributed under the terms of the FreeType project
-// license, LICENSE.TXT. By continuing to use, modify, or distribute
-// this file you indicate that you have read the license and
-// understand and accept it fully.
-
-
-#include <stdint.h>
-
-#include <vector>
-
-
- using namespace std;
-
-
-#include <ft2build.h>
-
-#include FT_FREETYPE_H
-#include FT_IMAGE_H
-#include FT_OUTLINE_H
-
-
- static FT_Library library;
- static int InitResult;
-
-
- struct FT_Global {
- FT_Global() {
- InitResult = FT_Init_FreeType( &library );
- }
- ~FT_Global() {
- FT_Done_FreeType( library );
- }
- };
-
- FT_Global global_ft;
-
-
- extern "C" int
- LLVMFuzzerTestOneInput( const uint8_t* data,
- size_t size_ )
- {
- unsigned char pixels[4];
-
- FT_Bitmap bitmap_mono = {
- 1, // rows
- 1, // width
- 4, // pitch
- pixels, // buffer
- 2, // num_grays
- FT_PIXEL_MODE_MONO, // pixel_mode
- 0, // palette_mode
- NULL // palette
- };
-
- FT_Bitmap bitmap_gray = {
- 1, // rows
- 1, // width
- 4, // pitch
- pixels, // buffer
- 256, // num_grays
- FT_PIXEL_MODE_GRAY, // pixel_mode
- 0, // palette_mode
- NULL // palette
- };
-
- const size_t vsize = sizeof ( FT_Vector );
- const size_t tsize = sizeof ( char );
-
- // we use the input data for both points and tags
- short n_points = short( size_ / ( vsize + tsize ) );
- if ( n_points <= 2 )
- return 0;
-
- FT_Vector* points = reinterpret_cast<FT_Vector*>(
- const_cast<uint8_t*>(
- data ) );
- char* tags = reinterpret_cast<char*>(
- const_cast<uint8_t*>(
- data + size_t( n_points ) * vsize ) );
-
- // to reduce the number of invalid outlines that are immediately
- // rejected in `FT_Outline_Render', limit values to 2^18 pixels
-    // (i.e., 2^24 in 26.6 fixed-point font units)
- for ( short i = 0; i < n_points; i++ )
- {
- if ( points[i].x == LONG_MIN )
- points[i].x = 0;
- else if ( points[i].x < 0 )
- points[i].x = -( -points[i].x & 0xFFFFFF ) - 1;
- else
- points[i].x = ( points[i].x & 0xFFFFFF ) + 1;
-
- if ( points[i].y == LONG_MIN )
- points[i].y = 0;
- else if ( points[i].y < 0 )
- points[i].y = -( -points[i].y & 0xFFFFFF ) - 1;
- else
- points[i].y = ( points[i].y & 0xFFFFFF ) + 1;
- }
-
- short contours[1];
- contours[0] = n_points - 1;
-
- FT_Outline outline =
- {
- 1, // n_contours
- n_points, // n_points
- points, // points
- tags, // tags
- contours, // contours
- FT_OUTLINE_NONE // flags
- };
-
- FT_Outline_Get_Bitmap( library, &outline, &bitmap_mono );
- FT_Outline_Get_Bitmap( library, &outline, &bitmap_gray );
-
- return 0;
- }
-
-
-// END
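
Note: the clamping loop above maps every coordinate into roughly +-2^24
font units (26.6 fixed point, hence about +-2^18 pixels) while
sidestepping the undefined negation of `LONG_MIN'.  The same mapping in
Python, for reference (a sketch assuming a 64-bit `long', not the fuzzer
itself):

  LONG_MIN = -( 2 ** 63 )   # assumes a 64-bit `long'

  def clamp_coord( v ):
      # Map an arbitrary integer into [-2^24, 2^24], never returning 0
      # except for the LONG_MIN corner case.
      if v == LONG_MIN:
          return 0
      if v < 0:
          return -( -v & 0xFFFFFF ) - 1
      return ( v & 0xFFFFFF ) + 1
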
diff --git a/src/3rdparty/freetype/src/tools/ftfuzzer/runinput.cc b/src/3rdparty/freetype/src/tools/ftfuzzer/runinput.cc
deleted file mode 100644
index 2b02f57580..0000000000
--- a/src/3rdparty/freetype/src/tools/ftfuzzer/runinput.cc
+++ /dev/null
@@ -1,58 +0,0 @@
-// runinput.cc
-//
-// A `main' function for fuzzers like `ftfuzzer.cc'.
-//
-// Copyright 2015-2018 by
-// David Turner, Robert Wilhelm, and Werner Lemberg.
-//
-// This file is part of the FreeType project, and may only be used,
-// modified, and distributed under the terms of the FreeType project
-// license, LICENSE.TXT. By continuing to use, modify, or distribute
-// this file you indicate that you have read the license and
-// understand and accept it fully.
-
-
-#include <assert.h>
-#include <stdio.h>
-#include <string.h>
-#include <stdlib.h>
-#include <stdint.h>
-
-
- extern "C" void
- LLVMFuzzerTestOneInput( const uint8_t* data,
- size_t size );
-
-
- unsigned char a[1 << 24];
-
-
- int
- main( int argc,
- char* *argv )
- {
- assert( argc >= 2 );
-
- for ( int i = 1; i < argc; i++ )
- {
- fprintf( stderr, "%s\n", argv[i] );
-
- FILE* f = fopen( argv[i], "r" );
- assert( f );
-
- size_t n = fread( a, 1, sizeof ( a ), f );
- fclose( f );
- if ( !n )
- continue;
-
- unsigned char* b = (unsigned char*)malloc( n );
- memcpy( b, a, n );
-
- LLVMFuzzerTestOneInput( b, n );
-
- free( b );
- }
- }
-
-
-// END
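
Note: the driver above simply replays each input file through the fuzz
target once, which is convenient for reproducing crashes outside
libFuzzer.  The same pattern sketched in Python around a generic
`target' callable (hypothetical, not a FreeType API):

  import sys

  def run_inputs( target, paths, max_size = 1 << 24 ):
      # Replay each input file through `target' once, like `runinput.cc'.
      for path in paths:
          print( path, file = sys.stderr )
          with open( path, "rb" ) as f:
              data = f.read( max_size )
          if data:
              target( data )

  # run_inputs( lambda d: None, sys.argv[1:] )
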
diff --git a/src/3rdparty/freetype/src/tools/ftrandom/Makefile b/src/3rdparty/freetype/src/tools/ftrandom/Makefile
deleted file mode 100644
index 24dc49c563..0000000000
--- a/src/3rdparty/freetype/src/tools/ftrandom/Makefile
+++ /dev/null
@@ -1,45 +0,0 @@
-# TOP_DIR and OBJ_DIR should be set by the user to the right directories,
-# if necessary.
-
-TOP_DIR ?= ../../..
-OBJ_DIR ?= $(TOP_DIR)/objs
-
-
-# The setup below is for gcc on a Unix-like platform,
-# where FreeType has been set up to create a static library
-# (which is the default).
-
-VPATH = $(OBJ_DIR) \
- $(OBJ_DIR)/.libs
-
-SRC_DIR = $(TOP_DIR)/src/tools/ftrandom
-
-CC = gcc
-WFLAGS = -Wmissing-prototypes \
- -Wunused \
- -Wimplicit \
- -Wreturn-type \
- -Wparentheses \
- -pedantic \
- -Wformat \
- -Wchar-subscripts \
- -Wsequence-point
-CFLAGS = $(WFLAGS) \
- -g
-INCLUDES = -I $(TOP_DIR)/include
-LDFLAGS =
-LIBS = -lm \
- -lz \
- -lpng \
- -lbz2 \
- -lharfbuzz
-
-all: $(OBJ_DIR)/ftrandom
-
-$(OBJ_DIR)/ftrandom.o: $(SRC_DIR)/ftrandom.c
- $(CC) $(CFLAGS) $(INCLUDES) -c -o $@ $<
-
-$(OBJ_DIR)/ftrandom: $(OBJ_DIR)/ftrandom.o libfreetype.a
- $(CC) $(LDFLAGS) -o $@ $^ $(LIBS)
-
-# EOF
diff --git a/src/3rdparty/freetype/src/tools/ftrandom/ftrandom.c b/src/3rdparty/freetype/src/tools/ftrandom/ftrandom.c
index ab5cfc98b6..0ee765e528 100644
--- a/src/3rdparty/freetype/src/tools/ftrandom/ftrandom.c
+++ b/src/3rdparty/freetype/src/tools/ftrandom/ftrandom.c
@@ -29,7 +29,7 @@
/* This file is now part of the FreeType library */
-#define _XOPEN_SOURCE 500 /* for `kill', `strdup', `random', and `srandom' */
+#define _XOPEN_SOURCE 600 /* for `kill', `strdup', `random', and `srandom' */
#include <stdio.h>
@@ -45,8 +45,8 @@
#include <time.h>
#include <ft2build.h>
-#include FT_FREETYPE_H
-#include FT_OUTLINE_H
+#include <freetype/freetype.h>
+#include <freetype/ftoutln.h>
#define true 1
#define false 0
@@ -520,7 +520,7 @@
char buffer[1024];
- sprintf( buffer, "%s/test%d", results_dir, test_num++ );
+ snprintf( buffer, 1024, "%s/test%d", results_dir, test_num++ );
if ( copyfont ( &fontlist[i], buffer ) )
{
diff --git a/src/3rdparty/freetype/src/tools/glnames.py b/src/3rdparty/freetype/src/tools/glnames.py
index e1b613d11b..41509dbc2d 100644
--- a/src/3rdparty/freetype/src/tools/glnames.py
+++ b/src/3rdparty/freetype/src/tools/glnames.py
@@ -1,12 +1,9 @@
-#!/usr/bin/env python
-#
+#!/usr/bin/env python3
#
# FreeType 2 glyph name builder
#
-
-
-# Copyright (C) 1996-2019 by
+# Copyright (C) 1996-2023 by
# David Turner, Robert Wilhelm, and Werner Lemberg.
#
# This file is part of the FreeType project, and may only be used, modified,
@@ -16,8 +13,7 @@
# fully.
-"""\
-
+"""
usage: %s <output-file>
This python script generates the glyph names tables defined in the
@@ -26,9 +22,9 @@ usage: %s <output-file>
Its single argument is the name of the header file to be created.
"""
-
-import sys, string, struct, re, os.path
-
+import os.path
+import struct
+import sys
# This table lists the glyphs according to the Macintosh specification.
# It is used by the TrueType Postscript names table.
@@ -39,379 +35,371 @@ import sys, string, struct, re, os.path
#
# for the official list.
#
-mac_standard_names = \
-[
- # 0
- ".notdef", ".null", "nonmarkingreturn", "space", "exclam",
- "quotedbl", "numbersign", "dollar", "percent", "ampersand",
-
- # 10
- "quotesingle", "parenleft", "parenright", "asterisk", "plus",
- "comma", "hyphen", "period", "slash", "zero",
-
- # 20
- "one", "two", "three", "four", "five",
- "six", "seven", "eight", "nine", "colon",
-
- # 30
- "semicolon", "less", "equal", "greater", "question",
- "at", "A", "B", "C", "D",
-
- # 40
- "E", "F", "G", "H", "I",
- "J", "K", "L", "M", "N",
-
- # 50
- "O", "P", "Q", "R", "S",
- "T", "U", "V", "W", "X",
-
- # 60
- "Y", "Z", "bracketleft", "backslash", "bracketright",
- "asciicircum", "underscore", "grave", "a", "b",
-
- # 70
- "c", "d", "e", "f", "g",
- "h", "i", "j", "k", "l",
-
- # 80
- "m", "n", "o", "p", "q",
- "r", "s", "t", "u", "v",
-
- # 90
- "w", "x", "y", "z", "braceleft",
- "bar", "braceright", "asciitilde", "Adieresis", "Aring",
-
- # 100
- "Ccedilla", "Eacute", "Ntilde", "Odieresis", "Udieresis",
- "aacute", "agrave", "acircumflex", "adieresis", "atilde",
-
- # 110
- "aring", "ccedilla", "eacute", "egrave", "ecircumflex",
- "edieresis", "iacute", "igrave", "icircumflex", "idieresis",
-
- # 120
- "ntilde", "oacute", "ograve", "ocircumflex", "odieresis",
- "otilde", "uacute", "ugrave", "ucircumflex", "udieresis",
-
- # 130
- "dagger", "degree", "cent", "sterling", "section",
- "bullet", "paragraph", "germandbls", "registered", "copyright",
-
- # 140
- "trademark", "acute", "dieresis", "notequal", "AE",
- "Oslash", "infinity", "plusminus", "lessequal", "greaterequal",
-
- # 150
- "yen", "mu", "partialdiff", "summation", "product",
- "pi", "integral", "ordfeminine", "ordmasculine", "Omega",
-
- # 160
- "ae", "oslash", "questiondown", "exclamdown", "logicalnot",
- "radical", "florin", "approxequal", "Delta", "guillemotleft",
-
- # 170
- "guillemotright", "ellipsis", "nonbreakingspace", "Agrave", "Atilde",
- "Otilde", "OE", "oe", "endash", "emdash",
-
- # 180
- "quotedblleft", "quotedblright", "quoteleft", "quoteright", "divide",
- "lozenge", "ydieresis", "Ydieresis", "fraction", "currency",
-
- # 190
- "guilsinglleft", "guilsinglright", "fi", "fl", "daggerdbl",
- "periodcentered", "quotesinglbase", "quotedblbase", "perthousand",
+mac_standard_names = [
+ # 0
+ ".notdef", ".null", "nonmarkingreturn", "space", "exclam",
+ "quotedbl", "numbersign", "dollar", "percent", "ampersand",
+
+ # 10
+ "quotesingle", "parenleft", "parenright", "asterisk", "plus",
+ "comma", "hyphen", "period", "slash", "zero",
+
+ # 20
+ "one", "two", "three", "four", "five",
+ "six", "seven", "eight", "nine", "colon",
+
+ # 30
+ "semicolon", "less", "equal", "greater", "question",
+ "at", "A", "B", "C", "D",
+
+ # 40
+ "E", "F", "G", "H", "I",
+ "J", "K", "L", "M", "N",
+
+ # 50
+ "O", "P", "Q", "R", "S",
+ "T", "U", "V", "W", "X",
+
+ # 60
+ "Y", "Z", "bracketleft", "backslash", "bracketright",
+ "asciicircum", "underscore", "grave", "a", "b",
+
+ # 70
+ "c", "d", "e", "f", "g",
+ "h", "i", "j", "k", "l",
+
+ # 80
+ "m", "n", "o", "p", "q",
+ "r", "s", "t", "u", "v",
+
+ # 90
+ "w", "x", "y", "z", "braceleft",
+ "bar", "braceright", "asciitilde", "Adieresis", "Aring",
+
+ # 100
+ "Ccedilla", "Eacute", "Ntilde", "Odieresis", "Udieresis",
+ "aacute", "agrave", "acircumflex", "adieresis", "atilde",
+
+ # 110
+ "aring", "ccedilla", "eacute", "egrave", "ecircumflex",
+ "edieresis", "iacute", "igrave", "icircumflex", "idieresis",
+
+ # 120
+ "ntilde", "oacute", "ograve", "ocircumflex", "odieresis",
+ "otilde", "uacute", "ugrave", "ucircumflex", "udieresis",
+
+ # 130
+ "dagger", "degree", "cent", "sterling", "section",
+ "bullet", "paragraph", "germandbls", "registered", "copyright",
+
+ # 140
+ "trademark", "acute", "dieresis", "notequal", "AE",
+ "Oslash", "infinity", "plusminus", "lessequal", "greaterequal",
+
+ # 150
+ "yen", "mu", "partialdiff", "summation", "product",
+ "pi", "integral", "ordfeminine", "ordmasculine", "Omega",
+
+ # 160
+ "ae", "oslash", "questiondown", "exclamdown", "logicalnot",
+ "radical", "florin", "approxequal", "Delta", "guillemotleft",
+
+ # 170
+ "guillemotright", "ellipsis", "nonbreakingspace", "Agrave", "Atilde",
+ "Otilde", "OE", "oe", "endash", "emdash",
+
+ # 180
+ "quotedblleft", "quotedblright", "quoteleft", "quoteright", "divide",
+ "lozenge", "ydieresis", "Ydieresis", "fraction", "currency",
+
+ # 190
+ "guilsinglleft", "guilsinglright", "fi", "fl", "daggerdbl",
+ "periodcentered", "quotesinglbase", "quotedblbase", "perthousand",
"Acircumflex",
- # 200
- "Ecircumflex", "Aacute", "Edieresis", "Egrave", "Iacute",
- "Icircumflex", "Idieresis", "Igrave", "Oacute", "Ocircumflex",
+ # 200
+ "Ecircumflex", "Aacute", "Edieresis", "Egrave", "Iacute",
+ "Icircumflex", "Idieresis", "Igrave", "Oacute", "Ocircumflex",
- # 210
- "apple", "Ograve", "Uacute", "Ucircumflex", "Ugrave",
- "dotlessi", "circumflex", "tilde", "macron", "breve",
+ # 210
+ "apple", "Ograve", "Uacute", "Ucircumflex", "Ugrave",
+ "dotlessi", "circumflex", "tilde", "macron", "breve",
- # 220
- "dotaccent", "ring", "cedilla", "hungarumlaut", "ogonek",
- "caron", "Lslash", "lslash", "Scaron", "scaron",
+ # 220
+ "dotaccent", "ring", "cedilla", "hungarumlaut", "ogonek",
+ "caron", "Lslash", "lslash", "Scaron", "scaron",
- # 230
- "Zcaron", "zcaron", "brokenbar", "Eth", "eth",
- "Yacute", "yacute", "Thorn", "thorn", "minus",
+ # 230
+ "Zcaron", "zcaron", "brokenbar", "Eth", "eth",
+ "Yacute", "yacute", "Thorn", "thorn", "minus",
- # 240
- "multiply", "onesuperior", "twosuperior", "threesuperior", "onehalf",
- "onequarter", "threequarters", "franc", "Gbreve", "gbreve",
+ # 240
+ "multiply", "onesuperior", "twosuperior", "threesuperior", "onehalf",
+ "onequarter", "threequarters", "franc", "Gbreve", "gbreve",
- # 250
- "Idotaccent", "Scedilla", "scedilla", "Cacute", "cacute",
- "Ccaron", "ccaron", "dcroat"
+ # 250
+ "Idotaccent", "Scedilla", "scedilla", "Cacute", "cacute",
+ "Ccaron", "ccaron", "dcroat"
]
-
# The list of standard `SID' glyph names. For the official list,
# see Annex A of document at
#
-# https://www.adobe.com/content/dam/acom/en/devnet/font/pdfs/5176.CFF.pdf .
+# https://www.adobe.com/content/dam/acom/en/devnet/font/pdfs/5176.CFF.pdf
#
-sid_standard_names = \
-[
- # 0
- ".notdef", "space", "exclam", "quotedbl", "numbersign",
- "dollar", "percent", "ampersand", "quoteright", "parenleft",
-
- # 10
- "parenright", "asterisk", "plus", "comma", "hyphen",
- "period", "slash", "zero", "one", "two",
-
- # 20
- "three", "four", "five", "six", "seven",
- "eight", "nine", "colon", "semicolon", "less",
-
- # 30
- "equal", "greater", "question", "at", "A",
- "B", "C", "D", "E", "F",
-
- # 40
- "G", "H", "I", "J", "K",
- "L", "M", "N", "O", "P",
-
- # 50
- "Q", "R", "S", "T", "U",
- "V", "W", "X", "Y", "Z",
-
- # 60
- "bracketleft", "backslash", "bracketright", "asciicircum", "underscore",
- "quoteleft", "a", "b", "c", "d",
-
- # 70
- "e", "f", "g", "h", "i",
- "j", "k", "l", "m", "n",
-
- # 80
- "o", "p", "q", "r", "s",
- "t", "u", "v", "w", "x",
-
- # 90
- "y", "z", "braceleft", "bar", "braceright",
- "asciitilde", "exclamdown", "cent", "sterling", "fraction",
-
- # 100
- "yen", "florin", "section", "currency", "quotesingle",
- "quotedblleft", "guillemotleft", "guilsinglleft", "guilsinglright", "fi",
-
- # 110
- "fl", "endash", "dagger", "daggerdbl", "periodcentered",
- "paragraph", "bullet", "quotesinglbase", "quotedblbase", "quotedblright",
-
- # 120
- "guillemotright", "ellipsis", "perthousand", "questiondown", "grave",
- "acute", "circumflex", "tilde", "macron", "breve",
-
- # 130
- "dotaccent", "dieresis", "ring", "cedilla", "hungarumlaut",
- "ogonek", "caron", "emdash", "AE", "ordfeminine",
-
- # 140
- "Lslash", "Oslash", "OE", "ordmasculine", "ae",
- "dotlessi", "lslash", "oslash", "oe", "germandbls",
-
- # 150
- "onesuperior", "logicalnot", "mu", "trademark", "Eth",
- "onehalf", "plusminus", "Thorn", "onequarter", "divide",
-
- # 160
- "brokenbar", "degree", "thorn", "threequarters", "twosuperior",
- "registered", "minus", "eth", "multiply", "threesuperior",
-
- # 170
- "copyright", "Aacute", "Acircumflex", "Adieresis", "Agrave",
- "Aring", "Atilde", "Ccedilla", "Eacute", "Ecircumflex",
-
- # 180
- "Edieresis", "Egrave", "Iacute", "Icircumflex", "Idieresis",
- "Igrave", "Ntilde", "Oacute", "Ocircumflex", "Odieresis",
-
- # 190
- "Ograve", "Otilde", "Scaron", "Uacute", "Ucircumflex",
- "Udieresis", "Ugrave", "Yacute", "Ydieresis", "Zcaron",
-
- # 200
- "aacute", "acircumflex", "adieresis", "agrave", "aring",
- "atilde", "ccedilla", "eacute", "ecircumflex", "edieresis",
-
- # 210
- "egrave", "iacute", "icircumflex", "idieresis", "igrave",
- "ntilde", "oacute", "ocircumflex", "odieresis", "ograve",
-
- # 220
- "otilde", "scaron", "uacute", "ucircumflex", "udieresis",
- "ugrave", "yacute", "ydieresis", "zcaron", "exclamsmall",
-
- # 230
- "Hungarumlautsmall", "dollaroldstyle", "dollarsuperior", "ampersandsmall",
+sid_standard_names = [
+ # 0
+ ".notdef", "space", "exclam", "quotedbl", "numbersign",
+ "dollar", "percent", "ampersand", "quoteright", "parenleft",
+
+ # 10
+ "parenright", "asterisk", "plus", "comma", "hyphen",
+ "period", "slash", "zero", "one", "two",
+
+ # 20
+ "three", "four", "five", "six", "seven",
+ "eight", "nine", "colon", "semicolon", "less",
+
+ # 30
+ "equal", "greater", "question", "at", "A",
+ "B", "C", "D", "E", "F",
+
+ # 40
+ "G", "H", "I", "J", "K",
+ "L", "M", "N", "O", "P",
+
+ # 50
+ "Q", "R", "S", "T", "U",
+ "V", "W", "X", "Y", "Z",
+
+ # 60
+ "bracketleft", "backslash", "bracketright", "asciicircum", "underscore",
+ "quoteleft", "a", "b", "c", "d",
+
+ # 70
+ "e", "f", "g", "h", "i",
+ "j", "k", "l", "m", "n",
+
+ # 80
+ "o", "p", "q", "r", "s",
+ "t", "u", "v", "w", "x",
+
+ # 90
+ "y", "z", "braceleft", "bar", "braceright",
+ "asciitilde", "exclamdown", "cent", "sterling", "fraction",
+
+ # 100
+ "yen", "florin", "section", "currency", "quotesingle",
+ "quotedblleft", "guillemotleft", "guilsinglleft", "guilsinglright", "fi",
+
+ # 110
+ "fl", "endash", "dagger", "daggerdbl", "periodcentered",
+ "paragraph", "bullet", "quotesinglbase", "quotedblbase", "quotedblright",
+
+ # 120
+ "guillemotright", "ellipsis", "perthousand", "questiondown", "grave",
+ "acute", "circumflex", "tilde", "macron", "breve",
+
+ # 130
+ "dotaccent", "dieresis", "ring", "cedilla", "hungarumlaut",
+ "ogonek", "caron", "emdash", "AE", "ordfeminine",
+
+ # 140
+ "Lslash", "Oslash", "OE", "ordmasculine", "ae",
+ "dotlessi", "lslash", "oslash", "oe", "germandbls",
+
+ # 150
+ "onesuperior", "logicalnot", "mu", "trademark", "Eth",
+ "onehalf", "plusminus", "Thorn", "onequarter", "divide",
+
+ # 160
+ "brokenbar", "degree", "thorn", "threequarters", "twosuperior",
+ "registered", "minus", "eth", "multiply", "threesuperior",
+
+ # 170
+ "copyright", "Aacute", "Acircumflex", "Adieresis", "Agrave",
+ "Aring", "Atilde", "Ccedilla", "Eacute", "Ecircumflex",
+
+ # 180
+ "Edieresis", "Egrave", "Iacute", "Icircumflex", "Idieresis",
+ "Igrave", "Ntilde", "Oacute", "Ocircumflex", "Odieresis",
+
+ # 190
+ "Ograve", "Otilde", "Scaron", "Uacute", "Ucircumflex",
+ "Udieresis", "Ugrave", "Yacute", "Ydieresis", "Zcaron",
+
+ # 200
+ "aacute", "acircumflex", "adieresis", "agrave", "aring",
+ "atilde", "ccedilla", "eacute", "ecircumflex", "edieresis",
+
+ # 210
+ "egrave", "iacute", "icircumflex", "idieresis", "igrave",
+ "ntilde", "oacute", "ocircumflex", "odieresis", "ograve",
+
+ # 220
+ "otilde", "scaron", "uacute", "ucircumflex", "udieresis",
+ "ugrave", "yacute", "ydieresis", "zcaron", "exclamsmall",
+
+ # 230
+ "Hungarumlautsmall", "dollaroldstyle", "dollarsuperior", "ampersandsmall",
"Acutesmall",
- "parenleftsuperior", "parenrightsuperior", "twodotenleader",
+ "parenleftsuperior", "parenrightsuperior", "twodotenleader",
"onedotenleader", "zerooldstyle",
- # 240
- "oneoldstyle", "twooldstyle", "threeoldstyle", "fouroldstyle",
+ # 240
+ "oneoldstyle", "twooldstyle", "threeoldstyle", "fouroldstyle",
"fiveoldstyle",
- "sixoldstyle", "sevenoldstyle", "eightoldstyle", "nineoldstyle",
+ "sixoldstyle", "sevenoldstyle", "eightoldstyle", "nineoldstyle",
"commasuperior",
- # 250
- "threequartersemdash", "periodsuperior", "questionsmall", "asuperior",
+ # 250
+ "threequartersemdash", "periodsuperior", "questionsmall", "asuperior",
"bsuperior",
- "centsuperior", "dsuperior", "esuperior", "isuperior", "lsuperior",
+ "centsuperior", "dsuperior", "esuperior", "isuperior", "lsuperior",
- # 260
- "msuperior", "nsuperior", "osuperior", "rsuperior", "ssuperior",
- "tsuperior", "ff", "ffi", "ffl", "parenleftinferior",
+ # 260
+ "msuperior", "nsuperior", "osuperior", "rsuperior", "ssuperior",
+ "tsuperior", "ff", "ffi", "ffl", "parenleftinferior",
- # 270
- "parenrightinferior", "Circumflexsmall", "hyphensuperior", "Gravesmall",
+ # 270
+ "parenrightinferior", "Circumflexsmall", "hyphensuperior", "Gravesmall",
"Asmall",
- "Bsmall", "Csmall", "Dsmall", "Esmall", "Fsmall",
+ "Bsmall", "Csmall", "Dsmall", "Esmall", "Fsmall",
- # 280
- "Gsmall", "Hsmall", "Ismall", "Jsmall", "Ksmall",
- "Lsmall", "Msmall", "Nsmall", "Osmall", "Psmall",
+ # 280
+ "Gsmall", "Hsmall", "Ismall", "Jsmall", "Ksmall",
+ "Lsmall", "Msmall", "Nsmall", "Osmall", "Psmall",
- # 290
- "Qsmall", "Rsmall", "Ssmall", "Tsmall", "Usmall",
- "Vsmall", "Wsmall", "Xsmall", "Ysmall", "Zsmall",
+ # 290
+ "Qsmall", "Rsmall", "Ssmall", "Tsmall", "Usmall",
+ "Vsmall", "Wsmall", "Xsmall", "Ysmall", "Zsmall",
- # 300
- "colonmonetary", "onefitted", "rupiah", "Tildesmall", "exclamdownsmall",
- "centoldstyle", "Lslashsmall", "Scaronsmall", "Zcaronsmall",
+ # 300
+ "colonmonetary", "onefitted", "rupiah", "Tildesmall", "exclamdownsmall",
+ "centoldstyle", "Lslashsmall", "Scaronsmall", "Zcaronsmall",
"Dieresissmall",
- # 310
- "Brevesmall", "Caronsmall", "Dotaccentsmall", "Macronsmall", "figuredash",
- "hypheninferior", "Ogoneksmall", "Ringsmall", "Cedillasmall",
+ # 310
+ "Brevesmall", "Caronsmall", "Dotaccentsmall", "Macronsmall", "figuredash",
+ "hypheninferior", "Ogoneksmall", "Ringsmall", "Cedillasmall",
"questiondownsmall",
- # 320
- "oneeighth", "threeeighths", "fiveeighths", "seveneighths", "onethird",
- "twothirds", "zerosuperior", "foursuperior", "fivesuperior",
+ # 320
+ "oneeighth", "threeeighths", "fiveeighths", "seveneighths", "onethird",
+ "twothirds", "zerosuperior", "foursuperior", "fivesuperior",
"sixsuperior",
- # 330
- "sevensuperior", "eightsuperior", "ninesuperior", "zeroinferior",
+ # 330
+ "sevensuperior", "eightsuperior", "ninesuperior", "zeroinferior",
"oneinferior",
- "twoinferior", "threeinferior", "fourinferior", "fiveinferior",
+ "twoinferior", "threeinferior", "fourinferior", "fiveinferior",
"sixinferior",
- # 340
- "seveninferior", "eightinferior", "nineinferior", "centinferior",
+ # 340
+ "seveninferior", "eightinferior", "nineinferior", "centinferior",
"dollarinferior",
- "periodinferior", "commainferior", "Agravesmall", "Aacutesmall",
+ "periodinferior", "commainferior", "Agravesmall", "Aacutesmall",
"Acircumflexsmall",
- # 350
- "Atildesmall", "Adieresissmall", "Aringsmall", "AEsmall", "Ccedillasmall",
- "Egravesmall", "Eacutesmall", "Ecircumflexsmall", "Edieresissmall",
+ # 350
+ "Atildesmall", "Adieresissmall", "Aringsmall", "AEsmall", "Ccedillasmall",
+ "Egravesmall", "Eacutesmall", "Ecircumflexsmall", "Edieresissmall",
"Igravesmall",
- # 360
- "Iacutesmall", "Icircumflexsmall", "Idieresissmall", "Ethsmall",
+ # 360
+ "Iacutesmall", "Icircumflexsmall", "Idieresissmall", "Ethsmall",
"Ntildesmall",
- "Ogravesmall", "Oacutesmall", "Ocircumflexsmall", "Otildesmall",
+ "Ogravesmall", "Oacutesmall", "Ocircumflexsmall", "Otildesmall",
"Odieresissmall",
- # 370
- "OEsmall", "Oslashsmall", "Ugravesmall", "Uacutesmall",
+ # 370
+ "OEsmall", "Oslashsmall", "Ugravesmall", "Uacutesmall",
"Ucircumflexsmall",
- "Udieresissmall", "Yacutesmall", "Thornsmall", "Ydieresissmall",
+ "Udieresissmall", "Yacutesmall", "Thornsmall", "Ydieresissmall",
"001.000",
- # 380
- "001.001", "001.002", "001.003", "Black", "Bold",
- "Book", "Light", "Medium", "Regular", "Roman",
+ # 380
+ "001.001", "001.002", "001.003", "Black", "Bold",
+ "Book", "Light", "Medium", "Regular", "Roman",
- # 390
- "Semibold"
+ # 390
+ "Semibold"
]
-
# This table maps character codes of the Adobe Standard Type 1
# encoding to glyph indices in the sid_standard_names table.
#
-t1_standard_encoding = \
-[
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 1, 2, 3, 4, 5, 6, 7, 8,
- 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
-
- 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
- 29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
- 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
- 49, 50, 51, 52, 53, 54, 55, 56, 57, 58,
- 59, 60, 61, 62, 63, 64, 65, 66, 67, 68,
-
- 69, 70, 71, 72, 73, 74, 75, 76, 77, 78,
- 79, 80, 81, 82, 83, 84, 85, 86, 87, 88,
- 89, 90, 91, 92, 93, 94, 95, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 96, 97, 98, 99, 100, 101, 102, 103, 104,
- 105, 106, 107, 108, 109, 110, 0, 111, 112, 113,
- 114, 0, 115, 116, 117, 118, 119, 120, 121, 122,
- 0, 123, 0, 124, 125, 126, 127, 128, 129, 130,
-
- 131, 0, 132, 133, 0, 134, 135, 136, 137, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 138, 0, 139, 0, 0,
- 0, 0, 140, 141, 142, 143, 0, 0, 0, 0,
- 0, 144, 0, 0, 0, 145, 0, 0, 146, 147,
-
- 148, 149, 0, 0, 0, 0
+t1_standard_encoding = [
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+
+ 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
+ 29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
+ 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, 52, 53, 54, 55, 56, 57, 58,
+ 59, 60, 61, 62, 63, 64, 65, 66, 67, 68,
+
+ 69, 70, 71, 72, 73, 74, 75, 76, 77, 78,
+ 79, 80, 81, 82, 83, 84, 85, 86, 87, 88,
+ 89, 90, 91, 92, 93, 94, 95, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 96, 97, 98, 99, 100, 101, 102, 103, 104,
+ 105, 106, 107, 108, 109, 110, 0, 111, 112, 113,
+ 114, 0, 115, 116, 117, 118, 119, 120, 121, 122,
+ 0, 123, 0, 124, 125, 126, 127, 128, 129, 130,
+
+ 131, 0, 132, 133, 0, 134, 135, 136, 137, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 138, 0, 139, 0, 0,
+ 0, 0, 140, 141, 142, 143, 0, 0, 0, 0,
+ 0, 144, 0, 0, 0, 145, 0, 0, 146, 147,
+
+ 148, 149, 0, 0, 0, 0
]
-
# This table maps character codes of the Adobe Expert Type 1
# encoding to glyph indices in the sid_standard_names table.
#
-t1_expert_encoding = \
-[
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 1, 229, 230, 0, 231, 232, 233, 234,
- 235, 236, 237, 238, 13, 14, 15, 99, 239, 240,
-
- 241, 242, 243, 244, 245, 246, 247, 248, 27, 28,
- 249, 250, 251, 252, 0, 253, 254, 255, 256, 257,
- 0, 0, 0, 258, 0, 0, 259, 260, 261, 262,
- 0, 0, 263, 264, 265, 0, 266, 109, 110, 267,
- 268, 269, 0, 270, 271, 272, 273, 274, 275, 276,
-
- 277, 278, 279, 280, 281, 282, 283, 284, 285, 286,
- 287, 288, 289, 290, 291, 292, 293, 294, 295, 296,
- 297, 298, 299, 300, 301, 302, 303, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 304, 305, 306, 0, 0, 307, 308, 309, 310,
- 311, 0, 312, 0, 0, 313, 0, 0, 314, 315,
- 0, 0, 316, 317, 318, 0, 0, 0, 158, 155,
- 163, 319, 320, 321, 322, 323, 324, 325, 0, 0,
-
- 326, 150, 164, 169, 327, 328, 329, 330, 331, 332,
- 333, 334, 335, 336, 337, 338, 339, 340, 341, 342,
- 343, 344, 345, 346, 347, 348, 349, 350, 351, 352,
- 353, 354, 355, 356, 357, 358, 359, 360, 361, 362,
- 363, 364, 365, 366, 367, 368, 369, 370, 371, 372,
-
- 373, 374, 375, 376, 377, 378
+t1_expert_encoding = [
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 1, 229, 230, 0, 231, 232, 233, 234,
+ 235, 236, 237, 238, 13, 14, 15, 99, 239, 240,
+
+ 241, 242, 243, 244, 245, 246, 247, 248, 27, 28,
+ 249, 250, 251, 252, 0, 253, 254, 255, 256, 257,
+ 0, 0, 0, 258, 0, 0, 259, 260, 261, 262,
+ 0, 0, 263, 264, 265, 0, 266, 109, 110, 267,
+ 268, 269, 0, 270, 271, 272, 273, 274, 275, 276,
+
+ 277, 278, 279, 280, 281, 282, 283, 284, 285, 286,
+ 287, 288, 289, 290, 291, 292, 293, 294, 295, 296,
+ 297, 298, 299, 300, 301, 302, 303, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 304, 305, 306, 0, 0, 307, 308, 309, 310,
+ 311, 0, 312, 0, 0, 313, 0, 0, 314, 315,
+ 0, 0, 316, 317, 318, 0, 0, 0, 158, 155,
+ 163, 319, 320, 321, 322, 323, 324, 325, 0, 0,
+
+ 326, 150, 164, 169, 327, 328, 329, 330, 331, 332,
+ 333, 334, 335, 336, 337, 338, 339, 340, 341, 342,
+ 343, 344, 345, 346, 347, 348, 349, 350, 351, 352,
+ 353, 354, 355, 356, 357, 358, 359, 360, 361, 362,
+ 363, 364, 365, 366, 367, 368, 369, 370, 371, 372,
+
+ 373, 374, 375, 376, 377, 378
]
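
The two encoding arrays above are simple index tables: entry `c' holds the position of character code `c''s glyph inside sid_standard_names, with 0 standing for ".notdef". A minimal lookup sketch, assuming the two lists above are in scope (the helper name is ours, for illustration only):

    def standard_glyph_name(charcode):
        # out-of-range codes and 0 entries both resolve to ".notdef"
        if 0 <= charcode < len(t1_standard_encoding):
            return sid_standard_names[t1_standard_encoding[charcode]]
        return sid_standard_names[0]

    # standard_glyph_name(ord("A")) == "A"
    # standard_glyph_name(0x1F)     == ".notdef"
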
-
# This data has been taken literally from the files `glyphlist.txt'
# and `zapfdingbats.txt' version 2.0, Sept 2002. It is available from
#
@@ -4906,81 +4894,81 @@ a9;2720
# string table management
#
class StringTable:
- def __init__( self, name_list, master_table_name ):
- self.names = name_list
- self.master_table = master_table_name
- self.indices = {}
- index = 0
-
- for name in name_list:
- self.indices[name] = index
- index += len( name ) + 1
-
- self.total = index
-
- def dump( self, file ):
- write = file.write
- write( "#ifndef DEFINE_PS_TABLES_DATA\n" )
- write( "#ifdef __cplusplus\n" )
- write( ' extern "C"\n' )
- write( "#else\n" )
- write( " extern\n" )
- write( "#endif\n" )
- write( "#endif\n" )
- write( " const char " + self.master_table +
- "[" + repr( self.total ) + "]\n" )
- write( "#ifdef DEFINE_PS_TABLES_DATA\n" )
- write( " =\n" )
- write( " {\n" )
-
- line = ""
- for name in self.names:
- line += " '"
- line += string.join( ( re.findall( ".", name ) ), "','" )
- line += "', 0,\n"
-
- write( line )
- write( " }\n" )
- write( "#endif /* DEFINE_PS_TABLES_DATA */\n" )
- write( " ;\n\n\n" )
-
- def dump_sublist( self, file, table_name, macro_name, sublist ):
- write = file.write
- write( "#define " + macro_name + " " + repr( len( sublist ) ) + "\n\n" )
-
- write( " /* Values are offsets into the `" +
- self.master_table + "' table */\n\n" )
- write( "#ifndef DEFINE_PS_TABLES_DATA\n" )
- write( "#ifdef __cplusplus\n" )
- write( ' extern "C"\n' )
- write( "#else\n" )
- write( " extern\n" )
- write( "#endif\n" )
- write( "#endif\n" )
- write( " const short " + table_name +
- "[" + macro_name + "]\n" )
- write( "#ifdef DEFINE_PS_TABLES_DATA\n" )
- write( " =\n" )
- write( " {\n" )
-
- line = " "
- comma = ""
- col = 0
-
- for name in sublist:
- line += comma
- line += "%4d" % self.indices[name]
- col += 1
- comma = ","
- if col == 14:
- col = 0
- comma = ",\n "
-
- write( line )
- write( "\n" )
- write( " }\n" )
- write( "#endif /* DEFINE_PS_TABLES_DATA */\n" )
- write( " ;\n\n\n" )
+ def __init__(self, name_list, master_table_name):
+ self.names = name_list
+ self.master_table = master_table_name
+ self.indices = {}
+ index = 0
+
+ for name in name_list:
+ self.indices[name] = index
+ index += len(name) + 1
+
+ self.total = index
+
+ def dump(self, file):
+ write = file.write
+ write("#ifndef DEFINE_PS_TABLES_DATA\n")
+ write("#ifdef __cplusplus\n")
+ write(' extern "C"\n')
+ write("#else\n")
+ write(" extern\n")
+ write("#endif\n")
+ write("#endif\n")
+ write(" const char " + self.master_table +
+ "[" + repr(self.total) + "]\n")
+ write("#ifdef DEFINE_PS_TABLES_DATA\n")
+ write(" =\n")
+ write(" {\n")
+
+ line = ""
+ for name in self.names:
+ line += " '"
+ line += "','".join(list(name))
+ line += "', 0,\n"
+
+ write(line)
+ write(" }\n")
+ write("#endif /* DEFINE_PS_TABLES_DATA */\n")
+ write(" ;\n\n\n")
+
+ def dump_sublist(self, file, table_name, macro_name, sublist):
+ write = file.write
+ write("#define " + macro_name + " " + repr(len(sublist)) + "\n\n")
+
+ write(" /* Values are offsets into the `" +
+ self.master_table + "' table */\n\n")
+ write("#ifndef DEFINE_PS_TABLES_DATA\n")
+ write("#ifdef __cplusplus\n")
+ write(' extern "C"\n')
+ write("#else\n")
+ write(" extern\n")
+ write("#endif\n")
+ write("#endif\n")
+ write(" const short " + table_name +
+ "[" + macro_name + "]\n")
+ write("#ifdef DEFINE_PS_TABLES_DATA\n")
+ write(" =\n")
+ write(" {\n")
+
+ line = " "
+ comma = ""
+ col = 0
+
+ for name in sublist:
+ line += comma
+ line += "%4d" % self.indices[name]
+ col += 1
+ comma = ","
+ if col == 14:
+ col = 0
+ comma = ",\n "
+
+ write(line)
+ write("\n")
+ write(" }\n")
+ write("#endif /* DEFINE_PS_TABLES_DATA */\n")
+ write(" ;\n\n\n")
# We now store the Adobe Glyph List in compressed form. The list is put
@@ -5059,307 +5047,312 @@ class StringTable:
# The root node has first letter = 0, and no value.
#
class StringNode:
- def __init__( self, letter, value ):
- self.letter = letter
- self.value = value
- self.children = {}
-
- def __cmp__( self, other ):
- return ord( self.letter[0] ) - ord( other.letter[0] )
-
- def add( self, word, value ):
- if len( word ) == 0:
- self.value = value
- return
-
- letter = word[0]
- word = word[1:]
+ def __init__(self, letter, value):
+ self.letter = letter
+ self.value = value
+ self.children = {}
+
+ def __cmp__(self, other):
+ return ord(self.letter[0]) - ord(other.letter[0])
+
+ def __lt__(self, other):
+ return self.letter[0] < other.letter[0]
+
+ def add(self, word, value):
+ if len(word) == 0:
+ self.value = value
+ return
+
+ letter = word[0]
+ word = word[1:]
- if self.children.has_key( letter ):
- child = self.children[letter]
- else:
- child = StringNode( letter, 0 )
- self.children[letter] = child
+ if letter in self.children:
+ child = self.children[letter]
+ else:
+ child = StringNode(letter, 0)
+ self.children[letter] = child
- child.add( word, value )
+ child.add(word, value)
- def optimize( self ):
- # optimize all children first
- children = self.children.values()
- self.children = {}
+ def optimize(self):
+ # optimize all children first
+ children = list(self.children.values())
+ self.children = {}
- for child in children:
- self.children[child.letter[0]] = child.optimize()
+ for child in children:
+ self.children[child.letter[0]] = child.optimize()
- # don't optimize if there's a value,
- # if we don't have any child or if we
- # have more than one child
- if ( self.value != 0 ) or ( not children ) or len( children ) > 1:
- return self
+ # don't optimize if there's a value,
+ # if we don't have any child or if we
+ # have more than one child
+ if (self.value != 0) or (not children) or len(children) > 1:
+ return self
- child = children[0]
+ child = children[0]
- self.letter += child.letter
- self.value = child.value
- self.children = child.children
+ self.letter += child.letter
+ self.value = child.value
+ self.children = child.children
- return self
+ return self
- def dump_debug( self, write, margin ):
- # this is used during debugging
- line = margin + "+-"
- if len( self.letter ) == 0:
- line += "<NOLETTER>"
- else:
- line += self.letter
+ def dump_debug(self, write, margin):
+ # this is used during debugging
+ line = margin + "+-"
+ if len(self.letter) == 0:
+ line += "<NOLETTER>"
+ else:
+ line += self.letter
- if self.value:
- line += " => " + repr( self.value )
+ if self.value:
+ line += " => " + repr(self.value)
- write( line + "\n" )
+ write(line + "\n")
- if self.children:
- margin += "| "
- for child in self.children.values():
- child.dump_debug( write, margin )
+ if self.children:
+ margin += "| "
+ for child in self.children.values():
+ child.dump_debug(write, margin)
- def locate( self, index ):
- self.index = index
- if len( self.letter ) > 0:
- index += len( self.letter ) + 1
- else:
- index += 2
+ def locate(self, index):
+ self.index = index
+ if len(self.letter) > 0:
+ index += len(self.letter) + 1
+ else:
+ index += 2
- if self.value != 0:
- index += 2
+ if self.value != 0:
+ index += 2
- children = self.children.values()
- children.sort()
+ children = list(self.children.values())
+ children.sort()
- index += 2 * len( children )
- for child in children:
- index = child.locate( index )
+ index += 2 * len(children)
+ for child in children:
+ index = child.locate(index)
- return index
+ return index
- def store( self, storage ):
- # write the letters
- l = len( self.letter )
- if l == 0:
- storage += struct.pack( "B", 0 )
- else:
- for n in range( l ):
- val = ord( self.letter[n] )
- if n < l - 1:
- val += 128
- storage += struct.pack( "B", val )
+ def store(self, storage):
+ # write the letters
+ length = len(self.letter)
+ if length == 0:
+ storage += struct.pack("B", 0)
+ else:
+ for n in range(length):
+ val = ord(self.letter[n])
+ if n < length - 1:
+ val += 128
+ storage += struct.pack("B", val)
- # write the count
- children = self.children.values()
- children.sort()
+ # write the count
+ children = list(self.children.values())
+ children.sort()
- count = len( children )
+ count = len(children)
- if self.value != 0:
- storage += struct.pack( "!BH", count + 128, self.value )
- else:
- storage += struct.pack( "B", count )
+ if self.value != 0:
+ storage += struct.pack("!BH", count + 128, self.value)
+ else:
+ storage += struct.pack("B", count)
- for child in children:
- storage += struct.pack( "!H", child.index )
+ for child in children:
+ storage += struct.pack("!H", child.index)
- for child in children:
- storage = child.store( storage )
+ for child in children:
+ storage = child.store(storage)
- return storage
+ return storage
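
A small sketch of the prefix compression that optimize() performs, on toy input:

    root = StringNode("", 0)
    root.add("Aacute", 0x00C1)
    root.add("Abreve", 0x0102)
    root = root.optimize()
    # Single-child chains collapse into one node per shared run: the
    # valueless root merges with its only child into a node with letter
    # "A", whose children "acute" (0x00C1) and "breve" (0x0102) carry the
    # remaining suffixes and their values.
    assert root.letter == "A" and sorted(root.children) == ["a", "b"]
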
def adobe_glyph_values():
- """return the list of glyph names and their unicode values"""
-
- lines = string.split( adobe_glyph_list, '\n' )
- glyphs = []
- values = []
-
- for line in lines:
- if line:
- fields = string.split( line, ';' )
-# print fields[1] + ' - ' + fields[0]
- subfields = string.split( fields[1], ' ' )
- if len( subfields ) == 1:
- glyphs.append( fields[0] )
- values.append( fields[1] )
-
- return glyphs, values
-
-
-def filter_glyph_names( alist, filter ):
- """filter `alist' by taking _out_ all glyph names that are in `filter'"""
-
- count = 0
- extras = []
-
- for name in alist:
- try:
- filtered_index = filter.index( name )
- except:
- extras.append( name )
-
- return extras
-
-
-def dump_encoding( file, encoding_name, encoding_list ):
- """dump a given encoding"""
-
- write = file.write
- write( " /* the following are indices into the SID name table */\n" )
- write( "#ifndef DEFINE_PS_TABLES_DATA\n" )
- write( "#ifdef __cplusplus\n" )
- write( ' extern "C"\n' )
- write( "#else\n" )
- write( " extern\n" )
- write( "#endif\n" )
- write( "#endif\n" )
- write( " const unsigned short " + encoding_name +
- "[" + repr( len( encoding_list ) ) + "]\n" )
- write( "#ifdef DEFINE_PS_TABLES_DATA\n" )
- write( " =\n" )
- write( " {\n" )
-
- line = " "
- comma = ""
- col = 0
- for value in encoding_list:
- line += comma
- line += "%3d" % value
- comma = ","
- col += 1
- if col == 16:
- col = 0
- comma = ",\n "
-
- write( line )
- write( "\n" )
- write( " }\n" )
- write( "#endif /* DEFINE_PS_TABLES_DATA */\n" )
- write( " ;\n\n\n" )
-
-
-def dump_array( the_array, write, array_name ):
- """dumps a given encoding"""
-
- write( "#ifndef DEFINE_PS_TABLES_DATA\n" )
- write( "#ifdef __cplusplus\n" )
- write( ' extern "C"\n' )
- write( "#else\n" )
- write( " extern\n" )
- write( "#endif\n" )
- write( "#endif\n" )
- write( " const unsigned char " + array_name +
- "[" + repr( len( the_array ) ) + "L]\n" )
- write( "#ifdef DEFINE_PS_TABLES_DATA\n" )
- write( " =\n" )
- write( " {\n" )
-
- line = ""
- comma = " "
- col = 0
-
- for value in the_array:
- line += comma
- line += "%3d" % ord( value )
- comma = ","
- col += 1
-
- if col == 16:
- col = 0
- comma = ",\n "
-
- if len( line ) > 1024:
- write( line )
- line = ""
-
- write( line )
- write( "\n" )
- write( " }\n" )
- write( "#endif /* DEFINE_PS_TABLES_DATA */\n" )
- write( " ;\n\n\n" )
+ """return the list of glyph names and their unicode values"""
+
+ lines = adobe_glyph_list.split("\n")
+ glyphs = []
+ values = []
+
+ for line in lines:
+ if line:
+ fields = line.split(';')
+ # print fields[1] + ' - ' + fields[0]
+ subfields = fields[1].split(' ')
+ if len(subfields) == 1:
+ glyphs.append(fields[0])
+ values.append(fields[1])
+
+ return glyphs, values
+
+
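
A quick sanity check of the parser against a hypothetical two-entry AGL excerpt (rebinding the module-level adobe_glyph_list purely for illustration):

    adobe_glyph_list = "Aacute;00C1\nfi;0066 0069"
    glyphs, values = adobe_glyph_values()
    assert glyphs == ["Aacute"] and values == ["00C1"]
    # the "fi" entry maps to two code points, so it is skipped
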
+def filter_glyph_names(alist, filter):
+    """filter `alist' by taking _out_ all glyph names that are in `filter'"""
+
+    extras = []
+
+    for name in alist:
+        try:
+            filter.index(name)
+        except ValueError:
+            extras.append(name)
+
+    return extras
+
+
+def dump_encoding(file, encoding_name, encoding_list):
+ """dump a given encoding"""
+
+ write = file.write
+ write(" /* the following are indices into the SID name table */\n")
+ write("#ifndef DEFINE_PS_TABLES_DATA\n")
+ write("#ifdef __cplusplus\n")
+ write(' extern "C"\n')
+ write("#else\n")
+ write(" extern\n")
+ write("#endif\n")
+ write("#endif\n")
+ write(" const unsigned short " + encoding_name +
+ "[" + repr(len(encoding_list)) + "]\n")
+ write("#ifdef DEFINE_PS_TABLES_DATA\n")
+ write(" =\n")
+ write(" {\n")
+
+ line = " "
+ comma = ""
+ col = 0
+ for value in encoding_list:
+ line += comma
+ line += "%3d" % value
+ comma = ","
+ col += 1
+ if col == 16:
+ col = 0
+ comma = ",\n "
+
+ write(line)
+ write("\n")
+ write(" }\n")
+ write("#endif /* DEFINE_PS_TABLES_DATA */\n")
+ write(" ;\n\n\n")
+
+
+def dump_array(the_array, write, array_name):
+ """dumps a given encoding"""
+
+ write("#ifndef DEFINE_PS_TABLES_DATA\n")
+ write("#ifdef __cplusplus\n")
+ write(' extern "C"\n')
+ write("#else\n")
+ write(" extern\n")
+ write("#endif\n")
+ write("#endif\n")
+ write(" const unsigned char " + array_name +
+ "[" + repr(len(the_array)) + "L]\n")
+ write("#ifdef DEFINE_PS_TABLES_DATA\n")
+ write(" =\n")
+ write(" {\n")
+
+ line = ""
+ comma = " "
+ col = 0
+
+ for value in the_array:
+ line += comma
+ line += "%3d" % value
+ comma = ","
+ col += 1
+
+ if col == 16:
+ col = 0
+ comma = ",\n "
+
+ if len(line) > 1024:
+ write(line)
+ line = ""
+
+ write(line)
+ write("\n")
+ write(" }\n")
+ write("#endif /* DEFINE_PS_TABLES_DATA */\n")
+ write(" ;\n\n\n")
def main():
- """main program body"""
-
- if len( sys.argv ) != 2:
- print __doc__ % sys.argv[0]
- sys.exit( 1 )
-
- file = open( sys.argv[1], "wb" )
- write = file.write
-
- count_sid = len( sid_standard_names )
-
- # `mac_extras' contains the list of glyph names in the Macintosh standard
- # encoding which are not in the SID Standard Names.
- #
- mac_extras = filter_glyph_names( mac_standard_names, sid_standard_names )
-
- # `base_list' contains the names of our final glyph names table.
- # It consists of the `mac_extras' glyph names, followed by the SID
- # standard names.
- #
- mac_extras_count = len( mac_extras )
- base_list = mac_extras + sid_standard_names
-
- write( "/****************************************************************************\n" )
- write( " *\n" )
-
- write( " * %-71s\n" % os.path.basename( sys.argv[1] ) )
-
- write( " *\n" )
- write( " * PostScript glyph names.\n" )
- write( " *\n" )
- write( " * Copyright 2005-2019 by\n" )
- write( " * David Turner, Robert Wilhelm, and Werner Lemberg.\n" )
- write( " *\n" )
- write( " * This file is part of the FreeType project, and may only be used,\n" )
- write( " * modified, and distributed under the terms of the FreeType project\n" )
- write( " * license, LICENSE.TXT. By continuing to use, modify, or distribute\n" )
- write( " * this file you indicate that you have read the license and\n" )
- write( " * understand and accept it fully.\n" )
- write( " *\n" )
- write( " */\n" )
- write( "\n" )
- write( "\n" )
- write( " /* This file has been generated automatically -- do not edit! */\n" )
- write( "\n" )
- write( "\n" )
-
- # dump final glyph list (mac extras + sid standard names)
- #
- st = StringTable( base_list, "ft_standard_glyph_names" )
-
- st.dump( file )
- st.dump_sublist( file, "ft_mac_names",
- "FT_NUM_MAC_NAMES", mac_standard_names )
- st.dump_sublist( file, "ft_sid_names",
- "FT_NUM_SID_NAMES", sid_standard_names )
-
- dump_encoding( file, "t1_standard_encoding", t1_standard_encoding )
- dump_encoding( file, "t1_expert_encoding", t1_expert_encoding )
-
- # dump the AGL in its compressed form
- #
- agl_glyphs, agl_values = adobe_glyph_values()
- dict = StringNode( "", 0 )
-
- for g in range( len( agl_glyphs ) ):
- dict.add( agl_glyphs[g], eval( "0x" + agl_values[g] ) )
-
- dict = dict.optimize()
- dict_len = dict.locate( 0 )
- dict_array = dict.store( "" )
-
- write( """\
+ """main program body"""
+
+ if len(sys.argv) != 2:
+ print(__doc__ % sys.argv[0])
+ sys.exit(1)
+
+ file = open(sys.argv[1], "w")
+ write = file.write
+
+ count_sid = len(sid_standard_names)
+
+ # `mac_extras' contains the list of glyph names in the Macintosh standard
+ # encoding which are not in the SID Standard Names.
+ #
+ mac_extras = filter_glyph_names(mac_standard_names, sid_standard_names)
+
+ # `base_list' contains the names of our final glyph names table.
+ # It consists of the `mac_extras' glyph names, followed by the SID
+ # standard names.
+ #
+ mac_extras_count = len(mac_extras)
+ base_list = mac_extras + sid_standard_names
+
+ write("/*\n")
+ write(" *\n")
+ write(" * %-71s\n" % os.path.basename(sys.argv[1]))
+ write(" *\n")
+ write(" * PostScript glyph names.\n")
+ write(" *\n")
+ write(" * Copyright 2005-2022 by\n")
+ write(" * David Turner, Robert Wilhelm, and Werner Lemberg.\n")
+ write(" *\n")
+ write(" * This file is part of the FreeType project, and may only be "
+ "used,\n")
+ write(" * modified, and distributed under the terms of the FreeType "
+ "project\n")
+ write(" * license, LICENSE.TXT. By continuing to use, modify, or "
+ "distribute\n")
+ write(" * this file you indicate that you have read the license and\n")
+ write(" * understand and accept it fully.\n")
+ write(" *\n")
+ write(" */\n")
+ write("\n")
+ write("\n")
+ write(" /* This file has been generated automatically -- do not edit! */"
+ "\n")
+ write("\n")
+ write("\n")
+
+ # dump final glyph list (mac extras + sid standard names)
+ #
+ st = StringTable(base_list, "ft_standard_glyph_names")
+
+ st.dump(file)
+ st.dump_sublist(file, "ft_mac_names",
+ "FT_NUM_MAC_NAMES", mac_standard_names)
+ st.dump_sublist(file, "ft_sid_names",
+ "FT_NUM_SID_NAMES", sid_standard_names)
+
+ dump_encoding(file, "t1_standard_encoding", t1_standard_encoding)
+ dump_encoding(file, "t1_expert_encoding", t1_expert_encoding)
+
+ # dump the AGL in its compressed form
+ #
+ agl_glyphs, agl_values = adobe_glyph_values()
+ dictionary = StringNode("", 0)
+
+ for g in range(len(agl_glyphs)):
+ dictionary.add(agl_glyphs[g], eval("0x" + agl_values[g]))
+
+ dictionary = dictionary.optimize()
+ dict_len = dictionary.locate(0)
+ dict_array = dictionary.store(b"")
+
+ write("""\
/*
* This table is a compressed version of the Adobe Glyph List (AGL),
* optimized for efficient searching. It has been generated by the
@@ -5371,13 +5364,13 @@ def main():
#ifdef FT_CONFIG_OPTION_ADOBE_GLYPH_LIST
-""" )
+""")
- dump_array( dict_array, write, "ft_adobe_glyph_list" )
+ dump_array(dict_array, write, "ft_adobe_glyph_list")
- # write the lookup routine now
- #
- write( """\
+ # write the lookup routine now
+ #
+ write("""\
#ifdef DEFINE_PS_TABLES
/*
* This function searches the compressed table efficiently.
@@ -5477,64 +5470,64 @@ def main():
#endif /* FT_CONFIG_OPTION_ADOBE_GLYPH_LIST */
-""" )
+""")
- if 0: # generate unit test, or don't
- #
- # now write the unit test to check that everything works OK
- #
- write( "#ifdef TEST\n\n" )
+ if 0: # generate unit test, or don't
+ #
+ # now write the unit test to check that everything works OK
+ #
+ write("#ifdef TEST\n\n")
- write( "static const char* const the_names[] = {\n" )
- for name in agl_glyphs:
- write( ' "' + name + '",\n' )
- write( " 0\n};\n" )
+ write("static const char* const the_names[] = {\n")
+ for name in agl_glyphs:
+ write(' "' + name + '",\n')
+ write(" 0\n};\n")
- write( "static const unsigned long the_values[] = {\n" )
- for val in agl_values:
- write( ' 0x' + val + ',\n' )
- write( " 0\n};\n" )
+ write("static const unsigned long the_values[] = {\n")
+ for val in agl_values:
+ write(' 0x' + val + ',\n')
+ write(" 0\n};\n")
- write( """
+ write("""
#include <stdlib.h>
#include <stdio.h>
+#include <string.h>
- int
- main( void )
- {
- int result = 0;
- const char* const* names = the_names;
- const unsigned long* values = the_values;
+int
+main( void )
+{
+int result = 0;
+const char* const* names = the_names;
+const unsigned long* values = the_values;
- for ( ; *names; names++, values++ )
- {
- const char* name = *names;
- unsigned long reference = *values;
- unsigned long value;
+for ( ; *names; names++, values++ )
+{
+ const char* name = *names;
+ unsigned long reference = *values;
+ unsigned long value;
- value = ft_get_adobe_glyph_index( name, name + strlen( name ) );
- if ( value != reference )
- {
- result = 1;
- fprintf( stderr, "name '%s' => %04x instead of %04x\\n",
- name, value, reference );
- }
- }
-
- return result;
+ value = ft_get_adobe_glyph_index( name, name + strlen( name ) );
+ if ( value != reference )
+ {
+ result = 1;
+ fprintf( stderr, "name '%s' => %04x instead of %04x\\n",
+ name, value, reference );
}
-""" )
+}
- write( "#endif /* TEST */\n" )
+return result;
+}
+""")
- write("\n/* END */\n")
+ write("#endif /* TEST */\n")
+
+ write("\n/* END */\n")
# Now run the main routine
#
main()
-
# END
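
To make the storage format concrete, the following untested sketch decodes the byte string built by StringNode.store() and looks up one name; it mirrors the C search routine that the script emits (the helper name agl_lookup is ours and not part of the generated file):

    import struct

    def agl_lookup(storage, name):
        # walk the compressed trie produced by StringNode.store(); return
        # the code point for `name', or None if it is absent
        def read_node(pos):
            letters = ""
            while True:
                b = storage[pos]
                pos += 1
                if b & 0x7F:              # an empty run is a single 0 byte
                    letters += chr(b & 0x7F)
                if not (b & 0x80):        # high bit clear: last letter
                    break
            count = storage[pos]
            pos += 1
            value = None
            if count & 0x80:              # high bit set: node has a value
                count &= 0x7F
                value = struct.unpack("!H", storage[pos:pos + 2])[0]
                pos += 2
            children = [struct.unpack("!H",
                                      storage[pos + 2 * k:pos + 2 * k + 2])[0]
                        for k in range(count)]
            return letters, value, children

        pos = 0
        while True:
            letters, value, children = read_node(pos)
            if not name.startswith(letters):
                return None
            name = name[len(letters):]
            if not name:
                return value
            for child in children:
                # the first letter of each child selects the branch
                if chr(storage[child] & 0x7F) == name[0]:
                    pos = child
                    break
            else:
                return None

    # e.g. agl_lookup(dict_array, "Aacute") == 0x00C1
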
diff --git a/src/3rdparty/freetype/src/tools/make_distribution_archives.py b/src/3rdparty/freetype/src/tools/make_distribution_archives.py
new file mode 100755
index 0000000000..f29eb128c7
--- /dev/null
+++ b/src/3rdparty/freetype/src/tools/make_distribution_archives.py
@@ -0,0 +1,208 @@
+#!/usr/bin/env python3
+"""Generate distribution archives for a given FreeType 2 release."""
+
+from __future__ import print_function
+
+import argparse
+import atexit
+import os
+import shutil
+import subprocess
+import sys
+import tempfile
+
+_TOP_DIR = os.path.abspath(os.path.join(__file__, "..", "..", ".."))
+_SCRIPT_DIR = os.path.dirname(os.path.join(_TOP_DIR, "builds", "meson", ""))
+
+
+def get_cmd_output(cmd, cwd=None):
+ """Run a command and return its output as a string."""
+ if cwd is not None:
+ out = subprocess.check_output(cmd, cwd=cwd)
+ else:
+ out = subprocess.check_output(cmd)
+ return out.decode("utf-8").rstrip()
+
+
+def is_git_dir_clean(git_dir):
+ """Return True iff |git_dir| is a git directory in clean state."""
+ out = get_cmd_output(["git", "status", "--porcelain"], cwd=git_dir)
+ return len(out) == 0
+
+
+def main():
+ parser = argparse.ArgumentParser(description=__doc__)
+
+ parser.add_argument(
+ "--source_dir", default=_TOP_DIR, help="Source directory path."
+ )
+
+ parser.add_argument(
+ "--version",
+ help=(
+ "Specify alternate FreeType version (it is otherwise extracted"
+ " from current sources by default)."
+ ),
+ )
+
+ parser.add_argument(
+ "--gnu-config-dir",
+ help=(
+ "Path of input directory containing recent `config.guess` and"
+ " `config.sub` files from GNU config."
+ ),
+ )
+
+ parser.add_argument(
+ "--build-dir",
+ help="Specify build directory. Only used for debugging this script.",
+ )
+
+ parser.add_argument(
+ "--ignore-clean-check",
+ action="store_true",
+ help=(
+ "Do not check for a clean source git repository. Only used for"
+ " debugging this script."
+ ),
+ )
+
+ parser.add_argument(
+ "output_dir", help="Output directory for generated archives."
+ )
+
+ args = parser.parse_args()
+
+ git_dir = args.source_dir if args.source_dir else _TOP_DIR
+ if not args.ignore_clean_check and not is_git_dir_clean(git_dir):
+ sys.stderr.write(
+ "ERROR: Your git repository is not in a clean state: %s\n"
+ % git_dir
+ )
+ return 1
+
+ if args.version:
+ version = args.version
+ else:
+ # Extract FreeType version from sources.
+ version = get_cmd_output(
+ [
+ sys.executable,
+ os.path.join(_SCRIPT_DIR, "extract_freetype_version.py"),
+ os.path.join(_TOP_DIR, "include", "freetype", "freetype.h"),
+ ]
+ )
+
+    # Determine the build directory.  By default this is a temporary
+    # directory that is removed on script exit; if --build-dir=DIR is
+    # used, we only create or empty the directory, and never remove its
+    # content on exit.
+ if args.build_dir:
+ build_dir = args.build_dir
+ if not os.path.exists(build_dir):
+ os.makedirs(build_dir)
+ else:
+            # Remove any existing content from the build directory.
+ for item in os.listdir(build_dir):
+ file_path = os.path.join(build_dir, item)
+ if os.path.isdir(file_path):
+ shutil.rmtree(file_path)
+ else:
+ os.unlink(file_path)
+ else:
+ # Create a temporary directory, and ensure it is removed on exit.
+ build_dir = tempfile.mkdtemp(prefix="freetype-dist-")
+
+ def clean_build_dir():
+ shutil.rmtree(build_dir)
+
+ atexit.register(clean_build_dir)
+
+ # Copy all source files known to git into $BUILD_DIR/freetype-$VERSION
+ # with the exception of .gitignore and .mailmap files.
+ source_files = [
+ f
+ for f in get_cmd_output(["git", "ls-files"], cwd=git_dir).split("\n")
+ if os.path.basename(f) not in (".gitignore", ".mailmap")
+ ]
+
+ freetype_dir = "freetype-" + version
+ tmp_src_dir = os.path.join(build_dir, freetype_dir)
+ os.makedirs(tmp_src_dir)
+
+ for src in source_files:
+ dst = os.path.join(tmp_src_dir, src)
+ dst_dir = os.path.dirname(dst)
+ if not os.path.exists(dst_dir):
+ os.makedirs(dst_dir)
+ shutil.copy(os.path.join(git_dir, src), dst)
+
+    # Run autogen.sh in the copied source tree.
+ subprocess.check_call(["/bin/sh", "autogen.sh"], cwd=tmp_src_dir)
+ shutil.rmtree(
+ os.path.join(tmp_src_dir, "builds", "unix", "autom4te.cache")
+ )
+
+ # Copy config.guess and config.sub if possible!
+ if args.gnu_config_dir:
+ for f in ("config.guess", "config.sub"):
+ shutil.copy(
+ os.path.join(args.gnu_config_dir, f),
+ os.path.join(tmp_src_dir, "builds", "unix", f),
+ )
+
+ # Generate reference documentation under docs/
+ subprocess.check_call(
+ [
+ sys.executable,
+ os.path.join(_SCRIPT_DIR, "generate_reference_docs.py"),
+ "--input-dir",
+ tmp_src_dir,
+ "--version",
+ version,
+ "--output-dir",
+ os.path.join(tmp_src_dir, "docs"),
+ ]
+ )
+
+ shutil.rmtree(os.path.join(tmp_src_dir, "docs", "markdown"))
+ os.unlink(os.path.join(tmp_src_dir, "docs", "mkdocs.yml"))
+
+ # Generate our archives
+ freetype_tar = freetype_dir + ".tar"
+
+ subprocess.check_call(
+ ["tar", "-H", "ustar", "-chf", freetype_tar, freetype_dir],
+ cwd=build_dir,
+ )
+
+ subprocess.check_call(
+ ["gzip", "-9", "--keep", freetype_tar], cwd=build_dir
+ )
+
+ subprocess.check_call(["xz", "--keep", freetype_tar], cwd=build_dir)
+
+ ftwinversion = "ft" + "".join(version.split("."))
+ subprocess.check_call(
+ ["zip", "-qlr9", ftwinversion + ".zip", freetype_dir], cwd=build_dir
+ )
+
+    # Copy the generated archives to the output directory.
+ if not os.path.exists(args.output_dir):
+ os.makedirs(args.output_dir)
+
+ for f in (
+ freetype_tar + ".gz",
+ freetype_tar + ".xz",
+ ftwinversion + ".zip",
+ ):
+ shutil.copy(
+ os.path.join(build_dir, f), os.path.join(args.output_dir, f)
+ )
+
+ # Done!
+ return 0
+
+
+if __name__ == "__main__":
+ sys.exit(main())
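
As a quick illustration of the archive naming scheme implemented above (the version string is hypothetical):

    version = "2.13.2"                        # hypothetical release
    freetype_dir = "freetype-" + version      # freetype-2.13.2
    freetype_tar = freetype_dir + ".tar"      # packed, then .gz and .xz
    ftwinversion = "ft" + "".join(version.split("."))
    assert ftwinversion + ".zip" == "ft2132.zip"   # Windows-style name
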
diff --git a/src/3rdparty/freetype/src/tools/no-copyright b/src/3rdparty/freetype/src/tools/no-copyright
index d639aa4a84..e171b76f3a 100644
--- a/src/3rdparty/freetype/src/tools/no-copyright
+++ b/src/3rdparty/freetype/src/tools/no-copyright
@@ -1,14 +1,12 @@
# Files that don't get a copyright, or which are taken from elsewhere.
#
-# All lines in this file are patterns, including the comment lines; this
-# means that e.g. `FTL.TXT' matches all files that have this string in
-# the file name (including the path relative to the current directory,
-# always starting with `./').
+# All lines in this file are patterns (relative to the top-level directory),
+# including the comment lines; this means that e.g. `FTL.TXT' matches all
+# files that have this string in the file name (including the path relative
+# to the current directory, always starting with `./').
#
# Don't put empty lines into this file!
#
-.gitignore
-#
builds/unix/pkg.m4
#
docs/FTL.TXT
@@ -44,6 +42,7 @@ src/pcf/README
src/pcf/rules.mk
#
src/gzip/adler32.c
+src/gzip/ftzconf.c
src/gzip/infblock.c
src/gzip/infblock.h
src/gzip/infcodes.c
@@ -62,4 +61,6 @@ src/gzip/zutil.h
src/tools/apinames.c
src/tools/ftrandom/ftrandom.c
#
+subprojects/dlg
+#
# EOF
diff --git a/src/3rdparty/freetype/src/tools/test_afm.c b/src/3rdparty/freetype/src/tools/test_afm.c
index 8de619bb03..a4b2268984 100644
--- a/src/3rdparty/freetype/src/tools/test_afm.c
+++ b/src/3rdparty/freetype/src/tools/test_afm.c
@@ -2,10 +2,9 @@
* gcc -DFT2_BUILD_LIBRARY -I../../include -o test_afm test_afm.c \
* -L../../objs/.libs -lfreetype -lz -static
*/
-#include <ft2build.h>
-#include FT_FREETYPE_H
-#include FT_INTERNAL_STREAM_H
-#include FT_INTERNAL_POSTSCRIPT_AUX_H
+#include <freetype/freetype.h>
+#include <freetype/internal/ftstream.h>
+#include <freetype/internal/psaux.h>
void dump_fontinfo( AFM_FontInfo fi )
{
diff --git a/src/3rdparty/freetype/src/tools/test_bbox.c b/src/3rdparty/freetype/src/tools/test_bbox.c
index 64b82c384b..d9fd932993 100644
--- a/src/3rdparty/freetype/src/tools/test_bbox.c
+++ b/src/3rdparty/freetype/src/tools/test_bbox.c
@@ -1,6 +1,5 @@
-#include <ft2build.h>
-#include FT_FREETYPE_H
-#include FT_BBOX_H
+#include <freetype/freetype.h>
+#include <freetype/ftbbox.h>
#include <time.h> /* for clock() */
diff --git a/src/3rdparty/freetype/src/tools/test_trig.c b/src/3rdparty/freetype/src/tools/test_trig.c
index 99ac1cf174..4f3410ab38 100644
--- a/src/3rdparty/freetype/src/tools/test_trig.c
+++ b/src/3rdparty/freetype/src/tools/test_trig.c
@@ -1,6 +1,5 @@
-#include <ft2build.h>
-#include FT_FREETYPE_H
-#include FT_TRIGONOMETRY_H
+#include <freetype/freetype.h>
+#include <freetype/fttrigon.h>
#include <math.h>
#include <stdio.h>
diff --git a/src/3rdparty/freetype/src/tools/update-copyright b/src/3rdparty/freetype/src/tools/update-copyright
index 4a8bf9b0ea..674823ceb2 100644..100755
--- a/src/3rdparty/freetype/src/tools/update-copyright
+++ b/src/3rdparty/freetype/src/tools/update-copyright
@@ -4,10 +4,10 @@
# taking care of exceptions stored in file `no-copyright'.
topdir=`git rev-parse --show-toplevel`
-toolsdir=$topdir/src/tools
+toolsdir=`dirname $0`
git ls-files --full-name $topdir \
-| sed 's|^|../../|' \
+| sed "s|^|$topdir/|" \
| grep -vFf $toolsdir/no-copyright \
| xargs $toolsdir/update-copyright-year
diff --git a/src/3rdparty/freetype/src/tools/update-copyright-year b/src/3rdparty/freetype/src/tools/update-copyright-year
index 2ae50d6f52..b0b60fb88e 100644..100755
--- a/src/3rdparty/freetype/src/tools/update-copyright-year
+++ b/src/3rdparty/freetype/src/tools/update-copyright-year
@@ -2,7 +2,7 @@ eval '(exit $?0)' && eval 'exec perl -wS -i "$0" ${1+"$@"}'
& eval 'exec perl -wS -i "$0" $argv:q'
if 0;
-# Copyright (C) 2015-2019 by
+# Copyright (C) 2015-2023 by
# Werner Lemberg.
#
# This file is part of the FreeType project, and may only be used, modified,
@@ -28,20 +28,20 @@ eval '(exit $?0)' && eval 'exec perl -wS -i "$0" ${1+"$@"}'
#
# or
#
-# /* Copyright 2000, 2001, 2004-2007 by */
-# /* foobar */
+# /* Copyright (c) 2000, 2001, 2004-2007 by */
+# /* foobar */
#
# and replaces them uniformly with
#
-# Copyright 2000-2015
+# Copyright (C) 2000-2021
# foobar
#
# and
#
-# /* Copyright 2000-2015 by */
-# /* foobar */
+# /* Copyright (C) 2000-2021 by */
+# /* foobar */
#
-# (assuming that the current year is 2015). As can be seen, the line length
+# (assuming that the current year is 2021). As can be seen, the line length
# is retained if there is non-whitespace after the word `by' on the same
# line.
@@ -80,11 +80,11 @@ while (<>)
{
# Fill line to the same length (if appropriate); we skip the middle
# part but insert `(C)', three spaces, and `-'.
- my $space = length($+{space1}) - 1
- + length($+{middle}) - 1
- + length($+{space2}) - 1
+ my $space = length($+{space1})
+ + length($+{middle})
+ + length($+{space2})
+ length($+{space3})
- - (length("(C)") + 1);
+ - (length("(C)") + 3 + 1);
print "$+{begin}";
print "Copyright\ (C)\ $+{first}-$year\ by";
@@ -106,19 +106,38 @@ while (<>)
(?<end>.*)
}
{
- # Fill line to the same length (if appropriate); we insert three
- # spaces, a `-', and the current year.
- my $space = length($+{space1}) - 1
- + length($+{space2}) - 1
- + length($+{space3})
- - (length($year) + 1);
+ if ($+{first} < $year)
+ {
+ # Fill line to the same length (if appropriate); we insert three
+ # spaces, the string `(C)', a `-', and the current year.
+ my $space = length($+{space1})
+ + length($+{space2})
+ + length($+{space3})
+ - (length($year) + length("(C)") + 3 + 1);
- print "$+{begin}";
- print "Copyright\ (C)\ $+{first}-$year\ by";
- # If $space is negative this inserts nothing.
- print ' ' x $space if length($+{end});
- print "$+{end}\n";
- $replaced = 1;
+ print "$+{begin}";
+ print "Copyright\ (C)\ $+{first}-$year\ by";
+ # If $space is negative this inserts nothing.
+ print ' ' x $space if length($+{end});
+ print "$+{end}\n";
+ $replaced = 1;
+ }
+ else
+ {
+ # Fill line to the same length (if appropriate); we insert three
+ # spaces and the string `(C)'.
+ my $space = length($+{space1})
+ + length($+{space2})
+ + length($+{space3})
+ - (length("(C)") + 3);
+
+ print "$+{begin}";
+ print "Copyright\ (C)\ $+{first}\ by";
+ # If $space is negative this inserts nothing.
+ print ' ' x $space if length($+{end});
+ print "$+{end}\n";
+ $replaced = 1;
+ }
}ex
||
# Otherwise print line unaltered.
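
The length bookkeeping above keeps the right-hand border of boxed comments aligned after the year range is rewritten. A minimal sketch of the invariant, using hypothetical sample lines:

    old = "/* Copyright 2000, 2001, 2004-2007 by */"
    new = "/* Copyright (C) 2000-2007 by         */"
    # collapsing the year list frees exactly enough columns to insert
    # "(C) " and the padding spaces before "*/"
    assert len(old) == len(new) == 40
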
diff --git a/src/3rdparty/freetype/src/tools/vms_shorten_symbol.c b/src/3rdparty/freetype/src/tools/vms_shorten_symbol.c
new file mode 100644
index 0000000000..81f2a71877
--- /dev/null
+++ b/src/3rdparty/freetype/src/tools/vms_shorten_symbol.c
@@ -0,0 +1,250 @@
+/*
+ * Copyright (c) 2010, 2017 Craig A. Berry
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+/* vms_shorten_symbol
+ *
+ * This program provides shortening of long symbols (> 31 characters) using the
+ * same mechanism as the OpenVMS C compiler. The basic procedure is to compute
+ * an AUTODIN II checksum of the entire symbol, encode the checksum in base32,
+ * and glue together a shortened symbol from the first 23 characters of the
+ * original symbol plus the encoded checksum appended. The output format is
+ * the same used in the name mangler database, stored by default in
+ * [.CXX_REPOSITORY]CXX$DEMANGLER_DB.
+ *
+ * To obtain the same result as CC/NAMES=SHORTENED, run like so:
+ *
+ * $ mcr []vms_shorten_symbol "Please_forgive_this_absurdly_long_symbol_name"
+ * PLEASE_FORGIVE_THIS_ABS1ARO4QU$Please_forgive_this_absurdly_long_symbol_name
+ *
+ * To obtain the same result as CC/NAMES=(SHORTENED,AS_IS), pass a non-zero
+ * value as the second argument, like so:
+ *
+ * $ mcr []vms_shorten_symbol "Please_forgive_this_absurdly_long_symbol_name" 1
+ * Please_forgive_this_abs3rv8rnn$Please_forgive_this_absurdly_long_symbol_name
+ */
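
For checking results off-VMS, here is a compact, untested Python sketch of the same scheme; it relies on zlib.crc32 implementing the same AUTODIN II polynomial as the table below (the helper name `shorten' is ours):

    import zlib

    _B32HEX = "0123456789abcdefghijklmnopqrstuv"   # RFC 2938 alphabet

    def shorten(symbol, as_is=False):
        sym = symbol if as_is else symbol.upper()
        if len(sym) <= 31:            # short symbols pass through verbatim
            return sym
        # the C code encodes the non-inverted CRC register, i.e. ~crc32
        crc = ~zlib.crc32(sym.encode("ascii")) & 0xFFFFFFFF
        digits = ""
        for _ in range(7):            # seven 5-bit chunks cover 32 bits
            digits = _B32HEX[crc & 0x1F] + digits
            crc >>= 5
        suffix = digits + "$"         # DEC-style '$' pad, not '='
        if not as_is:
            suffix = suffix.upper()
        return sym[:23] + suffix

    # shorten("Please_forgive_this_absurdly_long_symbol_name") should give
    # "PLEASE_FORGIVE_THIS_ABS1ARO4QU$", matching the first example above.
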
+
+#include <ctype.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#ifdef __VMS
+#define UINT32 unsigned int
+#else
+#include <inttypes.h>
+#define UINT32 uint32_t
+#endif
+
+extern UINT32 crc32(const char *input_string);
+extern int u32_to_base32(UINT32 input, char *output);
+extern int vms_shorten_symbol(const char *symbol, char *shortened, char as_is_flag);
+
+/*
+ * This routine implements the AUTODIN II polynomial.
+ */
+
+UINT32
+crc32(const char *input_string)
+{
+
+/*
+ * CRC code and data based partly on FreeBSD implementation, which
+ * notes:
+ *
+ * The crc32 functions and data was originally written by Spencer
+ * Garrett <srg@quick.com> and was cleaned from the PostgreSQL source
+ * tree via the files contrib/ltree/crc32.[ch]. No license was
+ * included, therefore it is assumed that this code is public
+ * domain. Attribution still noted.
+ *
+ * (I think they mean "gleaned" not "cleaned".)
+ */
+
+ static const UINT32 autodin_ii_table[256] = {
+ 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f,
+ 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
+ 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, 0x1db71064, 0x6ab020f2,
+ 0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
+ 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
+ 0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
+ 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, 0x35b5a8fa, 0x42b2986c,
+ 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
+ 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423,
+ 0xcfba9599, 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
+ 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, 0x01db7106,
+ 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
+ 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d,
+ 0x91646c97, 0xe6635c01, 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
+ 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
+ 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
+ 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7,
+ 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
+ 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa,
+ 0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
+ 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81,
+ 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
+ 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, 0xe3630b12, 0x94643b84,
+ 0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
+ 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
+ 0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
+ 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, 0xd6d6a3e8, 0xa1d1937e,
+ 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
+ 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55,
+ 0x316e8eef, 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
+ 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, 0xb2bd0b28,
+ 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
+ 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f,
+ 0x72076785, 0x05005713, 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
+ 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
+ 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
+ 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69,
+ 0x616bffd3, 0x166ccf45, 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
+ 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc,
+ 0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
+ 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693,
+ 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
+ 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d,
+ };
+
+ UINT32 crc = ~0U;
+ char *c;
+ for (c = (char *)input_string; *c; ++c)
+ crc = (crc >> 8) ^ autodin_ii_table[(crc ^ *c) & 0xff];
+ return ~crc;
+}
+
+/*
+ * This is the RFC2938 variant of base32, not RFC3548, Crockford's, or
+ * other newer variant. It produces an 8-byte encoded character string
+ * (plus trailing null) from a 32-bit integer input.
+ */
+
+int
+u32_to_base32(UINT32 input, char *output)
+{
+ static const char base32hex_table[32] = {
+ '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
+ 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j',
+ 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't',
+ 'u', 'v'
+ };
+ int i;
+
+ /*
+ * Grab lowest 5 bits and look up conversion in table. Lather, rinse,
+     * repeat for a total of seven 5-bit chunks to accommodate 32 bits of input.
+ */
+ for (i = 0; i < 7; i++) {
+ output[6 - i] = base32hex_table[input & 0x1f];
+ input >>= 5; /* position to look at next 5 */
+ }
+ output[7] = '$'; /* It's DEC, so use '$' not '=' to pad. */
+ output[8] = '\0';
+ return 0;
+}
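
A one-off check of the chunking in Python: after six shifts only the top two bits of the input remain for the seventh digit.

    v, digits = 0xFFFFFFFF, ""
    for _ in range(7):
        digits = "0123456789abcdefghijklmnopqrstuv"[v & 0x1F] + digits
        v >>= 5
    assert digits + "$" == "3vvvvvv$"   # (0xFFFFFFFF >> 30) == 3
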
+
+/*
+ * Take an input symbol name of arbitrary length and produce a symbol shortened
+ * to 31 characters. The shortened symbol consists of the first 23 characters
+ * of the original symbol plus the 8 characters of the encoded checksum. The
+ * third argument is a boolean indicating whether to emulate the compiler's
+ * /NAMES=AS_IS option. When false (the compiler's default), the shortened
+ * symbol will be upper cased. When the original symbol is 31 characters or
+ * fewer in length, no checksum will be appended and the original symbol is
+ * returned verbatim (though upper cased if the as_is_flag is false).
+ */
+
+int
+vms_shorten_symbol(const char *input_symbol, char *shortened, char as_is_flag)
+{
+ char b32str[9];
+ UINT32 crc;
+ char *c, *symbol;
+ int symlen;
+
+ symlen = strlen(input_symbol);
+ symbol = (char *)malloc(symlen + 1);
+ if (symbol == NULL)
+ return -1;
+
+ strncpy(symbol, input_symbol, symlen);
+ symbol[symlen] = '\0';
+
+ if (!as_is_flag) {
+ for (c = symbol; *c; c++)
+ *c = toupper(*c);
+ }
+
+ if (symlen <= 31) {
+ strncpy(shortened, symbol, symlen);
+ shortened[symlen] = '\0';
+ free(symbol);
+ return 0;
+ }
+
+ /*
+ * Compute the checksum on the whole symbol.
+ */
+
+ crc = crc32(symbol);
+
+ /* The compiler does not use the inverted checksum, so we invert it
+ * back before encoding in base32.
+ */
+
+ if (u32_to_base32(~crc, (char *)&b32str) == -1) {
+ free(symbol);
+ return -1;
+ }
+
+ if (!as_is_flag) {
+ for (c = (char *)&b32str; *c; c++)
+ *c = toupper(*c);
+ }
+
+ sprintf(shortened, "%.23s%.8s", symbol, b32str);
+ shortened[31] = '\0';
+ free(symbol);
+ return 0;
+}
+
+#ifdef TEST_MAIN
+int
+main(int argc, char **argv)
+{
+ char short_symbol[32];
+ char as_is_flag = 0;
+
+ if (argc < 2) {
+ fprintf(stderr, "Usage: %s <symbol name> [<AS_IS flag>]\n", argv[0]);
+ exit(EXIT_FAILURE);
+ }
+ if (argc > 2)
+ as_is_flag = 1;
+
+ if (vms_shorten_symbol(argv[1], (char *)&short_symbol, as_is_flag) == -1) {
+ fprintf(stderr, "Symbol shortening failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ printf("%s%s\n", (char *)&short_symbol, argv[1]);
+}
+#endif