refactor: excel parse
This commit is contained in:
@@ -0,0 +1,14 @@
|
||||
from .otBase import BaseTTXConverter
|
||||
|
||||
|
||||
class table_B_A_S_E_(BaseTTXConverter):
    """Baseline table

    The ``BASE`` table contains information needed to align glyphs in
    different scripts, from different fonts, or at different sizes
    within the same line of text.

    See also https://learn.microsoft.com/en-us/typography/opentype/spec/base

    All decompile/compile/XML behavior is inherited from
    ``BaseTTXConverter``, which is driven by the otData table description.
    """

    pass
|
||||
@@ -0,0 +1,64 @@
|
||||
# Since bitmap glyph metrics are shared between EBLC and EBDT
|
||||
# this class gets its own python file.
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.textTools import safeEval
|
||||
import logging
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
# sstruct format for the 8-byte BigGlyphMetrics record, which carries
# bearings/advances for both horizontal and vertical layout.
bigGlyphMetricsFormat = """
> # big endian
height: B
width: B
horiBearingX: b
horiBearingY: b
horiAdvance: B
vertBearingX: b
vertBearingY: b
vertAdvance: B
"""

# sstruct format for the 5-byte SmallGlyphMetrics record, which carries
# metrics for a single layout direction only.
smallGlyphMetricsFormat = """
> # big endian
height: B
width: B
BearingX: b
BearingY: b
Advance: B
"""
|
||||
|
||||
|
||||
class BitmapGlyphMetrics(object):
    """Base class for the bitmap glyph metrics shared between EBLC and EBDT.

    Subclasses must define ``binaryFormat`` (an sstruct format string);
    its field names double as the XML element names.
    """

    def toXML(self, writer, ttFont):
        """Write this metrics record as XML, one simpletag per metric field."""
        writer.begintag(self.__class__.__name__)
        writer.newline()
        # sstruct.getformat(...)[1] is the ordered list of field names.
        for metricName in sstruct.getformat(self.__class__.binaryFormat)[1]:
            writer.simpletag(metricName, value=getattr(self, metricName))
            writer.newline()
        writer.endtag(self.__class__.__name__)
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Populate metric attributes from the child elements in *content*.

        Unknown element names are ignored with a warning.
        """
        metricNames = set(sstruct.getformat(self.__class__.binaryFormat)[1])
        for element in content:
            if not isinstance(element, tuple):
                continue
            # NOTE: deliberately rebinds (shadows) the method arguments.
            name, attrs, content = element
            # Make sure this is a metric that is needed by GlyphMetrics.
            if name in metricNames:
                vars(self)[name] = safeEval(attrs["value"])
            else:
                log.warning(
                    "unknown name '%s' being ignored in %s.",
                    name,
                    self.__class__.__name__,
                )
|
||||
|
||||
|
||||
class BigGlyphMetrics(BitmapGlyphMetrics):
    # Metrics record carrying both horizontal and vertical layout values.
    binaryFormat = bigGlyphMetricsFormat
|
||||
|
||||
|
||||
class SmallGlyphMetrics(BitmapGlyphMetrics):
    # Metrics record for a single layout direction only.
    binaryFormat = smallGlyphMetricsFormat
|
||||
@@ -0,0 +1,113 @@
|
||||
# Copyright 2013 Google, Inc. All Rights Reserved.
|
||||
#
|
||||
# Google Author(s): Matt Fontaine
|
||||
|
||||
|
||||
from fontTools.misc.textTools import bytesjoin
|
||||
from fontTools.misc import sstruct
|
||||
from . import E_B_D_T_
|
||||
from .BitmapGlyphMetrics import (
|
||||
BigGlyphMetrics,
|
||||
bigGlyphMetricsFormat,
|
||||
SmallGlyphMetrics,
|
||||
smallGlyphMetricsFormat,
|
||||
)
|
||||
from .E_B_D_T_ import (
|
||||
BitmapGlyph,
|
||||
BitmapPlusSmallMetricsMixin,
|
||||
BitmapPlusBigMetricsMixin,
|
||||
)
|
||||
import struct
|
||||
|
||||
|
||||
class table_C_B_D_T_(E_B_D_T_.table_E_B_D_T_):
    """Color Bitmap Data table

    The ``CBDT`` table contains color bitmap data for glyphs. It must
    be used in concert with the ``CBLC`` table.

    It is backwards-compatible with the monochrome/grayscale ``EBDT`` table.

    See also https://learn.microsoft.com/en-us/typography/opentype/spec/cbdt
    """

    # Change the data locator table being referenced.
    locatorName = "CBLC"

    # Modify the format class accessor for color bitmap use.
    def getImageFormatClass(self, imageFormat):
        """Return the glyph class for *imageFormat*.

        Prefers the inherited EBDT formats; falls back to the CBDT-only
        extended formats (17-19) when EBDT does not know the format.
        """
        try:
            return E_B_D_T_.table_E_B_D_T_.getImageFormatClass(self, imageFormat)
        except KeyError:
            return cbdt_bitmap_classes[imageFormat]
|
||||
|
||||
|
||||
# Helper method for removing export features not supported by color bitmaps.
|
||||
# Write data in the parent class will default to raw if an option is unsupported.
|
||||
def _removeUnsupportedForColor(dataFunctions):
|
||||
dataFunctions = dict(dataFunctions)
|
||||
del dataFunctions["row"]
|
||||
return dataFunctions
|
||||
|
||||
|
||||
class ColorBitmapGlyph(BitmapGlyph):
    # Color bitmaps are PNG payloads; row-wise export is not supported,
    # so it is stripped from the inherited XML data functions.
    fileExtension = ".png"
    xmlDataFunctions = _removeUnsupportedForColor(BitmapGlyph.xmlDataFunctions)
|
||||
|
||||
|
||||
class cbdt_bitmap_format_17(BitmapPlusSmallMetricsMixin, ColorBitmapGlyph):
    """CBDT format 17: small glyph metrics followed by length-prefixed image data."""

    def decompile(self):
        # Layout: SmallGlyphMetrics record, uint32 dataLen, then image bytes.
        self.metrics = SmallGlyphMetrics()
        dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics)
        (dataLen,) = struct.unpack(">L", data[:4])
        data = data[4:]

        # For the image data cut it to the size specified by dataLen.
        assert dataLen <= len(data), "Data overun in format 17"
        self.imageData = data[:dataLen]

    def compile(self, ttFont):
        # Reverse of decompile: metrics, uint32 length, raw image bytes.
        dataList = []
        dataList.append(sstruct.pack(smallGlyphMetricsFormat, self.metrics))
        dataList.append(struct.pack(">L", len(self.imageData)))
        dataList.append(self.imageData)
        return bytesjoin(dataList)
|
||||
|
||||
|
||||
class cbdt_bitmap_format_18(BitmapPlusBigMetricsMixin, ColorBitmapGlyph):
    """CBDT format 18: big glyph metrics followed by length-prefixed image data."""

    def decompile(self):
        # Layout: BigGlyphMetrics record, uint32 dataLen, then image bytes.
        self.metrics = BigGlyphMetrics()
        dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics)
        (dataLen,) = struct.unpack(">L", data[:4])
        data = data[4:]

        # For the image data cut it to the size specified by dataLen.
        assert dataLen <= len(data), "Data overun in format 18"
        self.imageData = data[:dataLen]

    def compile(self, ttFont):
        # Reverse of decompile: metrics, uint32 length, raw image bytes.
        dataList = []
        dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
        dataList.append(struct.pack(">L", len(self.imageData)))
        dataList.append(self.imageData)
        return bytesjoin(dataList)
|
||||
|
||||
|
||||
class cbdt_bitmap_format_19(ColorBitmapGlyph):
    """CBDT format 19: length-prefixed image data with no glyph metrics."""

    def decompile(self):
        # The payload is a 32-bit big-endian length followed by image bytes.
        (dataLen,) = struct.unpack(">L", self.data[:4])
        payload = self.data[4:]

        # Trim the image data to the declared size; it must fit the payload.
        assert dataLen <= len(payload), "Data overun in format 19"
        self.imageData = payload[:dataLen]

    def compile(self, ttFont):
        # Reverse of decompile: uint32 length prefix, then the raw bytes.
        prefix = struct.pack(">L", len(self.imageData))
        return prefix + self.imageData
|
||||
|
||||
|
||||
# Dict for CBDT extended formats (17-19); used as the fallback lookup by
# table_C_B_D_T_.getImageFormatClass when EBDT does not know the format.
cbdt_bitmap_classes = {
    17: cbdt_bitmap_format_17,
    18: cbdt_bitmap_format_18,
    19: cbdt_bitmap_format_19,
}
|
||||
@@ -0,0 +1,19 @@
|
||||
# Copyright 2013 Google, Inc. All Rights Reserved.
|
||||
#
|
||||
# Google Author(s): Matt Fontaine
|
||||
|
||||
from . import E_B_L_C_
|
||||
|
||||
|
||||
class table_C_B_L_C_(E_B_L_C_.table_E_B_L_C_):
    """Color Bitmap Location table

    The ``CBLC`` table contains the locations of color bitmaps for glyphs. It must
    be used in concert with the ``CBDT`` table.

    It is backwards-compatible with the monochrome/grayscale ``EBLC`` table.

    See also https://learn.microsoft.com/en-us/typography/opentype/spec/cblc
    """

    # CBLC must be handled together with CBDT, which holds the bitmap
    # data this table indexes.
    dependencies = ["CBDT"]
|
||||
@@ -0,0 +1,61 @@
|
||||
from io import BytesIO
|
||||
from fontTools import cffLib
|
||||
from . import DefaultTable
|
||||
|
||||
|
||||
class table_C_F_F_(DefaultTable.DefaultTable):
    """Compact Font Format table (version 1)

    The ``CFF`` table embeds a CFF-formatted font. The CFF font format
    predates OpenType and could be used as a standalone font file, but the
    ``CFF`` table is also used to package CFF fonts into an OpenType
    container.

    .. note::
       ``CFF`` has been succeeded by ``CFF2``, which eliminates much of
       the redundancy incurred by embedding CFF version 1 in an OpenType
       font.

    See also https://learn.microsoft.com/en-us/typography/opentype/spec/cff
    """

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        self.cff = cffLib.CFFFontSet()
        # Guards against getGlyphOrder() being consumed more than once.
        self._gaveGlyphOrder = False

    def decompile(self, data, otFont):
        """Parse binary CFF data into self.cff (a CFFFontSet)."""
        self.cff.decompile(BytesIO(data), otFont, isCFF2=False)
        assert len(self.cff) == 1, "can't deal with multi-font CFF tables."

    def compile(self, otFont):
        """Serialize self.cff back to binary CFF data."""
        f = BytesIO()
        self.cff.compile(f, otFont, isCFF2=False)
        return f.getvalue()

    def haveGlyphNames(self):
        """Return False for CID-keyed fonts (which have no glyph names)."""
        if hasattr(self.cff[self.cff.fontNames[0]], "ROS"):
            return False  # CID-keyed font
        else:
            return True

    def getGlyphOrder(self):
        """Return the glyph order of the (single) embedded font.

        May only be called once per table object; a second call raises
        TTLibError.
        """
        if self._gaveGlyphOrder:
            from fontTools import ttLib

            raise ttLib.TTLibError("illegal use of getGlyphOrder()")
        self._gaveGlyphOrder = True
        return self.cff[self.cff.fontNames[0]].getGlyphOrder()

    def setGlyphOrder(self, glyphOrder):
        """No-op: the embedded CFF font keeps its own glyph order.

        Forwarding to the font (commented out below) was considered but
        deliberately not enabled.
        """
        pass
        # XXX
        # self.cff[self.cff.fontNames[0]].setGlyphOrder(glyphOrder)

    def toXML(self, writer, otFont):
        """Delegate XML output entirely to the CFFFontSet."""
        self.cff.toXML(writer)

    def fromXML(self, name, attrs, content, otFont):
        # Create the CFFFontSet lazily when parsing XML from scratch.
        if not hasattr(self, "cff"):
            self.cff = cffLib.CFFFontSet()
        self.cff.fromXML(name, attrs, content, otFont)
|
||||
@@ -0,0 +1,26 @@
|
||||
from io import BytesIO
|
||||
from fontTools.ttLib.tables.C_F_F_ import table_C_F_F_
|
||||
|
||||
|
||||
class table_C_F_F__2(table_C_F_F_):
    """Compact Font Format version 2 table

    The ``CFF2`` table contains glyph data for a CFF2-flavored OpenType
    font.

    .. note::
       ``CFF2`` is the successor to ``CFF``, and eliminates much of
       the redundancy incurred by embedding CFF version 1 in an OpenType
       font.

    See also https://learn.microsoft.com/en-us/typography/opentype/spec/cff2
    """

    def decompile(self, data, otFont):
        """Parse binary CFF2 data; differs from CFF only in isCFF2=True."""
        self.cff.decompile(BytesIO(data), otFont, isCFF2=True)
        assert len(self.cff) == 1, "can't deal with multi-font CFF tables."

    def compile(self, otFont):
        """Serialize self.cff as CFF2 binary data."""
        f = BytesIO()
        self.cff.compile(f, otFont, isCFF2=True)
        return f.getvalue()
|
||||
@@ -0,0 +1,165 @@
|
||||
# Copyright 2013 Google, Inc. All Rights Reserved.
|
||||
#
|
||||
# Google Author(s): Behdad Esfahbod
|
||||
|
||||
from fontTools.misc.textTools import safeEval
|
||||
from . import DefaultTable
|
||||
|
||||
|
||||
class table_C_O_L_R_(DefaultTable.DefaultTable):
    """Color table

    The ``COLR`` table defines color presentation of outline glyphs. It must
    be used in concert with the ``CPAL`` table, which contains the color
    descriptors used.

    This table is structured so that you can treat it like a dictionary keyed by glyph name.

    ``ttFont['COLR'][<glyphName>]`` will return the color layers for any glyph.

    ``ttFont['COLR'][<glyphName>] = <value>`` will set the color layers for any glyph.

    See also https://learn.microsoft.com/en-us/typography/opentype/spec/colr
    """

    @staticmethod
    def _decompileColorLayersV0(table):
        """Convert a decompiled otTables COLR v0 into {baseGlyph: [LayerRecord]}."""
        if not table.LayerRecordArray:
            return {}
        colorLayerLists = {}
        layerRecords = table.LayerRecordArray.LayerRecord
        numLayerRecords = len(layerRecords)
        for baseRec in table.BaseGlyphRecordArray.BaseGlyphRecord:
            baseGlyph = baseRec.BaseGlyph
            firstLayerIndex = baseRec.FirstLayerIndex
            numLayers = baseRec.NumLayers
            # Each base glyph's layer run must stay within the layer array.
            assert firstLayerIndex + numLayers <= numLayerRecords
            layers = []
            for i in range(firstLayerIndex, firstLayerIndex + numLayers):
                layerRec = layerRecords[i]
                layers.append(LayerRecord(layerRec.LayerGlyph, layerRec.PaletteIndex))
            colorLayerLists[baseGlyph] = layers
        return colorLayerLists

    def _toOTTable(self, ttFont):
        """Build an otTables COLR (v0 content) from self.ColorLayers."""
        from . import otTables
        from fontTools.colorLib.builder import populateCOLRv0

        tableClass = getattr(otTables, self.tableTag)
        table = tableClass()
        table.Version = self.version

        populateCOLRv0(
            table,
            {
                baseGlyph: [(layer.name, layer.colorID) for layer in layers]
                for baseGlyph, layers in self.ColorLayers.items()
            },
            glyphMap=ttFont.getReverseGlyphMap(rebuild=True),
        )
        return table

    def decompile(self, data, ttFont):
        """Decompile via otData; v0 content is adapted to the legacy dict API."""
        from .otBase import OTTableReader
        from . import otTables

        # We use otData to decompile, but we adapt the decompiled otTables to the
        # existing COLR v0 API for backward compatibility.
        reader = OTTableReader(data, tableTag=self.tableTag)
        tableClass = getattr(otTables, self.tableTag)
        table = tableClass()
        table.decompile(reader, ttFont)

        self.version = table.Version
        if self.version == 0:
            self.ColorLayers = self._decompileColorLayersV0(table)
        else:
            # for new versions, keep the raw otTables around
            self.table = table

    def compile(self, ttFont):
        """Compile either the raw otTables (v1+) or the v0 dict contents."""
        from .otBase import OTTableWriter

        if hasattr(self, "table"):
            table = self.table
        else:
            table = self._toOTTable(ttFont)

        writer = OTTableWriter(tableTag=self.tableTag)
        table.compile(writer, ttFont)
        return writer.getAllData()

    def toXML(self, writer, ttFont):
        if hasattr(self, "table"):
            # v1+: delegate to the otTables representation.
            self.table.toXML2(writer, ttFont)
        else:
            # v0: legacy ColorGlyph/layer XML, sorted by glyph ID.
            writer.simpletag("version", value=self.version)
            writer.newline()
            for baseGlyph in sorted(self.ColorLayers.keys(), key=ttFont.getGlyphID):
                writer.begintag("ColorGlyph", name=baseGlyph)
                writer.newline()
                for layer in self.ColorLayers[baseGlyph]:
                    layer.toXML(writer, ttFont)
                writer.endtag("ColorGlyph")
                writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name == "version":  # old COLR v0 API
            setattr(self, name, safeEval(attrs["value"]))
        elif name == "ColorGlyph":
            if not hasattr(self, "ColorLayers"):
                self.ColorLayers = {}
            glyphName = attrs["name"]
            # BUGFIX: removed a redundant first pass over `content` that only
            # skipped string elements and had no other effect (dead code).
            layers = []
            for element in content:
                if isinstance(element, str):
                    continue
                layer = LayerRecord()
                layer.fromXML(element[0], element[1], element[2], ttFont)
                layers.append(layer)
            self.ColorLayers[glyphName] = layers
        else:  # new COLR v1 API
            from . import otTables

            if not hasattr(self, "table"):
                tableClass = getattr(otTables, self.tableTag)
                self.table = tableClass()
            self.table.fromXML(name, attrs, content, ttFont)
            self.table.populateDefaults()
            self.version = self.table.Version

    def __getitem__(self, glyphName):
        if not isinstance(glyphName, str):
            raise TypeError(f"expected str, found {type(glyphName).__name__}")
        return self.ColorLayers[glyphName]

    def __setitem__(self, glyphName, value):
        if not isinstance(glyphName, str):
            raise TypeError(f"expected str, found {type(glyphName).__name__}")
        if value is not None:
            self.ColorLayers[glyphName] = value
        elif glyphName in self.ColorLayers:
            # Assigning None removes an existing entry.
            del self.ColorLayers[glyphName]

    def __delitem__(self, glyphName):
        del self.ColorLayers[glyphName]
|
||||
|
||||
|
||||
class LayerRecord(object):
    """A single COLR v0 layer: a glyph name plus a CPAL palette index."""

    def __init__(self, name=None, colorID=None):
        self.name = name  # layer glyph name
        self.colorID = colorID  # index into the CPAL palette

    def toXML(self, writer, ttFont):
        writer.simpletag("layer", name=self.name, colorID=self.colorID)
        writer.newline()

    def fromXML(self, eltname, attrs, content, ttFont):
        for name, value in attrs.items():
            if name == "name":
                # glyph names are kept as raw strings
                setattr(self, name, value)
            else:
                # everything else (e.g. colorID) is parsed as a literal
                setattr(self, name, safeEval(value))
|
||||
@@ -0,0 +1,305 @@
|
||||
# Copyright 2013 Google, Inc. All Rights Reserved.
|
||||
#
|
||||
# Google Author(s): Behdad Esfahbod
|
||||
|
||||
from fontTools.misc.textTools import bytesjoin, safeEval
|
||||
from . import DefaultTable
|
||||
import array
|
||||
from collections import namedtuple
|
||||
import struct
|
||||
import sys
|
||||
|
||||
|
||||
class table_C_P_A_L_(DefaultTable.DefaultTable):
    """Color Palette table

    The ``CPAL`` table contains a set of one or more color palettes. The color
    records in each palette can be referenced by the ``COLR`` table to specify
    the colors used in a color glyph.

    See also https://learn.microsoft.com/en-us/typography/opentype/spec/cpal
    """

    # Sentinel nameID meaning "no name table entry".
    NO_NAME_ID = 0xFFFF
    DEFAULT_PALETTE_TYPE = 0

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        self.palettes = []
        self.paletteTypes = []
        self.paletteLabels = []
        self.paletteEntryLabels = []

    def decompile(self, data, ttFont):
        """Parse the CPAL header, color records, and (v1) auxiliary arrays."""
        (
            self.version,
            self.numPaletteEntries,
            numPalettes,
            numColorRecords,
            goffsetFirstColorRecord,
        ) = struct.unpack(">HHHHL", data[:12])
        assert (
            self.version <= 1
        ), "Version of CPAL table is higher than I know how to handle"
        self.palettes = []
        pos = 12
        for i in range(numPalettes):
            # Each palette is a start index into the shared color record array.
            startIndex = struct.unpack(">H", data[pos : pos + 2])[0]
            assert startIndex + self.numPaletteEntries <= numColorRecords
            pos += 2
            palette = []
            ppos = goffsetFirstColorRecord + startIndex * 4
            for j in range(self.numPaletteEntries):
                palette.append(Color(*struct.unpack(">BBBB", data[ppos : ppos + 4])))
                ppos += 4
            self.palettes.append(palette)
        if self.version == 0:
            # v0 has no auxiliary arrays; zero offsets yield defaults below.
            offsetToPaletteTypeArray = 0
            offsetToPaletteLabelArray = 0
            offsetToPaletteEntryLabelArray = 0
        else:
            pos = 12 + numPalettes * 2
            (
                offsetToPaletteTypeArray,
                offsetToPaletteLabelArray,
                offsetToPaletteEntryLabelArray,
            ) = struct.unpack(">LLL", data[pos : pos + 12])
        self.paletteTypes = self._decompileUInt32Array(
            data,
            offsetToPaletteTypeArray,
            numPalettes,
            default=self.DEFAULT_PALETTE_TYPE,
        )
        self.paletteLabels = self._decompileUInt16Array(
            data, offsetToPaletteLabelArray, numPalettes, default=self.NO_NAME_ID
        )
        self.paletteEntryLabels = self._decompileUInt16Array(
            data,
            offsetToPaletteEntryLabelArray,
            self.numPaletteEntries,
            default=self.NO_NAME_ID,
        )

    def _decompileUInt16Array(self, data, offset, numElements, default=0):
        """Read numElements big-endian uint16s; a zero offset yields defaults."""
        if offset == 0:
            return [default] * numElements
        result = array.array("H", data[offset : offset + 2 * numElements])
        if sys.byteorder != "big":
            result.byteswap()
        assert len(result) == numElements, result
        return result.tolist()

    def _decompileUInt32Array(self, data, offset, numElements, default=0):
        """Read numElements big-endian uint32s; a zero offset yields defaults."""
        if offset == 0:
            return [default] * numElements
        result = array.array("I", data[offset : offset + 4 * numElements])
        if sys.byteorder != "big":
            result.byteswap()
        assert len(result) == numElements, result
        return result.tolist()

    def compile(self, ttFont):
        """Serialize the table; v1 appends the three optional arrays."""
        colorRecordIndices, colorRecords = self._compileColorRecords()
        paletteTypes = self._compilePaletteTypes()
        paletteLabels = self._compilePaletteLabels()
        paletteEntryLabels = self._compilePaletteEntryLabels()
        numColorRecords = len(colorRecords) // 4
        offsetToFirstColorRecord = 12 + len(colorRecordIndices)
        if self.version >= 1:
            # v1 inserts the three array offsets after the palette indices.
            offsetToFirstColorRecord += 12
        header = struct.pack(
            ">HHHHL",
            self.version,
            self.numPaletteEntries,
            len(self.palettes),
            numColorRecords,
            offsetToFirstColorRecord,
        )
        if self.version == 0:
            dataList = [header, colorRecordIndices, colorRecords]
        else:
            pos = offsetToFirstColorRecord + len(colorRecords)
            if len(paletteTypes) == 0:
                offsetToPaletteTypeArray = 0
            else:
                offsetToPaletteTypeArray = pos
                pos += len(paletteTypes)
            if len(paletteLabels) == 0:
                offsetToPaletteLabelArray = 0
            else:
                offsetToPaletteLabelArray = pos
                pos += len(paletteLabels)
            if len(paletteEntryLabels) == 0:
                offsetToPaletteEntryLabelArray = 0
            else:
                offsetToPaletteEntryLabelArray = pos
                # BUGFIX: was `pos += len(paletteLabels)`, advancing the
                # running offset by the wrong array's size.
                pos += len(paletteEntryLabels)
            header1 = struct.pack(
                ">LLL",
                offsetToPaletteTypeArray,
                offsetToPaletteLabelArray,
                offsetToPaletteEntryLabelArray,
            )
            dataList = [
                header,
                colorRecordIndices,
                header1,
                colorRecords,
                paletteTypes,
                paletteLabels,
                paletteEntryLabels,
            ]
        return bytesjoin(dataList)

    def _compilePalette(self, palette):
        """Pack one palette's colors as consecutive BGRA records."""
        assert len(palette) == self.numPaletteEntries
        pack = lambda c: struct.pack(">BBBB", c.blue, c.green, c.red, c.alpha)
        return bytesjoin([pack(color) for color in palette])

    def _compileColorRecords(self):
        """Pack all palettes, deduplicating identical ones via a pool."""
        colorRecords, colorRecordIndices, pool = [], [], {}
        for palette in self.palettes:
            packedPalette = self._compilePalette(palette)
            if packedPalette in pool:
                index = pool[packedPalette]
            else:
                index = len(colorRecords)
                colorRecords.append(packedPalette)
                pool[packedPalette] = index
            colorRecordIndices.append(struct.pack(">H", index * self.numPaletteEntries))
        return bytesjoin(colorRecordIndices), bytesjoin(colorRecords)

    def _compilePaletteTypes(self):
        """Pack palette types; empty when v0 or all types are the default."""
        if self.version == 0 or not any(self.paletteTypes):
            return b""
        assert len(self.paletteTypes) == len(self.palettes)
        result = bytesjoin([struct.pack(">I", ptype) for ptype in self.paletteTypes])
        assert len(result) == 4 * len(self.palettes)
        return result

    def _compilePaletteLabels(self):
        """Pack palette labels; empty when v0 or no palette is named."""
        if self.version == 0 or all(l == self.NO_NAME_ID for l in self.paletteLabels):
            return b""
        assert len(self.paletteLabels) == len(self.palettes)
        result = bytesjoin([struct.pack(">H", label) for label in self.paletteLabels])
        assert len(result) == 2 * len(self.palettes)
        return result

    def _compilePaletteEntryLabels(self):
        """Pack entry labels; empty when v0 or no entry is named."""
        if self.version == 0 or all(
            l == self.NO_NAME_ID for l in self.paletteEntryLabels
        ):
            return b""
        assert len(self.paletteEntryLabels) == self.numPaletteEntries
        result = bytesjoin(
            [struct.pack(">H", label) for label in self.paletteEntryLabels]
        )
        assert len(result) == 2 * self.numPaletteEntries
        return result

    def toXML(self, writer, ttFont):
        numPalettes = len(self.palettes)
        paletteLabels = {i: nameID for (i, nameID) in enumerate(self.paletteLabels)}
        paletteTypes = {i: typ for (i, typ) in enumerate(self.paletteTypes)}
        writer.simpletag("version", value=self.version)
        writer.newline()
        writer.simpletag("numPaletteEntries", value=self.numPaletteEntries)
        writer.newline()
        for index, palette in enumerate(self.palettes):
            attrs = {"index": index}
            paletteType = paletteTypes.get(index, self.DEFAULT_PALETTE_TYPE)
            paletteLabel = paletteLabels.get(index, self.NO_NAME_ID)
            # label/type attributes only exist for v1 non-default values.
            if self.version > 0 and paletteLabel != self.NO_NAME_ID:
                attrs["label"] = paletteLabel
            if self.version > 0 and paletteType != self.DEFAULT_PALETTE_TYPE:
                attrs["type"] = paletteType
            writer.begintag("palette", **attrs)
            writer.newline()
            if (
                self.version > 0
                and paletteLabel != self.NO_NAME_ID
                and ttFont
                and "name" in ttFont
            ):
                # Emit the human-readable name as an XML comment, if resolvable.
                name = ttFont["name"].getDebugName(paletteLabel)
                if name is not None:
                    writer.comment(name)
                    writer.newline()
            assert len(palette) == self.numPaletteEntries
            for cindex, color in enumerate(palette):
                color.toXML(writer, ttFont, cindex)
            writer.endtag("palette")
            writer.newline()
        if self.version > 0 and not all(
            l == self.NO_NAME_ID for l in self.paletteEntryLabels
        ):
            writer.begintag("paletteEntryLabels")
            writer.newline()
            for index, label in enumerate(self.paletteEntryLabels):
                if label != self.NO_NAME_ID:
                    writer.simpletag("label", index=index, value=label)
                    if self.version > 0 and label and ttFont and "name" in ttFont:
                        name = ttFont["name"].getDebugName(label)
                        if name is not None:
                            writer.comment(name)
                    writer.newline()
            writer.endtag("paletteEntryLabels")
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name == "palette":
            self.paletteLabels.append(int(attrs.get("label", self.NO_NAME_ID)))
            self.paletteTypes.append(int(attrs.get("type", self.DEFAULT_PALETTE_TYPE)))
            palette = []
            for element in content:
                if isinstance(element, str):
                    continue
                attrs = element[1]
                color = Color.fromHex(attrs["value"])
                palette.append(color)
            self.palettes.append(palette)
        elif name == "paletteEntryLabels":
            colorLabels = {}
            for element in content:
                if isinstance(element, str):
                    continue
                elementName, elementAttr, _ = element
                if elementName == "label":
                    labelIndex = safeEval(elementAttr["index"])
                    nameID = safeEval(elementAttr["value"])
                    colorLabels[labelIndex] = nameID
            # Fill gaps with the no-name sentinel.
            self.paletteEntryLabels = [
                colorLabels.get(i, self.NO_NAME_ID)
                for i in range(self.numPaletteEntries)
            ]
        elif "value" in attrs:
            value = safeEval(attrs["value"])
            setattr(self, name, value)
            if name == "numPaletteEntries":
                self.paletteEntryLabels = [self.NO_NAME_ID] * self.numPaletteEntries
|
||||
|
||||
|
||||
class Color(namedtuple("Color", "blue green red alpha")):
    """One CPAL color record, stored in the table's BGRA field order."""

    def hex(self):
        """Return the color as an ``#RRGGBBAA`` hex string."""
        return "#%02X%02X%02X%02X" % (self.red, self.green, self.blue, self.alpha)

    def __repr__(self):
        return self.hex()

    def toXML(self, writer, ttFont, index=None):
        """Write the color as a ``<color>`` simpletag with its hex value."""
        writer.simpletag("color", value=self.hex(), index=index)
        writer.newline()

    @classmethod
    def fromHex(cls, value):
        """Parse an ``RRGGBB[AA]`` string, with or without a leading '#'.

        A missing alpha component defaults to fully opaque (0xFF).
        """
        if value[0] == "#":
            value = value[1:]
        # Two hex digits per channel, in RGB order.
        red, green, blue = (int(value[i : i + 2], 16) for i in range(0, 6, 2))
        alpha = int(value[6:8], 16) if len(value) >= 8 else 0xFF
        return cls(red=red, green=green, blue=blue, alpha=alpha)

    @classmethod
    def fromRGBA(cls, red, green, blue, alpha):
        """Build a Color from individual channel values."""
        return cls(blue=blue, green=green, red=red, alpha=alpha)
|
||||
@@ -0,0 +1,158 @@
|
||||
from fontTools.misc.textTools import bytesjoin, strjoin, tobytes, tostr, safeEval
|
||||
from fontTools.misc import sstruct
|
||||
from . import DefaultTable
|
||||
import base64
|
||||
|
||||
# sstruct format for the fixed 8-byte DSIG table header.
DSIG_HeaderFormat = """
> # big endian
ulVersion: L
usNumSigs: H
usFlag: H
"""
# followed by an array of usNumSigs DSIG_Signature records
DSIG_SignatureFormat = """
> # big endian
ulFormat: L
ulLength: L # length includes DSIG_SignatureBlock header
ulOffset: L
"""
# followed by an array of usNumSigs DSIG_SignatureBlock records,
# each followed immediately by the pkcs7 bytes
DSIG_SignatureBlockFormat = """
> # big endian
usReserved1: H
usReserved2: H
cbSignature: l # length of following raw pkcs7 data
"""
|
||||
|
||||
#
# NOTE
# The DSIG table format allows SignatureBlocks to reside anywhere
# in the table, possibly in a different order than listed in the
# array that follows the first table header.
#
# This implementation does not keep track of any gaps and/or data
# before or after the actual signature blocks while decompiling;
# on compilation it puts the blocks in the same physical order as
# listed in the header, with no padding whatsoever.
#
|
||||
|
||||
|
||||
class table_D_S_I_G_(DefaultTable.DefaultTable):
    """Digital Signature table

    The ``DSIG`` table contains cryptographic signatures for the font.

    See also https://learn.microsoft.com/en-us/typography/opentype/spec/dsig
    """

    def decompile(self, data, ttFont):
        """Parse the DSIG header, signature records, and signature blocks."""
        dummy, newData = sstruct.unpack2(DSIG_HeaderFormat, data, self)
        assert self.ulVersion == 1, "DSIG ulVersion must be 1"
        assert self.usFlag & ~1 == 0, "DSIG usFlag must be 0x1 or 0x0"
        self.signatureRecords = sigrecs = []
        for n in range(self.usNumSigs):
            sigrec, newData = sstruct.unpack2(
                DSIG_SignatureFormat, newData, SignatureRecord()
            )
            assert sigrec.ulFormat == 1, (
                "DSIG signature record #%d ulFormat must be 1" % n
            )
            sigrecs.append(sigrec)
        # BUGFIX: this loop previously reused the stale index `n` from the
        # loop above in its assertion messages, so any failure always
        # reported the last record's number (and misspelled "usReserved").
        for n, sigrec in enumerate(sigrecs):
            # Each block lives at its own ulOffset from the start of the table.
            dummy, newData = sstruct.unpack2(
                DSIG_SignatureBlockFormat, data[sigrec.ulOffset :], sigrec
            )
            assert sigrec.usReserved1 == 0, (
                "DSIG signature record #%d usReserved1 must be 0" % n
            )
            assert sigrec.usReserved2 == 0, (
                "DSIG signature record #%d usReserved2 must be 0" % n
            )
            sigrec.pkcs7 = newData[: sigrec.cbSignature]

    def compile(self, ttFont):
        """Serialize header + signature records + blocks, padded to even length."""
        packed = sstruct.pack(DSIG_HeaderFormat, self)
        headers = [packed]
        # Blocks start right after the header and the record array.
        offset = len(packed) + self.usNumSigs * sstruct.calcsize(DSIG_SignatureFormat)
        data = []
        for sigrec in self.signatureRecords:
            # first pack signature block
            sigrec.cbSignature = len(sigrec.pkcs7)
            packed = sstruct.pack(DSIG_SignatureBlockFormat, sigrec) + sigrec.pkcs7
            data.append(packed)
            # update redundant length field
            sigrec.ulLength = len(packed)
            # update running table offset
            sigrec.ulOffset = offset
            headers.append(sstruct.pack(DSIG_SignatureFormat, sigrec))
            offset += sigrec.ulLength
        if offset % 2:
            # Pad to even bytes
            data.append(b"\0")
        return bytesjoin(headers + data)

    def toXML(self, xmlWriter, ttFont):
        """Write the header attributes plus one PEM-style block per record."""
        xmlWriter.comment(
            "note that the Digital Signature will be invalid after recompilation!"
        )
        xmlWriter.newline()
        xmlWriter.simpletag(
            "tableHeader",
            version=self.ulVersion,
            numSigs=self.usNumSigs,
            flag="0x%X" % self.usFlag,
        )
        for sigrec in self.signatureRecords:
            xmlWriter.newline()
            sigrec.toXML(xmlWriter, ttFont)
        xmlWriter.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name == "tableHeader":
            # The header resets the record list; records follow.
            self.signatureRecords = []
            self.ulVersion = safeEval(attrs["version"])
            self.usNumSigs = safeEval(attrs["numSigs"])
            self.usFlag = safeEval(attrs["flag"])
            return
        if name == "SignatureRecord":
            sigrec = SignatureRecord()
            sigrec.fromXML(name, attrs, content, ttFont)
            self.signatureRecords.append(sigrec)
|
||||
|
||||
|
||||
# Predicate for filter(): returns a truthy value for lines that carry
# base64 payload, and a falsy value for PEM armor lines and blank lines.
def pem_spam(
    l,
    spam={
        "-----BEGIN PKCS7-----": True,
        "-----END PKCS7-----": True,
        "": True,
    },
):
    return not spam.get(l.strip())
|
||||
|
||||
|
||||
def b64encode(b):
    """Base64-encode *b*, inserting a newline after every 76 characters
    (including after the final chunk)."""
    s = base64.b64encode(b)
    pieces = []
    # Walk the encoded bytes in 76-character steps.
    for start in range(0, len(s), 76):
        pieces.append(tostr(s[start : start + 76]))
        pieces.append("\n")
    return strjoin(pieces)
|
||||
|
||||
|
||||
class SignatureRecord(object):
    """One DSIG signature: format/length/offset header plus raw pkcs7 bytes."""

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self.__dict__)

    def toXML(self, writer, ttFont):
        writer.begintag(self.__class__.__name__, format=self.ulFormat)
        writer.newline()
        # The pkcs7 payload is dumped as a PEM-style base64 block.
        writer.write_noindent("-----BEGIN PKCS7-----\n")
        writer.write_noindent(b64encode(self.pkcs7))
        writer.write_noindent("-----END PKCS7-----\n")
        writer.endtag(self.__class__.__name__)

    def fromXML(self, name, attrs, content, ttFont):
        self.ulFormat = safeEval(attrs["format"])
        self.usReserved1 = safeEval(attrs.get("reserved1", "0"))
        self.usReserved2 = safeEval(attrs.get("reserved2", "0"))
        # Strip the PEM armor/blank lines, then decode the base64 payload.
        self.pkcs7 = base64.b64decode(tobytes(strjoin(filter(pem_spam, content))))
|
||||
@@ -0,0 +1,35 @@
|
||||
import json
|
||||
from textwrap import indent
|
||||
|
||||
from . import DefaultTable
|
||||
from fontTools.misc.textTools import tostr
|
||||
|
||||
|
||||
class table_D__e_b_g(DefaultTable.DefaultTable):
    """``Debg`` table: arbitrary JSON debugging data carried inside the font."""

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        self.data = {}

    def decompile(self, data, ttFont):
        # The binary payload is simply UTF-8 encoded JSON.
        self.data = json.loads(data)

    def compile(self, ttFont):
        return json.dumps(self.data).encode("utf-8")

    def toXML(self, writer, ttFont):
        # Re-indent the JSON so it lines up with the XMLWriter's indentation.
        pad = tostr(writer.indentwhite) * (writer.indentlevel + 1)
        dumped = json.dumps(self.data, indent=len(writer.indentwhite))
        # Leave the opening "{" line unindented so it sits directly after
        # the `<![CDATA[` marker.
        block = indent(dumped, pad, lambda line: line != "{\n")

        writer.begintag("json")
        writer.newline()
        writer.writecdata(block)
        writer.newline()
        writer.endtag("json")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name == "json":
            self.data = json.loads("".join(content))
|
||||
@@ -0,0 +1,49 @@
|
||||
from fontTools.misc.textTools import Tag
|
||||
from fontTools.ttLib import getClassTag
|
||||
|
||||
|
||||
class DefaultTable(object):
    """Fallback table implementation.

    Keeps the raw binary data untouched and round-trips it through TTX as
    a hex dump. Concrete table classes override decompile/compile/toXML.
    """

    # Tables this one must be compiled after; none by default.
    dependencies = []

    def __init__(self, tag=None):
        # Derive the tag from the class name when none is given.
        self.tableTag = Tag(getClassTag(self.__class__) if tag is None else tag)

    def decompile(self, data, ttFont):
        # Opaque storage: just remember the bytes.
        self.data = data

    def compile(self, ttFont):
        return self.data

    def toXML(self, writer, ttFont, **kwargs):
        # If decompilation failed earlier, surface the recorded error.
        if hasattr(self, "ERROR"):
            writer.comment("An error occurred during the decompilation of this table")
            writer.newline()
            writer.comment(self.ERROR)
            writer.newline()
        writer.begintag("hexdata")
        writer.newline()
        writer.dumphex(self.compile(ttFont))
        writer.endtag("hexdata")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        from fontTools import ttLib
        from fontTools.misc.textTools import readHex

        if name != "hexdata":
            raise ttLib.TTLibError("can't handle '%s' element" % name)
        self.decompile(readHex(content), ttFont)

    def __repr__(self):
        return f"<'{self.tableTag}' table at {id(self):x}>"

    def __eq__(self, other):
        if type(self) != type(other):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        eq = self.__eq__(other)
        if eq is NotImplemented:
            return eq
        return not eq
|
||||
@@ -0,0 +1,835 @@
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.textTools import (
|
||||
bytechr,
|
||||
byteord,
|
||||
bytesjoin,
|
||||
strjoin,
|
||||
safeEval,
|
||||
readHex,
|
||||
hexStr,
|
||||
deHexStr,
|
||||
)
|
||||
from .BitmapGlyphMetrics import (
|
||||
BigGlyphMetrics,
|
||||
bigGlyphMetricsFormat,
|
||||
SmallGlyphMetrics,
|
||||
smallGlyphMetricsFormat,
|
||||
)
|
||||
from . import DefaultTable
|
||||
import itertools
|
||||
import os
|
||||
import struct
|
||||
import logging
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
# sstruct format strings used throughout this module.

# EBDT table header: a single 16.16 fixed-point version number.
ebdtTableVersionFormat = """
> # big endian
version: 16.16F
"""

# Component record for composite bitmap glyphs (used by formats 8/9):
# a glyph id plus the x/y offset at which that component is drawn.
ebdtComponentFormat = """
> # big endian
glyphCode: H
xOffset: b
yOffset: b
"""
|
||||
|
||||
|
||||
class table_E_B_D_T_(DefaultTable.DefaultTable):
    """Embedded Bitmap Data table

    The ``EBDT`` table contains monochrome or grayscale bitmap data for
    glyphs. It must be used in concert with the ``EBLC`` table.

    See also https://learn.microsoft.com/en-us/typography/opentype/spec/ebdt
    """

    # Keep a reference to the name of the data locator table.
    locatorName = "EBLC"

    # This method can be overridden in subclasses to support new formats
    # without changing the other implementation. Also can be used as a
    # convenience method for coverting a font file to an alternative format.
    def getImageFormatClass(self, imageFormat):
        # Maps a numeric image format to its BitmapGlyph subclass.
        return ebdt_bitmap_classes[imageFormat]

    def decompile(self, data, ttFont):
        """Parse the binary table using the glyph offsets stored in the
        locator (EBLC) table of *ttFont*."""
        # Get the version but don't advance the slice.
        # Most of the lookup for this table is done relative
        # to the begining so slice by the offsets provided
        # in the EBLC table.
        sstruct.unpack2(ebdtTableVersionFormat, data, self)

        # Keep a dict of glyphs that have been seen so they aren't remade.
        # This dict maps intervals of data to the BitmapGlyph.
        glyphDict = {}

        # Pull out the EBLC table and loop through glyphs.
        # A strike is a concept that spans both tables.
        # The actual bitmap data is stored in the EBDT.
        locator = ttFont[self.__class__.locatorName]
        self.strikeData = []
        for curStrike in locator.strikes:
            bitmapGlyphDict = {}
            self.strikeData.append(bitmapGlyphDict)
            for indexSubTable in curStrike.indexSubTables:
                dataIter = zip(indexSubTable.names, indexSubTable.locations)
                for curName, curLoc in dataIter:
                    # Don't create duplicate data entries for the same glyphs.
                    # Instead just use the structures that already exist if they exist.
                    if curLoc in glyphDict:
                        curGlyph = glyphDict[curLoc]
                    else:
                        curGlyphData = data[slice(*curLoc)]
                        imageFormatClass = self.getImageFormatClass(
                            indexSubTable.imageFormat
                        )
                        curGlyph = imageFormatClass(curGlyphData, ttFont)
                        glyphDict[curLoc] = curGlyph
                    bitmapGlyphDict[curName] = curGlyph

    def compile(self, ttFont):
        """Serialize all strikes; also rewrites each index subtable's
        ``locations`` so the locator table can be compiled afterwards."""
        dataList = []
        dataList.append(sstruct.pack(ebdtTableVersionFormat, self))
        dataSize = len(dataList[0])

        # Keep a dict of glyphs that have been seen so they aren't remade.
        # This dict maps the id of the BitmapGlyph to the interval
        # in the data.
        glyphDict = {}

        # Go through the bitmap glyph data. Just in case the data for a glyph
        # changed the size metrics should be recalculated. There are a variety
        # of formats and they get stored in the EBLC table. That is why
        # recalculation is defered to the EblcIndexSubTable class and just
        # pass what is known about bitmap glyphs from this particular table.
        locator = ttFont[self.__class__.locatorName]
        for curStrike, curGlyphDict in zip(locator.strikes, self.strikeData):
            for curIndexSubTable in curStrike.indexSubTables:
                dataLocations = []
                for curName in curIndexSubTable.names:
                    # Handle the data placement based on seeing the glyph or not.
                    # Just save a reference to the location if the glyph has already
                    # been saved in compile. This code assumes that glyphs will only
                    # be referenced multiple times from indexFormat5. By luck the
                    # code may still work when referencing poorly ordered fonts with
                    # duplicate references. If there is a font that is unlucky the
                    # respective compile methods for the indexSubTables will fail
                    # their assertions. All fonts seem to follow this assumption.
                    # More complicated packing may be needed if a counter-font exists.
                    glyph = curGlyphDict[curName]
                    objectId = id(glyph)
                    if objectId not in glyphDict:
                        data = glyph.compile(ttFont)
                        data = curIndexSubTable.padBitmapData(data)
                        startByte = dataSize
                        dataSize += len(data)
                        endByte = dataSize
                        dataList.append(data)
                        dataLoc = (startByte, endByte)
                        glyphDict[objectId] = dataLoc
                    else:
                        dataLoc = glyphDict[objectId]
                    dataLocations.append(dataLoc)
                # Just use the new data locations in the indexSubTable.
                # The respective compile implementations will take care
                # of any of the problems in the convertion that may arise.
                curIndexSubTable.locations = dataLocations

        return bytesjoin(dataList)

    def toXML(self, writer, ttFont):
        """Write the table to TTX; the per-glyph output format is chosen by
        ``ttFont.bitmapGlyphDataFormat``."""
        # When exporting to XML if one of the data export formats
        # requires metrics then those metrics may be in the locator.
        # In this case populate the bitmaps with "export metrics".
        if ttFont.bitmapGlyphDataFormat in ("row", "bitwise"):
            locator = ttFont[self.__class__.locatorName]
            for curStrike, curGlyphDict in zip(locator.strikes, self.strikeData):
                for curIndexSubTable in curStrike.indexSubTables:
                    for curName in curIndexSubTable.names:
                        glyph = curGlyphDict[curName]
                        # I'm not sure which metrics have priority here.
                        # For now if both metrics exist go with glyph metrics.
                        if hasattr(glyph, "metrics"):
                            glyph.exportMetrics = glyph.metrics
                        else:
                            glyph.exportMetrics = curIndexSubTable.metrics
                        glyph.exportBitDepth = curStrike.bitmapSizeTable.bitDepth

        writer.simpletag("header", [("version", self.version)])
        writer.newline()
        locator = ttFont[self.__class__.locatorName]
        for strikeIndex, bitmapGlyphDict in enumerate(self.strikeData):
            writer.begintag("strikedata", [("index", strikeIndex)])
            writer.newline()
            for curName, curBitmap in bitmapGlyphDict.items():
                curBitmap.toXML(strikeIndex, curName, writer, ttFont)
            writer.endtag("strikedata")
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Rebuild the table (header and per-strike glyph dicts) from TTX."""
        if name == "header":
            self.version = safeEval(attrs["version"])
        elif name == "strikedata":
            if not hasattr(self, "strikeData"):
                self.strikeData = []
            strikeIndex = safeEval(attrs["index"])

            bitmapGlyphDict = {}
            for element in content:
                if not isinstance(element, tuple):
                    continue
                name, attrs, content = element
                # NOTE(review): the first four characters of the tag are
                # skipped when matching the prefix — presumably so sibling
                # bitmap tables can reuse these classes; confirm.
                if name[4:].startswith(_bitmapGlyphSubclassPrefix[4:]):
                    imageFormat = safeEval(name[len(_bitmapGlyphSubclassPrefix) :])
                    glyphName = attrs["name"]
                    imageFormatClass = self.getImageFormatClass(imageFormat)
                    curGlyph = imageFormatClass(None, None)
                    curGlyph.fromXML(name, attrs, content, ttFont)
                    assert glyphName not in bitmapGlyphDict, (
                        "Duplicate glyphs with the same name '%s' in the same strike."
                        % glyphName
                    )
                    bitmapGlyphDict[glyphName] = curGlyph
                else:
                    log.warning("%s being ignored by %s", name, self.__class__.__name__)

            # Grow the strike data array to the appropriate size. The XML
            # format allows the strike index value to be out of order.
            if strikeIndex >= len(self.strikeData):
                self.strikeData += [None] * (strikeIndex + 1 - len(self.strikeData))
            assert (
                self.strikeData[strikeIndex] is None
            ), "Duplicate strike EBDT indices."
            self.strikeData[strikeIndex] = bitmapGlyphDict
|
||||
|
||||
|
||||
class EbdtComponent(object):
    """A single component reference inside a composite bitmap glyph
    (glyph name plus x/y offset)."""

    def toXML(self, writer, ttFont):
        writer.begintag("ebdtComponent", [("name", self.name)])
        writer.newline()
        # Skip the first format field (glyphCode); it is represented by "name".
        for fieldName in sstruct.getformat(ebdtComponentFormat)[1][1:]:
            writer.simpletag(fieldName, value=getattr(self, fieldName))
            writer.newline()
        writer.endtag("ebdtComponent")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        self.name = attrs["name"]
        knownFields = set(sstruct.getformat(ebdtComponentFormat)[1][1:])
        for element in content:
            if not isinstance(element, tuple):
                continue
            elementName, elementAttrs, _ = element
            if elementName in knownFields:
                setattr(self, elementName, safeEval(elementAttrs["value"]))
            else:
                log.warning(
                    "unknown name '%s' being ignored by EbdtComponent.", elementName
                )
|
||||
|
||||
|
||||
# Helper functions for dealing with binary.
|
||||
|
||||
|
||||
def _data2binary(data, numBits):
    """Render the first *numBits* bits of *data* as a '0'/'1' string,
    least-significant bit of each byte first."""
    bits = []
    remaining = numBits
    for curByte in data:
        value = byteord(curByte)
        take = min(8, remaining)
        for _ in range(take):
            bits.append("1" if value & 0x1 else "0")
            value >>= 1
        remaining -= take
    return strjoin(bits)
|
||||
|
||||
|
||||
def _binary2data(binary):
    """Pack a '0'/'1' string back into bytes; inverse of _data2binary.

    Each group of eight characters becomes one byte, with the first
    character ending up in the least significant bit.
    """
    out = []
    for start in range(0, len(binary), 8):
        byte = 0
        for bit in reversed(binary[start : start + 8]):
            byte = (byte << 1) | (bit == "1")
        out.append(bytechr(byte))
    return bytesjoin(out)
|
||||
|
||||
|
||||
def _memoize(f):
    """Memoize the single-argument function *f*, but only cache results
    for small keys (ints or length-1 sequences) to bound memory use."""

    class _MemoDict(dict):
        def __missing__(self, key):
            value = f(key)
            if isinstance(key, int) or len(key) == 1:
                self[key] = value
            return value

    return _MemoDict().__getitem__
|
||||
|
||||
|
||||
# 00100111 -> 11100100 per byte, not to be confused with little/big endian.
|
||||
# Bitmap data per byte is in the order that binary is written on the page
|
||||
# with the least significant bit as far right as possible. This is the
|
||||
# opposite of what makes sense algorithmically and hence this function.
|
||||
@_memoize
def _reverseBytes(data):
    r"""Reverse the bit order within each byte of *data*.

    >>> bin(ord(_reverseBytes(0b00100111)))
    '0b11100100'
    >>> _reverseBytes(b'\x00\xf0')
    b'\x00\x0f'
    """
    # Multi-byte input: recurse per byte (single bytes hit the memo cache).
    if isinstance(data, bytes) and len(data) != 1:
        return bytesjoin(map(_reverseBytes, data))
    byte = byteord(data)
    result = 0
    for _ in range(8):
        result = (result << 1) | (byte & 1)
        byte >>= 1
    return bytechr(result)
|
||||
|
||||
|
||||
# This section of code is for reading and writing image data to/from XML.
|
||||
|
||||
|
||||
def _writeRawImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont):
    """Dump the glyph's raw image bytes as a <rawimagedata> hex block."""
    tag = "rawimagedata"
    writer.begintag(tag)
    writer.newline()
    writer.dumphex(bitmapObject.imageData)
    writer.endtag(tag)
    writer.newline()
|
||||
|
||||
|
||||
def _readRawImageData(bitmapObject, name, attrs, content, ttFont):
    # Inverse of _writeRawImageData: hex text content -> raw image bytes.
    bitmapObject.imageData = readHex(content)
|
||||
|
||||
|
||||
def _writeRowImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont):
    """Write the glyph image as one hex string per bitmap row."""
    metrics = bitmapObject.exportMetrics
    bitDepth = bitmapObject.exportBitDepth
    # The export attributes are one-shot; drop them once consumed.
    del bitmapObject.exportMetrics
    del bitmapObject.exportBitDepth

    writer.begintag(
        "rowimagedata", bitDepth=bitDepth, width=metrics.width, height=metrics.height
    )
    writer.newline()
    for rowIndex in range(metrics.height):
        rowBytes = bitmapObject.getRow(rowIndex, bitDepth=bitDepth, metrics=metrics)
        writer.simpletag("row", value=hexStr(rowBytes))
        writer.newline()
    writer.endtag("rowimagedata")
    writer.newline()
|
||||
|
||||
|
||||
def _readRowImageData(bitmapObject, name, attrs, content, ttFont):
    """Rebuild image data from the per-row hex strings produced by
    _writeRowImageData."""
    bitDepth = safeEval(attrs["bitDepth"])
    metrics = SmallGlyphMetrics()
    metrics.width = safeEval(attrs["width"])
    metrics.height = safeEval(attrs["height"])

    rows = []
    for element in content:
        if not isinstance(element, tuple):
            continue
        elementName, elementAttrs, _ = element
        if elementName == "row":
            rows.append(deHexStr(elementAttrs["value"]))
    bitmapObject.setRows(rows, bitDepth=bitDepth, metrics=metrics)
|
||||
|
||||
|
||||
def _writeBitwiseImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont):
    """Write the glyph as ASCII art: '.' for 0 bits and '@' for 1 bits."""
    metrics = bitmapObject.exportMetrics
    bitDepth = bitmapObject.exportBitDepth
    # The export attributes are one-shot; drop them once consumed.
    del bitmapObject.exportMetrics
    del bitmapObject.exportBitDepth

    # Map binary digits to more readable/artistic ASCII characters.
    asciiFor = {"0": ".", "1": "@"}

    writer.begintag(
        "bitwiseimagedata",
        bitDepth=bitDepth,
        width=metrics.width,
        height=metrics.height,
    )
    writer.newline()
    for rowIndex in range(metrics.height):
        rowBytes = bitmapObject.getRow(
            rowIndex, bitDepth=1, metrics=metrics, reverseBytes=True
        )
        bits = _data2binary(rowBytes, metrics.width)
        # Render the bit string in its readable ASCII-art form.
        writer.simpletag("row", value=strjoin(asciiFor[bit] for bit in bits))
        writer.newline()
    writer.endtag("bitwiseimagedata")
    writer.newline()
|
||||
|
||||
|
||||
def _readBitwiseImageData(bitmapObject, name, attrs, content, ttFont):
    """Parse ASCII-art rows back into bitmap data; inverse of
    _writeBitwiseImageData."""
    bitDepth = safeEval(attrs["bitDepth"])
    metrics = SmallGlyphMetrics()
    metrics.width = safeEval(attrs["width"])
    metrics.height = safeEval(attrs["height"])

    # Every character counts as '1' except space, period and '0',
    # which all map to '0'.
    zeroChars = {" ": "0", ".": "0", "0": "0"}

    dataRows = []
    for element in content:
        if not isinstance(element, tuple):
            continue
        elementName, elementAttrs, _ = element
        if elementName == "row":
            bits = strjoin(zeroChars.get(ch, "1") for ch in elementAttrs["value"])
            dataRows.append(_binary2data(bits))

    bitmapObject.setRows(
        dataRows, bitDepth=bitDepth, metrics=metrics, reverseBytes=True
    )
|
||||
|
||||
|
||||
def _writeExtFileImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont):
    """Write the glyph's raw image bytes to an external file and record its
    path in an <extfileimagedata> tag.

    Files land in ``<output dir>/bitmaps/strike<N>/<glyphName><ext>``.
    """
    try:
        folder = os.path.dirname(writer.file.name)
    except AttributeError:
        # fall back to current directory if output file's directory isn't found
        folder = "."
    folder = os.path.join(folder, "bitmaps", "strike%d" % strikeIndex)
    # exist_ok=True avoids the isdir()/makedirs() check-then-create race the
    # original code had, and creates intermediate directories in one call.
    os.makedirs(folder, exist_ok=True)

    fullPath = os.path.join(folder, glyphName + bitmapObject.fileExtension)
    writer.simpletag("extfileimagedata", value=fullPath)
    writer.newline()

    with open(fullPath, "wb") as file:
        file.write(bitmapObject.imageData)
|
||||
|
||||
|
||||
def _readExtFileImageData(bitmapObject, name, attrs, content, ttFont):
    """Load image bytes back from the external file referenced by the tag."""
    path = attrs["value"]
    with open(path, "rb") as imageFile:
        bitmapObject.imageData = imageFile.read()
|
||||
|
||||
|
||||
# End of XML writing code.
|
||||
|
||||
# Important information about the naming scheme. Used for identifying formats
# in XML.
# XML tags for bitmap glyphs look like "ebdt_bitmap_format_<N>"; the numeric
# suffix is parsed back out of the tag/class name (see BitmapGlyph.getFormat
# and table_E_B_D_T_.fromXML) to select the right class.
_bitmapGlyphSubclassPrefix = "ebdt_bitmap_format_"
|
||||
|
||||
|
||||
class BitmapGlyph(object):
    """Base class for one bitmap glyph's data.

    Holds the raw ``data`` slice until first attribute access, at which
    point it is lazily decompiled (see ``__getattr__``). Subclasses
    implement ``decompile``/``compile`` for their specific image format.
    """

    # For the external file format. This can be changed in subclasses. This way
    # when the extfile option is turned on files have the form: glyphName.ext
    # The default is just a flat binary file with no meaning.
    fileExtension = ".bin"

    # Keep track of reading and writing of various forms.
    # Maps the ttFont.bitmapGlyphDataFormat option to (write, read) functions.
    xmlDataFunctions = {
        "raw": (_writeRawImageData, _readRawImageData),
        "row": (_writeRowImageData, _readRowImageData),
        "bitwise": (_writeBitwiseImageData, _readBitwiseImageData),
        "extfile": (_writeExtFileImageData, _readExtFileImageData),
    }

    def __init__(self, data, ttFont):
        self.data = data
        self.ttFont = ttFont
        # TODO Currently non-lazy decompilation is untested here...
        # if not ttFont.lazy:
        # 	self.decompile()
        # 	del self.data

    def __getattr__(self, attr):
        # Allow lazy decompile.
        # Dunder lookups and "data" itself must fail fast, otherwise the
        # decompile below would recurse or run at the wrong time.
        if attr[:2] == "__":
            raise AttributeError(attr)
        if attr == "data":
            raise AttributeError(attr)
        self.decompile()
        del self.data
        return getattr(self, attr)

    def ensureDecompiled(self, recurse=False):
        # Force the lazy decompile if it has not happened yet.
        if hasattr(self, "data"):
            self.decompile()
            del self.data

    # Not a fan of this but it is needed for safer safety checking.
    def getFormat(self):
        # The numeric image format is encoded in the subclass name suffix.
        return safeEval(self.__class__.__name__[len(_bitmapGlyphSubclassPrefix) :])

    def toXML(self, strikeIndex, glyphName, writer, ttFont):
        writer.begintag(self.__class__.__name__, [("name", glyphName)])
        writer.newline()

        self.writeMetrics(writer, ttFont)
        # Use the internal write method to write using the correct output format.
        self.writeData(strikeIndex, glyphName, writer, ttFont)

        writer.endtag(self.__class__.__name__)
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        self.readMetrics(name, attrs, content, ttFont)
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attr, content = element
            if not name.endswith("imagedata"):
                continue
            # Chop off 'imagedata' from the tag to get just the option.
            option = name[: -len("imagedata")]
            assert option in self.__class__.xmlDataFunctions
            self.readData(name, attr, content, ttFont)

    # Some of the glyphs have the metrics. This allows for metrics to be
    # added if the glyph format has them. Default behavior is to do nothing.
    def writeMetrics(self, writer, ttFont):
        pass

    # The opposite of write metrics.
    def readMetrics(self, name, attrs, content, ttFont):
        pass

    def writeData(self, strikeIndex, glyphName, writer, ttFont):
        # Unknown/unset bitmapGlyphDataFormat falls back to the raw hex dump.
        try:
            writeFunc, readFunc = self.__class__.xmlDataFunctions[
                ttFont.bitmapGlyphDataFormat
            ]
        except KeyError:
            writeFunc = _writeRawImageData
        writeFunc(strikeIndex, glyphName, self, writer, ttFont)

    def readData(self, name, attrs, content, ttFont):
        # Chop off 'imagedata' from the tag to get just the option.
        option = name[: -len("imagedata")]
        writeFunc, readFunc = self.__class__.xmlDataFunctions[option]
        readFunc(self, name, attrs, content, ttFont)
|
||||
|
||||
|
||||
# A closure for creating a mixin for the two types of metrics handling.
|
||||
# Most of the code is very similar so its easier to deal with here.
|
||||
# Everything works just by passing the class that the mixin is for.
|
||||
def _createBitmapPlusMetricsMixin(metricsClass):
    """Build a mixin class that reads/writes *metricsClass* metrics in XML.

    The big- and small-metrics variants differ only in which metrics class
    they accept, so both are produced by this one factory; the "wrong"
    metrics element is ignored with a warning.
    """
    # Both metrics names are listed here to make meaningful error messages.
    metricStrings = [BigGlyphMetrics.__name__, SmallGlyphMetrics.__name__]
    curMetricsName = metricsClass.__name__
    # Find which metrics this is for and determine the opposite name.
    metricsId = metricStrings.index(curMetricsName)
    oppositeMetricsName = metricStrings[1 - metricsId]

    class BitmapPlusMetricsMixin(object):
        def writeMetrics(self, writer, ttFont):
            self.metrics.toXML(writer, ttFont)

        def readMetrics(self, name, attrs, content, ttFont):
            for element in content:
                if not isinstance(element, tuple):
                    continue
                name, attrs, content = element
                if name == curMetricsName:
                    self.metrics = metricsClass()
                    self.metrics.fromXML(name, attrs, content, ttFont)
                elif name == oppositeMetricsName:
                    log.warning(
                        "Warning: %s being ignored in format %d.",
                        oppositeMetricsName,
                        self.getFormat(),
                    )

    return BitmapPlusMetricsMixin
|
||||
|
||||
|
||||
# Since there are only two types of mixin's just create them here.
|
||||
# The two concrete mixin variants, instantiated once at import time.
BitmapPlusBigMetricsMixin = _createBitmapPlusMetricsMixin(BigGlyphMetrics)
BitmapPlusSmallMetricsMixin = _createBitmapPlusMetricsMixin(SmallGlyphMetrics)
|
||||
|
||||
|
||||
# Data that is bit aligned can be tricky to deal with. These classes implement
|
||||
# helper functionality for dealing with the data and getting a particular row
|
||||
# of bitwise data. Also helps implement fancy data export/import in XML.
|
||||
# Data that is bit aligned can be tricky to deal with. These classes implement
# helper functionality for dealing with the data and getting a particular row
# of bitwise data. Also helps implement fancy data export/import in XML.
class BitAlignedBitmapMixin(object):
    """Row access helpers for bitmap formats whose rows are packed with no
    byte-boundary padding (rows may start mid-byte)."""

    def _getBitRange(self, row, bitDepth, metrics):
        # (start, stop) bit offsets of *row* within the packed image data.
        rowBits = bitDepth * metrics.width
        bitOffset = row * rowBits
        return (bitOffset, bitOffset + rowBits)

    def getRow(self, row, bitDepth=1, metrics=None, reverseBytes=False):
        """Extract one row as bytes, zero-padding the final partial byte."""
        if metrics is None:
            metrics = self.metrics
        assert 0 <= row and row < metrics.height, "Illegal row access in bitmap"

        # Loop through each byte. This can cover two bytes in the original data or
        # a single byte if things happen to be aligned. The very last entry might
        # not be aligned so take care to trim the binary data to size and pad with
        # zeros in the row data. Bit aligned data is somewhat tricky.
        #
        # Example of data cut. Data cut represented in x's.
        # '|' represents byte boundary.
        # data = ...0XX|XXXXXX00|000... => XXXXXXXX
        # or
        # data = ...0XX|XXXX0000|000... => XXXXXX00
        # or
        # data = ...000|XXXXXXXX|000... => XXXXXXXX
        # or
        # data = ...000|00XXXX00|000... => XXXX0000
        #
        dataList = []
        bitRange = self._getBitRange(row, bitDepth, metrics)
        stepRange = bitRange + (8,)
        for curBit in range(*stepRange):
            endBit = min(curBit + 8, bitRange[1])
            numBits = endBit - curBit
            cutPoint = curBit % 8
            firstByteLoc = curBit // 8
            secondByteLoc = endBit // 8
            if firstByteLoc < secondByteLoc:
                numBitsCut = 8 - cutPoint
            else:
                numBitsCut = endBit - curBit
            curByte = _reverseBytes(self.imageData[firstByteLoc])
            firstHalf = byteord(curByte) >> cutPoint
            firstHalf = ((1 << numBitsCut) - 1) & firstHalf
            newByte = firstHalf
            if firstByteLoc < secondByteLoc and secondByteLoc < len(self.imageData):
                curByte = _reverseBytes(self.imageData[secondByteLoc])
                secondHalf = byteord(curByte) << numBitsCut
                newByte = (firstHalf | secondHalf) & ((1 << numBits) - 1)
            dataList.append(bytechr(newByte))

        # The way the data is kept is opposite the algorithm used.
        data = bytesjoin(dataList)
        if not reverseBytes:
            data = _reverseBytes(data)
        return data

    def setRows(self, dataRows, bitDepth=1, metrics=None, reverseBytes=False):
        """Pack *dataRows* (one bytes object per row) back into bit-aligned
        image data; inverse of getRow."""
        if metrics is None:
            metrics = self.metrics
        if not reverseBytes:
            dataRows = list(map(_reverseBytes, dataRows))

        # Keep track of a list of ordinal values as they are easier to modify
        # than a list of strings. Map to actual strings later.
        numBytes = (self._getBitRange(len(dataRows), bitDepth, metrics)[0] + 7) // 8
        ordDataList = [0] * numBytes
        for row, data in enumerate(dataRows):
            bitRange = self._getBitRange(row, bitDepth, metrics)
            stepRange = bitRange + (8,)
            for curBit, curByte in zip(range(*stepRange), data):
                endBit = min(curBit + 8, bitRange[1])
                cutPoint = curBit % 8
                firstByteLoc = curBit // 8
                secondByteLoc = endBit // 8
                if firstByteLoc < secondByteLoc:
                    numBitsCut = 8 - cutPoint
                else:
                    numBitsCut = endBit - curBit
                curByte = byteord(curByte)
                firstByte = curByte & ((1 << numBitsCut) - 1)
                ordDataList[firstByteLoc] |= firstByte << cutPoint
                if firstByteLoc < secondByteLoc and secondByteLoc < numBytes:
                    secondByte = (curByte >> numBitsCut) & ((1 << 8 - numBitsCut) - 1)
                    ordDataList[secondByteLoc] |= secondByte

        # Save the image data with the bits going the correct way.
        self.imageData = _reverseBytes(bytesjoin(map(bytechr, ordDataList)))
|
||||
|
||||
|
||||
class ByteAlignedBitmapMixin(object):
    """Row access helpers for bitmap formats whose rows start on byte
    boundaries (each row is padded out to a whole number of bytes)."""

    def _getByteRange(self, row, bitDepth, metrics):
        # (start, stop) byte offsets of *row* within the image data.
        bytesPerRow = (bitDepth * metrics.width + 7) // 8
        start = row * bytesPerRow
        return (start, start + bytesPerRow)

    def getRow(self, row, bitDepth=1, metrics=None, reverseBytes=False):
        if metrics is None:
            metrics = self.metrics
        assert 0 <= row and row < metrics.height, "Illegal row access in bitmap"
        start, stop = self._getByteRange(row, bitDepth, metrics)
        rowData = self.imageData[start:stop]
        return _reverseBytes(rowData) if reverseBytes else rowData

    def setRows(self, dataRows, bitDepth=1, metrics=None, reverseBytes=False):
        if metrics is None:
            metrics = self.metrics
        if reverseBytes:
            dataRows = map(_reverseBytes, dataRows)
        self.imageData = bytesjoin(dataRows)
|
||||
|
||||
|
||||
class ebdt_bitmap_format_1(
    ByteAlignedBitmapMixin, BitmapPlusSmallMetricsMixin, BitmapGlyph
):
    """Format 1: small glyph metrics followed by byte-aligned image data."""

    def decompile(self):
        metrics = SmallGlyphMetrics()
        _, rest = sstruct.unpack2(smallGlyphMetricsFormat, self.data, metrics)
        self.metrics = metrics
        self.imageData = rest

    def compile(self, ttFont):
        return sstruct.pack(smallGlyphMetricsFormat, self.metrics) + self.imageData
|
||||
|
||||
|
||||
class ebdt_bitmap_format_2(
    BitAlignedBitmapMixin, BitmapPlusSmallMetricsMixin, BitmapGlyph
):
    """Format 2: small glyph metrics followed by bit-aligned image data."""

    def decompile(self):
        metrics = SmallGlyphMetrics()
        _, rest = sstruct.unpack2(smallGlyphMetricsFormat, self.data, metrics)
        self.metrics = metrics
        self.imageData = rest

    def compile(self, ttFont):
        return sstruct.pack(smallGlyphMetricsFormat, self.metrics) + self.imageData
|
||||
|
||||
|
||||
class ebdt_bitmap_format_5(BitAlignedBitmapMixin, BitmapGlyph):
    """Format 5: bit-aligned image data only; metrics live in the EBLC
    index subtable instead of here."""

    def decompile(self):
        self.imageData = self.data

    def compile(self, ttFont):
        return self.imageData
|
||||
|
||||
|
||||
class ebdt_bitmap_format_6(
    ByteAlignedBitmapMixin, BitmapPlusBigMetricsMixin, BitmapGlyph
):
    """Format 6: big glyph metrics followed by byte-aligned image data."""

    def decompile(self):
        metrics = BigGlyphMetrics()
        _, rest = sstruct.unpack2(bigGlyphMetricsFormat, self.data, metrics)
        self.metrics = metrics
        self.imageData = rest

    def compile(self, ttFont):
        return sstruct.pack(bigGlyphMetricsFormat, self.metrics) + self.imageData
|
||||
|
||||
|
||||
class ebdt_bitmap_format_7(
    BitAlignedBitmapMixin, BitmapPlusBigMetricsMixin, BitmapGlyph
):
    """Format 7: big glyph metrics followed by bit-aligned image data."""

    def decompile(self):
        metrics = BigGlyphMetrics()
        _, rest = sstruct.unpack2(bigGlyphMetricsFormat, self.data, metrics)
        self.metrics = metrics
        self.imageData = rest

    def compile(self, ttFont):
        return sstruct.pack(bigGlyphMetricsFormat, self.metrics) + self.imageData
|
||||
|
||||
|
||||
class ComponentBitmapGlyph(BitmapGlyph):
    """Base class for composite bitmap glyphs, which are built from a list
    of EbdtComponent references instead of raw image data."""

    def toXML(self, strikeIndex, glyphName, writer, ttFont):
        writer.begintag(self.__class__.__name__, [("name", glyphName)])
        writer.newline()

        self.writeMetrics(writer, ttFont)

        writer.begintag("components")
        writer.newline()
        for curComponent in self.componentArray:
            curComponent.toXML(writer, ttFont)
        writer.endtag("components")
        writer.newline()

        writer.endtag(self.__class__.__name__)
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        self.readMetrics(name, attrs, content, ttFont)
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attr, content = element
            if name == "components":
                self.componentArray = []
                # Each child <ebdtComponent> becomes one EbdtComponent entry.
                for compElement in content:
                    if not isinstance(compElement, tuple):
                        continue
                    name, attrs, content = compElement
                    if name == "ebdtComponent":
                        curComponent = EbdtComponent()
                        curComponent.fromXML(name, attrs, content, ttFont)
                        self.componentArray.append(curComponent)
                    else:
                        log.warning("'%s' being ignored in component array.", name)
|
||||
|
||||
|
||||
class ebdt_bitmap_format_8(BitmapPlusSmallMetricsMixin, ComponentBitmapGlyph):
    """Composite bitmap glyph with a small-metrics header (EBDT format 8)."""

    def decompile(self):
        self.metrics = SmallGlyphMetrics()
        _, rest = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics)
        # A single pad byte follows the small metrics; skip it.
        rest = rest[1:]

        (numComponents,) = struct.unpack(">H", rest[:2])
        rest = rest[2:]
        components = []
        for _ in range(numComponents):
            component = EbdtComponent()
            _, rest = sstruct.unpack2(ebdtComponentFormat, rest, component)
            component.name = self.ttFont.getGlyphName(component.glyphCode)
            components.append(component)
        self.componentArray = components

    def compile(self, ttFont):
        parts = [
            sstruct.pack(smallGlyphMetricsFormat, self.metrics),
            b"\0",  # pad byte after the small metrics
            struct.pack(">H", len(self.componentArray)),
        ]
        for component in self.componentArray:
            # Glyph codes are regenerated from names so edits survive a round trip.
            component.glyphCode = ttFont.getGlyphID(component.name)
            parts.append(sstruct.pack(ebdtComponentFormat, component))
        return bytesjoin(parts)
|
||||
|
||||
|
||||
class ebdt_bitmap_format_9(BitmapPlusBigMetricsMixin, ComponentBitmapGlyph):
    """Composite bitmap glyph with a big-metrics header (EBDT format 9)."""

    def decompile(self):
        self.metrics = BigGlyphMetrics()
        _, rest = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics)
        (numComponents,) = struct.unpack(">H", rest[:2])
        rest = rest[2:]
        components = []
        for _ in range(numComponents):
            component = EbdtComponent()
            _, rest = sstruct.unpack2(ebdtComponentFormat, rest, component)
            component.name = self.ttFont.getGlyphName(component.glyphCode)
            components.append(component)
        self.componentArray = components

    def compile(self, ttFont):
        parts = [
            sstruct.pack(bigGlyphMetricsFormat, self.metrics),
            struct.pack(">H", len(self.componentArray)),
        ]
        for component in self.componentArray:
            # Glyph codes are regenerated from names so edits survive a round trip.
            component.glyphCode = ttFont.getGlyphID(component.name)
            parts.append(sstruct.pack(ebdtComponentFormat, component))
        return bytesjoin(parts)
|
||||
|
||||
|
||||
# Dictionary of bitmap formats to the class representing that format
# currently only the ones listed in this map are the ones supported.
# Formats 3 and 4 (deprecated/obsolete in the spec) are intentionally absent.
ebdt_bitmap_classes = {
    1: ebdt_bitmap_format_1,
    2: ebdt_bitmap_format_2,
    5: ebdt_bitmap_format_5,
    6: ebdt_bitmap_format_6,
    7: ebdt_bitmap_format_7,
    8: ebdt_bitmap_format_8,
    9: ebdt_bitmap_format_9,
}
|
||||
@@ -0,0 +1,718 @@
|
||||
from fontTools.misc import sstruct
|
||||
from . import DefaultTable
|
||||
from fontTools.misc.textTools import bytesjoin, safeEval
|
||||
from .BitmapGlyphMetrics import (
|
||||
BigGlyphMetrics,
|
||||
bigGlyphMetricsFormat,
|
||||
SmallGlyphMetrics,
|
||||
smallGlyphMetricsFormat,
|
||||
)
|
||||
import struct
|
||||
import itertools
|
||||
from collections import deque
|
||||
import logging
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
# sstruct format for the fixed EBLC header (8 bytes).
eblcHeaderFormat = """
> # big endian
version: 16.16F
numSizes: I
"""
# The table format string is split to handle sbitLineMetrics simply.
bitmapSizeTableFormatPart1 = """
> # big endian
indexSubTableArrayOffset: I
indexTablesSize: I
numberOfIndexSubTables: I
colorRef: I
"""
# The compound type for hori and vert.
sbitLineMetricsFormat = """
> # big endian
ascender: b
descender: b
widthMax: B
caretSlopeNumerator: b
caretSlopeDenominator: b
caretOffset: b
minOriginSB: b
minAdvanceSB: b
maxBeforeBL: b
minAfterBL: b
pad1: b
pad2: b
"""
# hori and vert go between the two parts.
bitmapSizeTableFormatPart2 = """
> # big endian
startGlyphIndex: H
endGlyphIndex: H
ppemX: B
ppemY: B
bitDepth: B
flags: b
"""

# IndexSubTableArray record: firstGlyphIndex, lastGlyphIndex, additionalOffset.
indexSubTableArrayFormat = ">HHL"
indexSubTableArraySize = struct.calcsize(indexSubTableArrayFormat)

# IndexSubHeader record: indexFormat, imageFormat, imageDataOffset.
indexSubHeaderFormat = ">HHL"
indexSubHeaderSize = struct.calcsize(indexSubHeaderFormat)

# glyphIdOffsetPair record used by index format 4.
codeOffsetPairFormat = ">HH"
codeOffsetPairSize = struct.calcsize(codeOffsetPairFormat)
|
||||
|
||||
|
||||
class table_E_B_L_C_(DefaultTable.DefaultTable):
    """Embedded Bitmap Location table

    The ``EBLC`` table contains the locations of monochrome or grayscale
    bitmaps for glyphs. It must be used in concert with the ``EBDT`` table.

    See also https://learn.microsoft.com/en-us/typography/opentype/spec/eblc
    """

    dependencies = ["EBDT"]

    # This method can be overridden in subclasses to support new formats
    # without changing the other implementation. Also can be used as a
    # convenience method for converting a font file to an alternative format.
    def getIndexFormatClass(self, indexFormat):
        """Map an index subtable format number to its implementation class."""
        return eblc_sub_table_classes[indexFormat]

    def decompile(self, data, ttFont):
        """Parse the binary EBLC table into Strike objects on self.strikes."""
        # Save the original data because offsets are from the start of the table.
        origData = data
        i = 0

        # Fixed 8-byte header: version and numSizes (fields land on self).
        dummy = sstruct.unpack(eblcHeaderFormat, data[:8], self)
        i += 8

        # First pass: read every bitmapSizeTable sequentially.
        self.strikes = []
        for curStrikeIndex in range(self.numSizes):
            curStrike = Strike()
            self.strikes.append(curStrike)
            curTable = curStrike.bitmapSizeTable
            dummy = sstruct.unpack2(
                bitmapSizeTableFormatPart1, data[i : i + 16], curTable
            )
            i += 16
            # The two sbitLineMetrics records sit between the two table parts.
            for metric in ("hori", "vert"):
                metricObj = SbitLineMetrics()
                vars(curTable)[metric] = metricObj
                dummy = sstruct.unpack2(
                    sbitLineMetricsFormat, data[i : i + 12], metricObj
                )
                i += 12
            dummy = sstruct.unpack(
                bitmapSizeTableFormatPart2, data[i : i + 8], curTable
            )
            i += 8

        # Second pass: follow each strike's offsets to its index subtables.
        for curStrike in self.strikes:
            curTable = curStrike.bitmapSizeTable
            for subtableIndex in range(curTable.numberOfIndexSubTables):
                i = (
                    curTable.indexSubTableArrayOffset
                    + subtableIndex * indexSubTableArraySize
                )

                tup = struct.unpack(
                    indexSubTableArrayFormat, data[i : i + indexSubTableArraySize]
                )
                (firstGlyphIndex, lastGlyphIndex, additionalOffsetToIndexSubtable) = tup
                # additionalOffset is relative to the subtable array, not the table.
                i = curTable.indexSubTableArrayOffset + additionalOffsetToIndexSubtable

                tup = struct.unpack(
                    indexSubHeaderFormat, data[i : i + indexSubHeaderSize]
                )
                (indexFormat, imageFormat, imageDataOffset) = tup

                indexFormatClass = self.getIndexFormatClass(indexFormat)
                # The subtable keeps the remaining data for lazy decompilation.
                indexSubTable = indexFormatClass(data[i + indexSubHeaderSize :], ttFont)
                indexSubTable.firstGlyphIndex = firstGlyphIndex
                indexSubTable.lastGlyphIndex = lastGlyphIndex
                indexSubTable.additionalOffsetToIndexSubtable = (
                    additionalOffsetToIndexSubtable
                )
                indexSubTable.indexFormat = indexFormat
                indexSubTable.imageFormat = imageFormat
                indexSubTable.imageDataOffset = imageDataOffset
                indexSubTable.decompile()  # https://github.com/fonttools/fonttools/issues/317
                curStrike.indexSubTables.append(indexSubTable)

    def compile(self, ttFont):
        """Serialize self.strikes back into binary EBLC data."""
        dataList = []
        self.numSizes = len(self.strikes)
        dataList.append(sstruct.pack(eblcHeaderFormat, self))

        # Data size of the header + bitmapSizeTable needs to be calculated
        # in order to form offsets. This value will hold the size of the data
        # in dataList after all the data is consolidated in dataList.
        dataSize = len(dataList[0])

        # The table will be structured in the following order:
        # (0) header
        # (1) Each bitmapSizeTable [1 ... self.numSizes]
        # (2) Alternate between indexSubTableArray and indexSubTable
        #     for each bitmapSizeTable present.
        #
        # The issue is maintaining the proper offsets when table information
        # gets moved around. All offsets and size information must be recalculated
        # when building the table to allow editing within ttLib and also allow easy
        # import/export to and from XML. All of this offset information is lost
        # when exporting to XML so everything must be calculated fresh so importing
        # from XML will work cleanly. Only byte offset and size information is
        # calculated fresh. Count information like numberOfIndexSubTables is
        # checked through assertions. If the information in this table was not
        # touched or was changed properly then these types of values should match.
        #
        # The table will be rebuilt the following way:
        # (0) Precompute the size of all the bitmapSizeTables. This is needed to
        #     compute the offsets properly.
        # (1) For each bitmapSizeTable compute the indexSubTable and
        #     indexSubTableArray pair. The indexSubTable must be computed first
        #     so that the offset information in indexSubTableArray can be
        #     calculated. Update the data size after each pairing.
        # (2) Build each bitmapSizeTable.
        # (3) Consolidate all the data into the main dataList in the correct order.

        for _ in self.strikes:
            dataSize += sstruct.calcsize(bitmapSizeTableFormatPart1)
            dataSize += len(("hori", "vert")) * sstruct.calcsize(sbitLineMetricsFormat)
            dataSize += sstruct.calcsize(bitmapSizeTableFormatPart2)

        indexSubTablePairDataList = []
        for curStrike in self.strikes:
            curTable = curStrike.bitmapSizeTable
            curTable.numberOfIndexSubTables = len(curStrike.indexSubTables)
            curTable.indexSubTableArrayOffset = dataSize

            # Precompute the size of the indexSubTableArray. This information
            # is important for correctly calculating the new value for
            # additionalOffsetToIndexSubtable.
            sizeOfSubTableArray = (
                curTable.numberOfIndexSubTables * indexSubTableArraySize
            )
            lowerBound = dataSize
            dataSize += sizeOfSubTableArray
            upperBound = dataSize

            indexSubTableDataList = []
            for indexSubTable in curStrike.indexSubTables:
                indexSubTable.additionalOffsetToIndexSubtable = (
                    dataSize - curTable.indexSubTableArrayOffset
                )
                # Glyph index range is recomputed from names; XML values are ignored.
                glyphIds = list(map(ttFont.getGlyphID, indexSubTable.names))
                indexSubTable.firstGlyphIndex = min(glyphIds)
                indexSubTable.lastGlyphIndex = max(glyphIds)
                data = indexSubTable.compile(ttFont)
                indexSubTableDataList.append(data)
                dataSize += len(data)
            curTable.startGlyphIndex = min(
                ist.firstGlyphIndex for ist in curStrike.indexSubTables
            )
            curTable.endGlyphIndex = max(
                ist.lastGlyphIndex for ist in curStrike.indexSubTables
            )

            # The array records come first, then the subtable bodies.
            for i in curStrike.indexSubTables:
                data = struct.pack(
                    indexSubHeaderFormat,
                    i.firstGlyphIndex,
                    i.lastGlyphIndex,
                    i.additionalOffsetToIndexSubtable,
                )
                indexSubTablePairDataList.append(data)
            indexSubTablePairDataList.extend(indexSubTableDataList)
            curTable.indexTablesSize = dataSize - curTable.indexSubTableArrayOffset

        # Now that all offsets are final, emit the bitmapSizeTables themselves.
        for curStrike in self.strikes:
            curTable = curStrike.bitmapSizeTable
            data = sstruct.pack(bitmapSizeTableFormatPart1, curTable)
            dataList.append(data)
            for metric in ("hori", "vert"):
                metricObj = vars(curTable)[metric]
                data = sstruct.pack(sbitLineMetricsFormat, metricObj)
                dataList.append(data)
            data = sstruct.pack(bitmapSizeTableFormatPart2, curTable)
            dataList.append(data)
        dataList.extend(indexSubTablePairDataList)

        return bytesjoin(dataList)

    def toXML(self, writer, ttFont):
        """Write the table header and every strike as XML."""
        writer.simpletag("header", [("version", self.version)])
        writer.newline()
        for curIndex, curStrike in enumerate(self.strikes):
            curStrike.toXML(curIndex, writer, ttFont)

    def fromXML(self, name, attrs, content, ttFont):
        """Populate the table from XML, one header or strike element at a time."""
        if name == "header":
            self.version = safeEval(attrs["version"])
        elif name == "strike":
            if not hasattr(self, "strikes"):
                self.strikes = []
            strikeIndex = safeEval(attrs["index"])
            curStrike = Strike()
            curStrike.fromXML(name, attrs, content, ttFont, self)

            # Grow the strike array to the appropriate size. The XML format
            # allows for the strike index value to be out of order.
            if strikeIndex >= len(self.strikes):
                self.strikes += [None] * (strikeIndex + 1 - len(self.strikes))
            assert self.strikes[strikeIndex] is None, "Duplicate strike EBLC indices."
            self.strikes[strikeIndex] = curStrike
|
||||
|
||||
|
||||
class Strike(object):
    # One EBLC strike: a bitmapSizeTable plus the list of index subtables
    # that locate the glyph image data for that size.

    def __init__(self):
        self.bitmapSizeTable = BitmapSizeTable()
        self.indexSubTables = []

    def toXML(self, strikeIndex, writer, ttFont):
        """Write this strike (size table plus index subtables) as XML."""
        writer.begintag("strike", [("index", strikeIndex)])
        writer.newline()
        self.bitmapSizeTable.toXML(writer, ttFont)
        writer.comment(
            "GlyphIds are written but not read. The firstGlyphIndex and\nlastGlyphIndex values will be recalculated by the compiler."
        )
        writer.newline()
        for indexSubTable in self.indexSubTables:
            indexSubTable.toXML(writer, ttFont)
        writer.endtag("strike")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont, locator):
        """Rebuild this strike from XML.

        ``locator`` is the owning table; it supplies getIndexFormatClass so
        subclasses of the table can substitute their own subtable classes.
        """
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attrs, content = element
            if name == "bitmapSizeTable":
                self.bitmapSizeTable.fromXML(name, attrs, content, ttFont)
            elif name.startswith(_indexSubTableSubclassPrefix):
                # The tag name encodes the index format number after the prefix.
                indexFormat = safeEval(name[len(_indexSubTableSubclassPrefix) :])
                indexFormatClass = locator.getIndexFormatClass(indexFormat)
                indexSubTable = indexFormatClass(None, None)
                indexSubTable.indexFormat = indexFormat
                indexSubTable.fromXML(name, attrs, content, ttFont)
                self.indexSubTables.append(indexSubTable)
|
||||
|
||||
|
||||
class BitmapSizeTable(object):
    # Per-strike size record: line metrics plus simple scalar fields.

    # Returns all the simple metric names that bitmap size table
    # cares about in terms of XML creation.
    def _getXMLMetricNames(self):
        dataNames = sstruct.getformat(bitmapSizeTableFormatPart1)[1]
        dataNames = {**dataNames, **sstruct.getformat(bitmapSizeTableFormatPart2)[1]}
        # Skip the first 3 data names because they are byte offsets and counts.
        return list(dataNames.keys())[3:]

    def toXML(self, writer, ttFont):
        """Write the size table: both line-metrics blocks, then scalar fields."""
        writer.begintag("bitmapSizeTable")
        writer.newline()
        for metric in ("hori", "vert"):
            getattr(self, metric).toXML(metric, writer, ttFont)
        for metricName in self._getXMLMetricNames():
            writer.simpletag(metricName, value=getattr(self, metricName))
            writer.newline()
        writer.endtag("bitmapSizeTable")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Read the size table from XML, ignoring unrecognized elements."""
        # Create a lookup for all the simple names that make sense to
        # bitmap size table. Only read the information from these names.
        dataNames = set(self._getXMLMetricNames())
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attrs, content = element
            if name == "sbitLineMetrics":
                direction = attrs["direction"]
                assert direction in (
                    "hori",
                    "vert",
                ), "SbitLineMetrics direction specified invalid."
                metricObj = SbitLineMetrics()
                metricObj.fromXML(name, attrs, content, ttFont)
                vars(self)[direction] = metricObj
            elif name in dataNames:
                vars(self)[name] = safeEval(attrs["value"])
            else:
                log.warning("unknown name '%s' being ignored in BitmapSizeTable.", name)
|
||||
|
||||
|
||||
class SbitLineMetrics(object):
    """One sbitLineMetrics record ('hori' or 'vert') of a bitmap strike."""

    def toXML(self, name, writer, ttFont):
        # Emit one simpletag per field, in the order the binary format defines.
        writer.begintag("sbitLineMetrics", [("direction", name)])
        writer.newline()
        for fieldName in sstruct.getformat(sbitLineMetricsFormat)[1]:
            writer.simpletag(fieldName, value=getattr(self, fieldName))
            writer.newline()
        writer.endtag("sbitLineMetrics")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        # Only fields declared in the binary format are accepted; anything
        # else is silently skipped.
        knownFields = set(sstruct.getformat(sbitLineMetricsFormat)[1])
        for element in content:
            if isinstance(element, tuple):
                name, attrs, content = element
                if name in knownFields:
                    vars(self)[name] = safeEval(attrs["value"])
|
||||
|
||||
|
||||
# Important information about the naming scheme. Used for identifying subtables.
# Strike.fromXML strips this prefix from a tag name to recover the format number.
_indexSubTableSubclassPrefix = "eblc_index_sub_table_"
|
||||
|
||||
|
||||
class EblcIndexSubTable(object):
    """Base class for all EBLC index subtable formats.

    Keeps the raw ``data`` and ``ttFont`` around and decompiles lazily on
    first access to a missing attribute.
    """

    def __init__(self, data, ttFont):
        self.data = data
        self.ttFont = ttFont
        # TODO Currently non-lazy decompiling doesn't work for this class...
        # if not ttFont.lazy:
        # 	self.decompile()
        # 	del self.data, self.ttFont

    def __getattr__(self, attr):
        # Allow lazy decompile: any missing non-dunder attribute triggers it.
        if attr[:2] == "__":
            raise AttributeError(attr)
        # If "data" itself is missing, decompile already ran (it deletes data);
        # recursing here would loop forever.
        if attr == "data":
            raise AttributeError(attr)
        self.decompile()
        return getattr(self, attr)

    def ensureDecompiled(self, recurse=False):
        # Force decompilation; "data" still existing means it hasn't happened yet.
        if hasattr(self, "data"):
            self.decompile()

    # This method just takes care of the indexSubHeader. Implementing subclasses
    # should call it to compile the indexSubHeader and then continue compiling
    # the remainder of their unique format.
    def compile(self, ttFont):
        return struct.pack(
            indexSubHeaderFormat,
            self.indexFormat,
            self.imageFormat,
            self.imageDataOffset,
        )

    # Creates the XML for bitmap glyphs. Each index sub table basically makes
    # the same XML except for specific metric information that is written
    # out via a method call that a subclass implements optionally.
    def toXML(self, writer, ttFont):
        writer.begintag(
            self.__class__.__name__,
            [
                ("imageFormat", self.imageFormat),
                ("firstGlyphIndex", self.firstGlyphIndex),
                ("lastGlyphIndex", self.lastGlyphIndex),
            ],
        )
        writer.newline()
        self.writeMetrics(writer, ttFont)
        # Write out the names as thats all thats needed to rebuild etc.
        # For font debugging of consecutive formats the ids are also written.
        # The ids are not read when moving from the XML format.
        glyphIds = map(ttFont.getGlyphID, self.names)
        for glyphName, glyphId in zip(self.names, glyphIds):
            writer.simpletag("glyphLoc", name=glyphName, id=glyphId)
            writer.newline()
        writer.endtag(self.__class__.__name__)
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        # Read all the attributes. Even though the glyph indices are
        # recalculated, they are still read in case there needs to
        # be an immediate export of the data.
        self.imageFormat = safeEval(attrs["imageFormat"])
        self.firstGlyphIndex = safeEval(attrs["firstGlyphIndex"])
        self.lastGlyphIndex = safeEval(attrs["lastGlyphIndex"])

        self.readMetrics(name, attrs, content, ttFont)

        self.names = []
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attrs, content = element
            if name == "glyphLoc":
                self.names.append(attrs["name"])

    # A helper method that writes the metrics for the index sub table. It also
    # is responsible for writing the image size for fixed size data since fixed
    # size is not recalculated on compile. Default behavior is to do nothing.
    def writeMetrics(self, writer, ttFont):
        pass

    # A helper method that is the inverse of writeMetrics.
    def readMetrics(self, name, attrs, content, ttFont):
        pass

    # This method is for fixed glyph data sizes. There are formats where
    # the glyph data is fixed but are actually composite glyphs. To handle
    # this the font spec in indexSubTable makes the data the size of the
    # fixed size by padding the component arrays. This function abstracts
    # out this padding process. Input is data unpadded. Output is data
    # padded only in fixed formats. Default behavior is to return the data.
    def padBitmapData(self, data):
        return data

    # Remove any of the glyph locations and names that are flagged as skipped.
    # This only occurs in formats {1,3}.
    def removeSkipGlyphs(self):
        # Determines if a name, location pair is a valid data location.
        # Skip glyphs are marked when the size is equal to zero.
        def isValidLocation(args):
            (name, (startByte, endByte)) = args
            return startByte < endByte

        # Remove all skip glyphs.
        dataPairs = list(filter(isValidLocation, zip(self.names, self.locations)))
        self.names, self.locations = list(map(list, zip(*dataPairs)))
|
||||
|
||||
|
||||
# A closure for creating a custom mixin. This is done because formats 1 and 3
# are very similar. The only difference between them is the size per offset
# value. Code put in here should handle both cases generally.
def _createOffsetArrayIndexSubTableMixin(formatStringForDataType):
    """Build a mixin class for offset-array index formats.

    ``formatStringForDataType`` is a struct code ("L" for format 1,
    "H" for format 3) that fixes the width of each stored offset.
    """
    # Prep the data size for the offset array data format.
    dataFormat = ">" + formatStringForDataType
    offsetDataSize = struct.calcsize(dataFormat)

    class OffsetArrayIndexSubTableMixin(object):
        def decompile(self):
            numGlyphs = self.lastGlyphIndex - self.firstGlyphIndex + 1
            # numGlyphs + 1 offsets are stored (one extra to bound the last
            # glyph), hence numGlyphs + 2 slice boundaries.
            indexingOffsets = [
                glyphIndex * offsetDataSize for glyphIndex in range(numGlyphs + 2)
            ]
            indexingLocations = zip(indexingOffsets, indexingOffsets[1:])
            offsetArray = [
                struct.unpack(dataFormat, self.data[slice(*loc)])[0]
                for loc in indexingLocations
            ]

            glyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex + 1))
            # Stored offsets are relative to imageDataOffset; make them absolute.
            modifiedOffsets = [offset + self.imageDataOffset for offset in offsetArray]
            self.locations = list(zip(modifiedOffsets, modifiedOffsets[1:]))

            self.names = list(map(self.ttFont.getGlyphName, glyphIds))
            self.removeSkipGlyphs()
            del self.data, self.ttFont

        def compile(self, ttFont):
            # First make sure that all the data lines up properly. Formats 1 and 3
            # must have all its data lined up consecutively. If not this will fail.
            for curLoc, nxtLoc in zip(self.locations, self.locations[1:]):
                assert (
                    curLoc[1] == nxtLoc[0]
                ), "Data must be consecutive in indexSubTable offset formats"

            glyphIds = list(map(ttFont.getGlyphID, self.names))
            # Make sure that all ids are sorted strictly increasing.
            assert all(glyphIds[i] < glyphIds[i + 1] for i in range(len(glyphIds) - 1))

            # Run a simple algorithm to add skip glyphs to the data locations at
            # the places where an id is not present.
            idQueue = deque(glyphIds)
            locQueue = deque(self.locations)
            allGlyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex + 1))
            allLocations = []
            for curId in allGlyphIds:
                if curId != idQueue[0]:
                    # Missing id: emit a zero-length location (a "skip glyph").
                    allLocations.append((locQueue[0][0], locQueue[0][0]))
                else:
                    idQueue.popleft()
                    allLocations.append(locQueue.popleft())

            # Now that all the locations are collected, pack them appropriately into
            # offsets. This is the form where offset[i] is the location and
            # offset[i+1]-offset[i] is the size of the data location.
            offsets = list(allLocations[0]) + [loc[1] for loc in allLocations[1:]]
            # Image data offset must be less than or equal to the minimum of locations.
            # This offset may change the value for round tripping but is safer and
            # allows imageDataOffset to not be required to be in the XML version.
            self.imageDataOffset = min(offsets)
            offsetArray = [offset - self.imageDataOffset for offset in offsets]

            dataList = [EblcIndexSubTable.compile(self, ttFont)]
            dataList += [
                struct.pack(dataFormat, offsetValue) for offsetValue in offsetArray
            ]
            # Take care of any padding issues. Only occurs in format 3.
            if offsetDataSize * len(offsetArray) % 4 != 0:
                dataList.append(struct.pack(dataFormat, 0))
            return bytesjoin(dataList)

    return OffsetArrayIndexSubTableMixin
|
||||
|
||||
|
||||
# A Mixin for functionality shared between the different kinds
# of fixed sized data handling. Both kinds have big metrics so
# that kind of special processing is also handled in this mixin.
class FixedSizeIndexSubTableMixin(object):
    def writeMetrics(self, writer, ttFont):
        """Write imageSize plus the shared BigGlyphMetrics to XML."""
        writer.simpletag("imageSize", value=self.imageSize)
        writer.newline()
        self.metrics.toXML(writer, ttFont)

    def readMetrics(self, name, attrs, content, ttFont):
        """Read imageSize and BigGlyphMetrics back from XML content."""
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attrs, content = element
            if name == "imageSize":
                self.imageSize = safeEval(attrs["value"])
            elif name == BigGlyphMetrics.__name__:
                self.metrics = BigGlyphMetrics()
                self.metrics.fromXML(name, attrs, content, ttFont)
            elif name == SmallGlyphMetrics.__name__:
                # Fixed-size formats only carry big metrics; small ones are invalid.
                log.warning(
                    "SmallGlyphMetrics being ignored in format %d.", self.indexFormat
                )

    def padBitmapData(self, data):
        # Make sure that the data isn't bigger than the fixed size.
        assert len(data) <= self.imageSize, (
            "Data in indexSubTable format %d must be less than the fixed size."
            % self.indexFormat
        )
        # Pad the data so that it matches the fixed size.
        pad = (self.imageSize - len(data)) * b"\0"
        return data + pad
|
||||
|
||||
|
||||
class eblc_index_sub_table_1(
    _createOffsetArrayIndexSubTableMixin("L"), EblcIndexSubTable
):
    # Index format 1: offset array with 4-byte ("L") entries. All behavior
    # comes from the generated offset-array mixin.
    pass
|
||||
|
||||
|
||||
class eblc_index_sub_table_2(FixedSizeIndexSubTableMixin, EblcIndexSubTable):
    # Index format 2: one shared BigGlyphMetrics and a fixed imageSize;
    # glyph locations are computed arithmetically, not stored.

    def decompile(self):
        (self.imageSize,) = struct.unpack(">L", self.data[:4])
        self.metrics = BigGlyphMetrics()
        sstruct.unpack2(bigGlyphMetricsFormat, self.data[4:], self.metrics)
        glyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex + 1))
        # Every glyph occupies exactly imageSize bytes, back to back.
        offsets = [
            self.imageSize * i + self.imageDataOffset for i in range(len(glyphIds) + 1)
        ]
        self.locations = list(zip(offsets, offsets[1:]))
        self.names = list(map(self.ttFont.getGlyphName, glyphIds))
        del self.data, self.ttFont

    def compile(self, ttFont):
        glyphIds = list(map(ttFont.getGlyphID, self.names))
        # Make sure all the ids are consecutive. This is required by Format 2.
        assert glyphIds == list(
            range(self.firstGlyphIndex, self.lastGlyphIndex + 1)
        ), "Format 2 ids must be consecutive."
        # Recompute the image data offset as the smallest start byte.
        self.imageDataOffset = min(next(iter(zip(*self.locations))))

        dataList = [EblcIndexSubTable.compile(self, ttFont)]
        dataList.append(struct.pack(">L", self.imageSize))
        dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
        return bytesjoin(dataList)
|
||||
|
||||
|
||||
class eblc_index_sub_table_3(
    _createOffsetArrayIndexSubTableMixin("H"), EblcIndexSubTable
):
    # Index format 3: offset array with 2-byte ("H") entries. All behavior
    # comes from the generated offset-array mixin (including the 4-byte
    # padding it emits for odd-length arrays).
    pass
|
||||
|
||||
|
||||
class eblc_index_sub_table_4(EblcIndexSubTable):
    # Index format 4: sparse list of (glyphId, offset) pairs, terminated by
    # one extra pair that bounds the last glyph's data.

    def decompile(self):
        (numGlyphs,) = struct.unpack(">L", self.data[:4])
        data = self.data[4:]
        # numGlyphs + 1 pairs are stored, hence numGlyphs + 2 slice boundaries.
        indexingOffsets = [
            glyphIndex * codeOffsetPairSize for glyphIndex in range(numGlyphs + 2)
        ]
        indexingLocations = zip(indexingOffsets, indexingOffsets[1:])
        glyphArray = [
            struct.unpack(codeOffsetPairFormat, data[slice(*loc)])
            for loc in indexingLocations
        ]
        glyphIds, offsets = list(map(list, zip(*glyphArray)))
        # There are one too many glyph ids. Get rid of the last one.
        glyphIds.pop()

        # Stored offsets are relative to imageDataOffset; make them absolute.
        offsets = [offset + self.imageDataOffset for offset in offsets]
        self.locations = list(zip(offsets, offsets[1:]))
        self.names = list(map(self.ttFont.getGlyphName, glyphIds))
        del self.data, self.ttFont

    def compile(self, ttFont):
        # First make sure that all the data lines up properly. Format 4
        # must have all its data lined up consecutively. If not this will fail.
        for curLoc, nxtLoc in zip(self.locations, self.locations[1:]):
            assert (
                curLoc[1] == nxtLoc[0]
            ), "Data must be consecutive in indexSubTable format 4"

        offsets = list(self.locations[0]) + [loc[1] for loc in self.locations[1:]]
        # Image data offset must be less than or equal to the minimum of locations.
        # Resetting this offset may change the value for round tripping but is safer
        # and allows imageDataOffset to not be required to be in the XML version.
        self.imageDataOffset = min(offsets)
        offsets = [offset - self.imageDataOffset for offset in offsets]
        glyphIds = list(map(ttFont.getGlyphID, self.names))
        # Create an iterator over the ids plus a padding value.
        idsPlusPad = list(itertools.chain(glyphIds, [0]))

        dataList = [EblcIndexSubTable.compile(self, ttFont)]
        dataList.append(struct.pack(">L", len(glyphIds)))
        tmp = [
            struct.pack(codeOffsetPairFormat, *cop) for cop in zip(idsPlusPad, offsets)
        ]
        dataList += tmp
        data = bytesjoin(dataList)
        return data
|
||||
|
||||
|
||||
class eblc_index_sub_table_5(FixedSizeIndexSubTableMixin, EblcIndexSubTable):
    # Index format 5: fixed imageSize, shared BigGlyphMetrics, and an
    # explicit (possibly sparse) array of glyph ids.

    def decompile(self):
        self.origDataLen = 0
        (self.imageSize,) = struct.unpack(">L", self.data[:4])
        data = self.data[4:]
        self.metrics, data = sstruct.unpack2(
            bigGlyphMetricsFormat, data, BigGlyphMetrics()
        )
        (numGlyphs,) = struct.unpack(">L", data[:4])
        data = data[4:]
        glyphIds = [
            struct.unpack(">H", data[2 * i : 2 * (i + 1)])[0] for i in range(numGlyphs)
        ]

        # Every listed glyph occupies exactly imageSize bytes, back to back.
        offsets = [
            self.imageSize * i + self.imageDataOffset for i in range(len(glyphIds) + 1)
        ]
        self.locations = list(zip(offsets, offsets[1:]))
        self.names = list(map(self.ttFont.getGlyphName, glyphIds))
        del self.data, self.ttFont

    def compile(self, ttFont):
        # Recompute the image data offset as the smallest start byte.
        self.imageDataOffset = min(next(iter(zip(*self.locations))))
        dataList = [EblcIndexSubTable.compile(self, ttFont)]
        dataList.append(struct.pack(">L", self.imageSize))
        dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
        glyphIds = list(map(ttFont.getGlyphID, self.names))
        dataList.append(struct.pack(">L", len(glyphIds)))
        dataList += [struct.pack(">H", curId) for curId in glyphIds]
        # Pad the id array to a 4-byte boundary when the count is odd.
        if len(glyphIds) % 2 == 1:
            dataList.append(struct.pack(">H", 0))
        return bytesjoin(dataList)
|
||||
|
||||
|
||||
# Dictionary of indexFormat to the class representing that format.
# Used to dispatch decompilation/compilation to the right subclass.
eblc_sub_table_classes = {
    1: eblc_index_sub_table_1,
    2: eblc_index_sub_table_2,
    3: eblc_index_sub_table_3,
    4: eblc_index_sub_table_4,
    5: eblc_index_sub_table_5,
}
|
||||
@@ -0,0 +1,52 @@
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.textTools import safeEval
|
||||
from fontTools.misc.timeTools import timestampFromString, timestampToString
|
||||
from . import DefaultTable
|
||||
|
||||
FFTMFormat = """
|
||||
> # big endian
|
||||
version: I
|
||||
FFTimeStamp: Q
|
||||
sourceCreated: Q
|
||||
sourceModified: Q
|
||||
"""
|
||||
|
||||
|
||||
class table_F_F_T_M_(DefaultTable.DefaultTable):
    """FontForge Time Stamp table

    The ``FFTM`` table is used by the free-software font editor
    FontForge to record timestamps for the creation and modification
    of font source (.sfd) files and a timestamp for FontForge's
    own source code.

    See also https://fontforge.org/docs/techref/non-standard.html
    """

    # Field names whose values are serialized as human-readable timestamps.
    _TIMESTAMP_FIELDS = ("FFTimeStamp", "sourceCreated", "sourceModified")

    def decompile(self, data, ttFont):
        # Populate version/FFTimeStamp/sourceCreated/sourceModified on self;
        # any trailing bytes are ignored.
        sstruct.unpack2(FFTMFormat, data, self)

    def compile(self, ttFont):
        return sstruct.pack(FFTMFormat, self)

    def toXML(self, writer, ttFont):
        writer.comment(
            "FontForge's timestamp, font source creation and modification dates"
        )
        writer.newline()
        _, fieldNames, _ = sstruct.getformat(FFTMFormat)
        for fieldName in fieldNames:
            fieldValue = getattr(self, fieldName)
            if fieldName in self._TIMESTAMP_FIELDS:
                fieldValue = timestampToString(fieldValue)
            writer.simpletag(fieldName, value=fieldValue)
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        raw = attrs["value"]
        if name in self._TIMESTAMP_FIELDS:
            parsed = timestampFromString(raw)
        else:
            parsed = safeEval(raw)
        setattr(self, name, parsed)
|
||||
@@ -0,0 +1,149 @@
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.fixedTools import floatToFixedToStr
|
||||
from fontTools.misc.textTools import safeEval
|
||||
from . import DefaultTable
|
||||
from . import grUtils
|
||||
import struct
|
||||
|
||||
Feat_hdr_format = """
|
||||
>
|
||||
version: 16.16F
|
||||
"""
|
||||
|
||||
|
||||
class table_F__e_a_t(DefaultTable.DefaultTable):
    """Feature table

    The ``Feat`` table is used exclusively by the Graphite shaping engine
    to store features and possible settings specified in GDL. Graphite features
    determine what rules are applied to transform a glyph stream.

    Not to be confused with ``feat``, or the OpenType Layout tables
    ``GSUB``/``GPOS``.

    See also https://graphite.sil.org/graphite_techAbout#graphite-font-tables
    """

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        # Maps 4-char feature tag -> Feature instance.
        self.features = {}

    def decompile(self, data, ttFont):
        (_, data) = sstruct.unpack2(Feat_hdr_format, data, self)
        self.version = float(floatToFixedToStr(self.version, precisionBits=16))
        (numFeats,) = struct.unpack(">H", data[:2])
        # Skip numFeatures (H) plus the reserved H + L fields: 8 bytes total.
        data = data[8:]
        allfeats = []
        maxsetting = 0
        for i in range(numFeats):
            if self.version >= 2.0:
                # v2 records are 16 bytes: 32-bit feature id + reserved field.
                (fid, nums, _, offset, flags, lid) = struct.unpack(
                    ">LHHLHH", data[16 * i : 16 * (i + 1)]
                )
                # Convert the byte offset into an index into the settings array.
                offset = int((offset - 12 - 16 * numFeats) / 4)
            else:
                # v1 records are 12 bytes with a 16-bit feature id.
                (fid, nums, offset, flags, lid) = struct.unpack(
                    ">HHLHH", data[12 * i : 12 * (i + 1)]
                )
                offset = int((offset - 12 - 12 * numFeats) / 4)
            allfeats.append((fid, nums, offset, flags, lid))
            maxsetting = max(maxsetting, offset + nums)
        # NOTE(review): a 16-byte stride is used here even for version 1.x
        # tables whose records are 12 bytes — confirm this matches real v1
        # data before relying on it.
        data = data[16 * numFeats :]
        allsettings = []
        for i in range(maxsetting):
            if len(data) >= 4 * (i + 1):
                (val, lid) = struct.unpack(">HH", data[4 * i : 4 * (i + 1)])
                allsettings.append((val, lid))
        for i, f in enumerate(allfeats):
            (fid, nums, offset, flags, lid) = f
            fobj = Feature()
            fobj.flags = flags
            fobj.label = lid
            self.features[grUtils.num2tag(fid)] = fobj
            fobj.settings = {}
            fobj.default = None
            fobj.index = i
            # Pull this feature's settings out of the shared settings array;
            # the first setting encountered becomes the default.
            for i in range(offset, offset + nums):
                if i >= len(allsettings):
                    continue
                (vid, vlid) = allsettings[i]
                fobj.settings[vid] = vlid
                if fobj.default is None:
                    fobj.default = vid

    def compile(self, ttFont):
        fdat = b""  # feature records
        vdat = b""  # settings array
        offset = 0  # running index into the settings array
        for f, v in sorted(self.features.items(), key=lambda x: x[1].index):
            fnum = grUtils.tag2num(f)
            if self.version >= 2.0:
                fdat += struct.pack(
                    ">LHHLHH",
                    grUtils.tag2num(f),
                    len(v.settings),
                    0,
                    offset * 4 + 12 + 16 * len(self.features),
                    v.flags,
                    v.label,
                )
            elif fnum > 65535:  # self healing for alphabetic ids
                # Alphabetic tags don't fit in 16 bits; upgrade to v2 and retry.
                self.version = 2.0
                return self.compile(ttFont)
            else:
                fdat += struct.pack(
                    ">HHLHH",
                    grUtils.tag2num(f),
                    len(v.settings),
                    offset * 4 + 12 + 12 * len(self.features),
                    v.flags,
                    v.label,
                )
            # Emit the default setting first, then the rest sorted by label id.
            for s, l in sorted(
                v.settings.items(), key=lambda x: (-1, x[1]) if x[0] == v.default else x
            ):
                vdat += struct.pack(">HH", s, l)
            offset += len(v.settings)
        hdr = sstruct.pack(Feat_hdr_format, self)
        return hdr + struct.pack(">HHL", len(self.features), 0, 0) + fdat + vdat

    def toXML(self, writer, ttFont):
        writer.simpletag("version", version=self.version)
        writer.newline()
        for f, v in sorted(self.features.items(), key=lambda x: x[1].index):
            writer.begintag(
                "feature",
                fid=f,
                label=v.label,
                flags=v.flags,
                default=(v.default if v.default else 0),
            )
            writer.newline()
            for s, l in sorted(v.settings.items()):
                writer.simpletag("setting", value=s, label=l)
                writer.newline()
            writer.endtag("feature")
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name == "version":
            self.version = float(safeEval(attrs["version"]))
        elif name == "feature":
            fid = attrs["fid"]
            fobj = Feature()
            fobj.flags = int(safeEval(attrs["flags"]))
            fobj.label = int(safeEval(attrs["label"]))
            fobj.default = int(safeEval(attrs.get("default", "0")))
            # Preserve source order so compile()/toXML() keep record order.
            fobj.index = len(self.features)
            self.features[fid] = fobj
            fobj.settings = {}
            for element in content:
                if not isinstance(element, tuple):
                    continue
                tag, a, c = element
                if tag == "setting":
                    fobj.settings[int(safeEval(a["value"]))] = int(safeEval(a["label"]))
|
||||
|
||||
|
||||
class Feature(object):
    """Plain attribute container for one Graphite feature.

    Attributes are assigned by ``table_F__e_a_t``: ``flags``, ``label``,
    ``settings`` (setting id -> name id), ``default`` (default setting id),
    and ``index`` (position in the feature record array).
    """
|
||||
@@ -0,0 +1,13 @@
|
||||
from .otBase import BaseTTXConverter
|
||||
|
||||
|
||||
class table_G_D_E_F_(BaseTTXConverter):
    """Glyph Definition table

    The ``GDEF`` table stores various glyph properties that are used
    by OpenType Layout.

    See also https://learn.microsoft.com/en-us/typography/opentype/spec/gdef
    """

    # Conversion logic is inherited wholesale from BaseTTXConverter; the
    # docstring alone forms the class body (a trailing ``pass`` is redundant).
|
||||
@@ -0,0 +1,148 @@
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.textTools import tobytes, tostr, safeEval
|
||||
from . import DefaultTable
|
||||
|
||||
GMAPFormat = """
|
||||
> # big endian
|
||||
tableVersionMajor: H
|
||||
tableVersionMinor: H
|
||||
flags: H
|
||||
recordsCount: H
|
||||
recordsOffset: H
|
||||
fontNameLength: H
|
||||
"""
|
||||
# psFontName is a byte string which follows the record above. This is zero padded
# to the beginning of the records array. The recordsOffset is 32 bit aligned.
|
||||
|
||||
GMAPRecordFormat1 = """
|
||||
> # big endian
|
||||
UV: L
|
||||
cid: H
|
||||
gid: H
|
||||
ggid: H
|
||||
name: 32s
|
||||
"""
|
||||
|
||||
|
||||
class GMAPRecord(object):
    """A single GMAP record mapping a Unicode value to glyphlet glyph data."""

    def __init__(self, uv=0, cid=0, gid=0, ggid=0, name=""):
        self.UV = uv
        self.cid = cid
        self.gid = gid
        self.ggid = ggid
        self.name = name

    def toXML(self, writer, ttFont):
        writer.begintag("GMAPRecord")
        writer.newline()
        writer.simpletag("UV", value=self.UV)
        writer.newline()
        writer.simpletag("cid", value=self.cid)
        writer.newline()
        writer.simpletag("gid", value=self.gid)
        writer.newline()
        # NOTE(review): this writes self.gid, not self.ggid, so ggid is never
        # serialized (and fromXML would store it back as ``glyphletGid``).
        # Looks like a long-standing round-trip quirk — confirm before
        # changing the emitted XML.
        writer.simpletag("glyphletGid", value=self.gid)
        writer.newline()
        writer.simpletag("GlyphletName", value=self.name)
        writer.newline()
        writer.endtag("GMAPRecord")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        value = attrs["value"]
        if name == "GlyphletName":
            self.name = value
        else:
            setattr(self, name, safeEval(value))

    def compile(self, ttFont):
        # UV may be unset after XML import; the binary field requires a value.
        if self.UV is None:
            self.UV = 0
        nameLen = len(self.name)
        if nameLen < 32:
            # Pad the name to the fixed 32-byte field width (format "32s").
            self.name = self.name + "\0" * (32 - nameLen)
        data = sstruct.pack(GMAPRecordFormat1, self)
        return data

    def __repr__(self):
        return (
            "GMAPRecord[ UV: "
            + str(self.UV)
            + ", cid: "
            + str(self.cid)
            + ", gid: "
            + str(self.gid)
            + ", ggid: "
            + str(self.ggid)
            + ", Glyphlet Name: "
            + str(self.name)
            + " ]"
        )
|
||||
|
||||
|
||||
class table_G_M_A_P_(DefaultTable.DefaultTable):
    """Glyphlets GMAP table

    The ``GMAP`` table is used by Adobe's SING Glyphlets.

    See also https://web.archive.org/web/20080627183635/http://www.adobe.com/devnet/opentype/gdk/topic.html
    """

    dependencies = []

    def decompile(self, data, ttFont):
        dummy, newData = sstruct.unpack2(GMAPFormat, data, self)
        self.psFontName = tostr(newData[: self.fontNameLength])
        assert (
            self.recordsOffset % 4
        ) == 0, "GMAP error: recordsOffset is not 32 bit aligned."
        # recordsOffset is measured from the start of the table, so slice the
        # original data, not the post-header remainder.
        newData = data[self.recordsOffset :]
        self.gmapRecords = []
        for i in range(self.recordsCount):
            gmapRecord, newData = sstruct.unpack2(
                GMAPRecordFormat1, newData, GMAPRecord()
            )
            # Strip NUL padding from the fixed-width 32-byte name field.
            gmapRecord.name = gmapRecord.name.strip("\0")
            self.gmapRecords.append(gmapRecord)

    def compile(self, ttFont):
        self.recordsCount = len(self.gmapRecords)
        self.fontNameLength = len(self.psFontName)
        # Header (12 bytes) plus name, rounded up to a 32-bit boundary.
        self.recordsOffset = 4 * (((self.fontNameLength + 12) + 3) // 4)
        data = sstruct.pack(GMAPFormat, self)
        data = data + tobytes(self.psFontName)
        # Zero-pad up to the aligned records offset.
        data = data + b"\0" * (self.recordsOffset - len(data))
        for record in self.gmapRecords:
            data = data + record.compile(ttFont)
        return data

    def toXML(self, writer, ttFont):
        writer.comment("Most of this table will be recalculated by the compiler")
        writer.newline()
        formatstring, names, fixes = sstruct.getformat(GMAPFormat)
        for name in names:
            value = getattr(self, name)
            writer.simpletag(name, value=value)
            writer.newline()
        writer.simpletag("PSFontName", value=self.psFontName)
        writer.newline()
        for gmapRecord in self.gmapRecords:
            gmapRecord.toXML(writer, ttFont)

    def fromXML(self, name, attrs, content, ttFont):
        if name == "GMAPRecord":
            if not hasattr(self, "gmapRecords"):
                self.gmapRecords = []
            gmapRecord = GMAPRecord()
            self.gmapRecords.append(gmapRecord)
            # Delegate each child element to the record being built.
            for element in content:
                if isinstance(element, str):
                    continue
                name, attrs, content = element
                gmapRecord.fromXML(name, attrs, content, ttFont)
        else:
            value = attrs["value"]
            if name == "PSFontName":
                self.psFontName = value
            else:
                setattr(self, name, safeEval(value))
|
||||
@@ -0,0 +1,133 @@
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.textTools import bytesjoin, safeEval, readHex
|
||||
from . import DefaultTable
|
||||
import sys
|
||||
import array
|
||||
|
||||
GPKGFormat = """
|
||||
> # big endian
|
||||
version: H
|
||||
flags: H
|
||||
numGMAPs: H
|
||||
numGlyplets: H
|
||||
"""
|
||||
# psFontName is a byte string which follows the record above. This is zero padded
# to the beginning of the records array. The recordsOffset is 32 bit aligned.
|
||||
|
||||
|
||||
class table_G_P_K_G_(DefaultTable.DefaultTable):
    """Glyphlets GPKG table

    The ``GPKG`` table is used by Adobe's SING Glyphlets.

    See also https://web.archive.org/web/20080627183635/http://www.adobe.com/devnet/opentype/gdk/topic.html
    """

    def decompile(self, data, ttFont):
        dummy, newData = sstruct.unpack2(GPKGFormat, data, self)

        GMAPoffsets = array.array("I")
        endPos = (self.numGMAPs + 1) * 4
        GMAPoffsets.frombytes(newData[:endPos])
        # Offsets are stored big-endian; swap on little-endian hosts.
        if sys.byteorder != "big":
            GMAPoffsets.byteswap()
        self.GMAPs = []
        for i in range(self.numGMAPs):
            start = GMAPoffsets[i]
            end = GMAPoffsets[i + 1]
            # Offsets are relative to the start of the whole table (``data``).
            self.GMAPs.append(data[start:end])
        pos = endPos
        endPos = pos + (self.numGlyplets + 1) * 4
        glyphletOffsets = array.array("I")
        glyphletOffsets.frombytes(newData[pos:endPos])
        if sys.byteorder != "big":
            glyphletOffsets.byteswap()
        self.glyphlets = []
        for i in range(self.numGlyplets):
            start = glyphletOffsets[i]
            end = glyphletOffsets[i + 1]
            self.glyphlets.append(data[start:end])

    def compile(self, ttFont):
        self.numGMAPs = len(self.GMAPs)
        self.numGlyplets = len(self.glyphlets)
        GMAPoffsets = [0] * (self.numGMAPs + 1)
        glyphletOffsets = [0] * (self.numGlyplets + 1)

        dataList = [sstruct.pack(GPKGFormat, self)]

        # First GMAP blob begins after the header and both offset arrays.
        pos = len(dataList[0]) + (self.numGMAPs + 1) * 4 + (self.numGlyplets + 1) * 4
        GMAPoffsets[0] = pos
        for i in range(1, self.numGMAPs + 1):
            pos += len(self.GMAPs[i - 1])
            GMAPoffsets[i] = pos
        gmapArray = array.array("I", GMAPoffsets)
        if sys.byteorder != "big":
            gmapArray.byteswap()
        dataList.append(gmapArray.tobytes())

        # Glyphlet blobs follow immediately after the last GMAP blob.
        glyphletOffsets[0] = pos
        for i in range(1, self.numGlyplets + 1):
            pos += len(self.glyphlets[i - 1])
            glyphletOffsets[i] = pos
        glyphletArray = array.array("I", glyphletOffsets)
        if sys.byteorder != "big":
            glyphletArray.byteswap()
        dataList.append(glyphletArray.tobytes())
        dataList += self.GMAPs
        dataList += self.glyphlets
        data = bytesjoin(dataList)
        return data

    def toXML(self, writer, ttFont):
        writer.comment("Most of this table will be recalculated by the compiler")
        writer.newline()
        formatstring, names, fixes = sstruct.getformat(GPKGFormat)
        for name in names:
            value = getattr(self, name)
            writer.simpletag(name, value=value)
            writer.newline()

        writer.begintag("GMAPs")
        writer.newline()
        for gmapData in self.GMAPs:
            writer.begintag("hexdata")
            writer.newline()
            writer.dumphex(gmapData)
            writer.endtag("hexdata")
            writer.newline()
        writer.endtag("GMAPs")
        writer.newline()

        writer.begintag("glyphlets")
        writer.newline()
        for glyphletData in self.glyphlets:
            writer.begintag("hexdata")
            writer.newline()
            writer.dumphex(glyphletData)
            writer.endtag("hexdata")
            writer.newline()
        writer.endtag("glyphlets")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name == "GMAPs":
            if not hasattr(self, "GMAPs"):
                self.GMAPs = []
            for element in content:
                if isinstance(element, str):
                    continue
                itemName, itemAttrs, itemContent = element
                if itemName == "hexdata":
                    self.GMAPs.append(readHex(itemContent))
        elif name == "glyphlets":
            if not hasattr(self, "glyphlets"):
                self.glyphlets = []
            for element in content:
                if isinstance(element, str):
                    continue
                itemName, itemAttrs, itemContent = element
                if itemName == "hexdata":
                    self.glyphlets.append(readHex(itemContent))
        else:
            # Plain header fields round-trip as simple value attributes.
            setattr(self, name, safeEval(attrs["value"]))
|
||||
@@ -0,0 +1,14 @@
|
||||
from .otBase import BaseTTXConverter
|
||||
|
||||
|
||||
class table_G_P_O_S_(BaseTTXConverter):
    """Glyph Positioning table

    The ``GPOS`` table stores advanced glyph-positioning data
    used in OpenType Layout features, such as mark attachment,
    cursive attachment, kerning, and other position adjustments.

    See also https://learn.microsoft.com/en-us/typography/opentype/spec/gpos
    """

    # Conversion logic is inherited wholesale from BaseTTXConverter; the
    # docstring alone forms the class body (a trailing ``pass`` is redundant).
|
||||
@@ -0,0 +1,13 @@
|
||||
from .otBase import BaseTTXConverter
|
||||
|
||||
|
||||
class table_G_S_U_B_(BaseTTXConverter):
    """Glyph Substitution table

    The ``GSUB`` table contains glyph-substitution rules used in
    OpenType Layout.

    See also https://learn.microsoft.com/en-us/typography/opentype/spec/gsub
    """

    # Conversion logic is inherited wholesale from BaseTTXConverter; the
    # docstring alone forms the class body (a trailing ``pass`` is redundant).
|
||||
@@ -0,0 +1,5 @@
|
||||
from ._g_v_a_r import table__g_v_a_r
|
||||
|
||||
|
||||
class table_G_V_A_R_(table__g_v_a_r):
    # Same converter as ``gvar``; gid_size presumably selects a 3-byte glyph
    # id width in the shared base class — TODO confirm against table__g_v_a_r.
    gid_size = 3
|
||||
@@ -0,0 +1,235 @@
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.fixedTools import floatToFixedToStr
|
||||
from fontTools.misc.textTools import safeEval
|
||||
|
||||
# from itertools import *
|
||||
from functools import partial
|
||||
from . import DefaultTable
|
||||
from . import grUtils
|
||||
import struct
|
||||
|
||||
|
||||
Glat_format_0 = """
|
||||
> # big endian
|
||||
version: 16.16F
|
||||
"""
|
||||
|
||||
Glat_format_3 = """
|
||||
>
|
||||
version: 16.16F
|
||||
compression:L # compression scheme or reserved
|
||||
"""
|
||||
|
||||
Glat_format_1_entry = """
|
||||
>
|
||||
attNum: B # Attribute number of first attribute
|
||||
num: B # Number of attributes in this run
|
||||
"""
|
||||
Glat_format_23_entry = """
|
||||
>
|
||||
attNum: H # Attribute number of first attribute
|
||||
num: H # Number of attributes in this run
|
||||
"""
|
||||
|
||||
Glat_format_3_octabox_metrics = """
|
||||
>
|
||||
subboxBitmap: H # Which subboxes exist on 4x4 grid
|
||||
diagNegMin: B # Defines minimum negatively-sloped diagonal (si)
|
||||
diagNegMax: B # Defines maximum negatively-sloped diagonal (sa)
|
||||
diagPosMin: B # Defines minimum positively-sloped diagonal (di)
|
||||
diagPosMax: B # Defines maximum positively-sloped diagonal (da)
|
||||
"""
|
||||
|
||||
Glat_format_3_subbox_entry = """
|
||||
>
|
||||
left: B # xi
|
||||
right: B # xa
|
||||
bottom: B # yi
|
||||
top: B # ya
|
||||
diagNegMin: B # Defines minimum negatively-sloped diagonal (si)
|
||||
diagNegMax: B # Defines maximum negatively-sloped diagonal (sa)
|
||||
diagPosMin: B # Defines minimum positively-sloped diagonal (di)
|
||||
diagPosMax: B # Defines maximum positively-sloped diagonal (da)
|
||||
"""
|
||||
|
||||
|
||||
class _Object:
    # Bare attribute bag; used as the target object for sstruct.unpack2.
    pass
|
||||
|
||||
|
||||
class _Dict(dict):
    # dict subclass so decompiled attribute maps can also carry extra
    # attributes (e.g. ``octabox``), which plain dict instances cannot.
    pass
|
||||
|
||||
|
||||
class table_G__l_a_t(DefaultTable.DefaultTable):
    """Graphite Glyph Attributes table

    See also https://graphite.sil.org/graphite_techAbout#graphite-font-tables
    """

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        # Compression scheme used by version 3 tables (0 = uncompressed).
        self.scheme = 0

    def decompile(self, data, ttFont):
        sstruct.unpack2(Glat_format_0, data, self)
        self.version = float(floatToFixedToStr(self.version, precisionBits=16))
        if self.version <= 1.9:
            # Version 1: 8-bit attribute number and run length.
            decoder = partial(self.decompileAttributes12, fmt=Glat_format_1_entry)
        elif self.version <= 2.9:
            # Version 2: 16-bit attribute number and run length.
            decoder = partial(self.decompileAttributes12, fmt=Glat_format_23_entry)
        elif self.version >= 3.0:
            # Version 3 payload may be compressed and can carry octabox data.
            (data, self.scheme) = grUtils.decompress(data)
            sstruct.unpack2(Glat_format_3, data, self)
            self.hasOctaboxes = (self.compression & 1) == 1
            decoder = self.decompileAttributes3

        # Gloc supplies the per-glyph (start, end) byte ranges into this table.
        gloc = ttFont["Gloc"]
        self.attributes = {}
        count = 0
        for s, e in zip(gloc, gloc[1:]):
            self.attributes[ttFont.getGlyphName(count)] = decoder(data[s:e])
            count += 1

    def decompileAttributes12(self, data, fmt):
        """Decode a run-length encoded attribute list (table versions 1/2)."""
        attributes = _Dict()
        while len(data) > 3:
            e, data = sstruct.unpack2(fmt, data, _Object())
            keys = range(e.attNum, e.attNum + e.num)
            if len(data) >= 2 * e.num:
                # Each run is followed by e.num signed 16-bit values.
                vals = struct.unpack_from((">%dh" % e.num), data)
                attributes.update(zip(keys, vals))
            data = data[2 * e.num :]
        return attributes

    def decompileAttributes3(self, data):
        """Decode a version-3 blob: optional octabox metrics + format-2 runs."""
        if self.hasOctaboxes:
            o, data = sstruct.unpack2(Glat_format_3_octabox_metrics, data, _Object())
            # One subbox record per set bit in the bitmap.
            numsub = bin(o.subboxBitmap).count("1")
            o.subboxes = []
            for b in range(numsub):
                if len(data) >= 8:
                    subbox, data = sstruct.unpack2(
                        Glat_format_3_subbox_entry, data, _Object()
                    )
                    o.subboxes.append(subbox)
        attrs = self.decompileAttributes12(data, Glat_format_23_entry)
        if self.hasOctaboxes:
            attrs.octabox = o
        return attrs

    def compile(self, ttFont):
        data = sstruct.pack(Glat_format_0, self)
        if self.version <= 1.9:
            encoder = partial(self.compileAttributes12, fmt=Glat_format_1_entry)
        elif self.version <= 2.9:
            # Fixed: must mirror decompile(), which reads version-2 runs with
            # the 16-bit Glat_format_23_entry. Using the 8-bit format-1 entry
            # here corrupted round-trips and raised struct.error for
            # attribute numbers/run lengths above 255.
            encoder = partial(self.compileAttributes12, fmt=Glat_format_23_entry)
        elif self.version >= 3.0:
            self.compression = (self.scheme << 27) + (1 if self.hasOctaboxes else 0)
            data = sstruct.pack(Glat_format_3, self)
            encoder = self.compileAttributes3

        glocs = []
        for n in range(len(self.attributes)):
            glocs.append(len(data))
            data += encoder(self.attributes[ttFont.getGlyphName(n)])
        glocs.append(len(data))
        # Hand the byte ranges back to Gloc so the two tables stay in sync.
        ttFont["Gloc"].set(glocs)

        if self.version >= 3.0:
            data = grUtils.compress(self.scheme, data)
        return data

    def compileAttributes12(self, attrs, fmt):
        """Encode an attribute dict as run headers followed by 16-bit values."""
        data = b""
        for e in grUtils.entries(attrs):
            data += sstruct.pack(fmt, {"attNum": e[0], "num": e[1]}) + struct.pack(
                (">%dh" % len(e[2])), *e[2]
            )
        return data

    def compileAttributes3(self, attrs):
        """Encode a version-3 blob: optional octabox metrics + format-2 runs."""
        if self.hasOctaboxes:
            o = attrs.octabox
            data = sstruct.pack(Glat_format_3_octabox_metrics, o)
            numsub = bin(o.subboxBitmap).count("1")
            for b in range(numsub):
                data += sstruct.pack(Glat_format_3_subbox_entry, o.subboxes[b])
        else:
            # Fixed: was "" (str), which raised TypeError when concatenated
            # with the bytes returned by compileAttributes12 below.
            data = b""
        return data + self.compileAttributes12(attrs, Glat_format_23_entry)

    def toXML(self, writer, ttFont):
        writer.simpletag("version", version=self.version, compressionScheme=self.scheme)
        writer.newline()
        for n, a in sorted(
            self.attributes.items(), key=lambda x: ttFont.getGlyphID(x[0])
        ):
            writer.begintag("glyph", name=n)
            writer.newline()
            if hasattr(a, "octabox"):
                o = a.octabox
                formatstring, names, fixes = sstruct.getformat(
                    Glat_format_3_octabox_metrics
                )
                vals = {}
                for k in names:
                    if k == "subboxBitmap":
                        continue
                    # Byte values are expressed as percentages in the XML.
                    vals[k] = "{:.3f}%".format(getattr(o, k) * 100.0 / 255)
                vals["bitmap"] = "{:0X}".format(o.subboxBitmap)
                writer.begintag("octaboxes", **vals)
                writer.newline()
                formatstring, names, fixes = sstruct.getformat(
                    Glat_format_3_subbox_entry
                )
                for s in o.subboxes:
                    vals = {}
                    for k in names:
                        vals[k] = "{:.3f}%".format(getattr(s, k) * 100.0 / 255)
                    writer.simpletag("octabox", **vals)
                    writer.newline()
                writer.endtag("octaboxes")
                writer.newline()
            for k, v in sorted(a.items()):
                writer.simpletag("attribute", index=k, value=v)
                writer.newline()
            writer.endtag("glyph")
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name == "version":
            self.version = float(safeEval(attrs["version"]))
            self.scheme = int(safeEval(attrs["compressionScheme"]))
        if name != "glyph":
            return
        if not hasattr(self, "attributes"):
            self.attributes = {}
        gname = attrs["name"]
        attributes = _Dict()
        for element in content:
            if not isinstance(element, tuple):
                continue
            tag, attrs, subcontent = element
            if tag == "attribute":
                k = int(safeEval(attrs["index"]))
                v = int(safeEval(attrs["value"]))
                attributes[k] = v
            elif tag == "octaboxes":
                self.hasOctaboxes = True
                o = _Object()
                o.subboxBitmap = int(attrs["bitmap"], 16)
                o.subboxes = []
                del attrs["bitmap"]
                # Convert percentage strings back to 0-255 byte values.
                for k, v in attrs.items():
                    setattr(o, k, int(float(v[:-1]) * 255.0 / 100.0 + 0.5))
                for element in subcontent:
                    if not isinstance(element, tuple):
                        continue
                    (tag, attrs, subcontent) = element
                    so = _Object()
                    for k, v in attrs.items():
                        setattr(so, k, int(float(v[:-1]) * 255.0 / 100.0 + 0.5))
                    o.subboxes.append(so)
                attributes.octabox = o
        self.attributes[gname] = attributes
|
||||
@@ -0,0 +1,85 @@
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.textTools import safeEval
|
||||
from . import DefaultTable
|
||||
import array
|
||||
import sys
|
||||
|
||||
|
||||
Gloc_header = """
|
||||
> # big endian
|
||||
version: 16.16F # Table version
|
||||
flags: H # bit 0: 1=long format, 0=short format
|
||||
# bit 1: 1=attribute names, 0=no names
|
||||
numAttribs: H # NUmber of attributes
|
||||
"""
|
||||
|
||||
|
||||
class table_G__l_o_c(DefaultTable.DefaultTable):
    """Graphite Index to Glyph Attributes table

    See also https://graphite.sil.org/graphite_techAbout#graphite-font-tables
    """

    dependencies = ["Glat"]

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        self.attribIds = None
        self.numAttribs = 0

    def decompile(self, data, ttFont):
        _, data = sstruct.unpack2(Gloc_header, data, self)
        flags = self.flags
        # flags is transient: compile() recomputes it from the stored data.
        del self.flags
        # Bit 0 of flags selects 32-bit vs 16-bit offsets.
        self.locations = array.array("I" if flags & 1 else "H")
        # When bit 1 is set, numAttribs 16-bit attribute ids trail the offset
        # array; (flags & 2) conveniently equals the per-id byte size then.
        self.locations.frombytes(data[: len(data) - self.numAttribs * (flags & 2)])
        # Table data is big-endian; array uses host order, so swap as needed.
        if sys.byteorder != "big":
            self.locations.byteswap()
        self.attribIds = array.array("H")
        if flags & 2:
            self.attribIds.frombytes(data[-self.numAttribs * 2 :])
            if sys.byteorder != "big":
                self.attribIds.byteswap()

    def compile(self, ttFont):
        data = sstruct.pack(
            Gloc_header,
            dict(
                version=1.0,
                flags=(bool(self.attribIds) << 1) + (self.locations.typecode == "I"),
                numAttribs=self.numAttribs,
            ),
        )
        # byteswap in place before writing, then swap back so the in-memory
        # arrays stay in host order for later use.
        if sys.byteorder != "big":
            self.locations.byteswap()
        data += self.locations.tobytes()
        if sys.byteorder != "big":
            self.locations.byteswap()
        if self.attribIds:
            if sys.byteorder != "big":
                self.attribIds.byteswap()
            data += self.attribIds.tobytes()
            if sys.byteorder != "big":
                self.attribIds.byteswap()
        return data

    def set(self, locations):
        # Choose the narrowest offset width that can represent every location.
        long_format = max(locations) >= 65536
        self.locations = array.array("I" if long_format else "H", locations)

    def toXML(self, writer, ttFont):
        writer.simpletag("attributes", number=self.numAttribs)
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name == "attributes":
            self.numAttribs = int(safeEval(attrs["number"]))

    def __getitem__(self, index):
        return self.locations[index]

    def __len__(self):
        return len(self.locations)

    def __iter__(self):
        return iter(self.locations)
||||
@@ -0,0 +1,13 @@
|
||||
from .otBase import BaseTTXConverter
|
||||
|
||||
|
||||
class table_H_V_A_R_(BaseTTXConverter):
    """Horizontal Metrics Variations table

    The ``HVAR`` table contains variations in horizontal glyph metrics
    in variable fonts.

    See also https://learn.microsoft.com/en-us/typography/opentype/spec/hvar
    """

    # Conversion logic is inherited wholesale from BaseTTXConverter; the
    # docstring alone forms the class body (a trailing ``pass`` is redundant).
|
||||
@@ -0,0 +1,13 @@
|
||||
from .otBase import BaseTTXConverter
|
||||
|
||||
|
||||
class table_J_S_T_F_(BaseTTXConverter):
    """Justification table

    The ``JSTF`` table contains glyph substitution and positioning
    data used to perform text justification.

    See also https://learn.microsoft.com/en-us/typography/opentype/spec/jstf
    """

    # Conversion logic is inherited wholesale from BaseTTXConverter; the
    # docstring alone forms the class body (a trailing ``pass`` is redundant).
|
||||
@@ -0,0 +1,58 @@
|
||||
from fontTools.misc.textTools import safeEval
|
||||
from . import DefaultTable
|
||||
import struct
|
||||
import array
|
||||
|
||||
# XXX I've lowered the strictness, to make sure Apple's own Chicago
|
||||
# XXX gets through. They're looking into it, I hope to raise the standards
|
||||
# XXX back to normal eventually.
|
||||
|
||||
|
||||
class table_L_T_S_H_(DefaultTable.DefaultTable):
    """Linear Threshold table

    The ``LTSH`` table records, per glyph, the smallest ppem size at which
    the advance width scales linearly even in the presence of TrueType
    instructions that could otherwise distort it.

    See also https://learn.microsoft.com/en-us/typography/opentype/spec/ltsh
    """

    def decompile(self, data, ttFont):
        version, numGlyphs = struct.unpack(">HH", data[:4])
        data = data[4:]
        assert version == 0, "unknown version: %s" % version
        assert (len(data) % numGlyphs) < 4, "numGlyphs doesn't match data length"
        # ouch: the assertion is not true in Chicago!
        # assert numGlyphs == ttFont['maxp'].numGlyphs
        thresholds = array.array("B")
        thresholds.frombytes(data)
        # Store as a glyph-name keyed dict so glyph order changes survive.
        self.yPels = {
            ttFont.getGlyphName(gid): thresholds[gid] for gid in range(numGlyphs)
        }

    def compile(self, ttFont):
        version = 0
        numGlyphs = len(self.yPels)
        # ouch: the assertion is not true in Chicago!
        # assert len(self.yPels) == ttFont['maxp'].numGlyphs == numGlyphs
        buf = bytearray(numGlyphs)
        for glyphName, threshold in self.yPels.items():
            buf[ttFont.getGlyphID(glyphName)] = threshold
        return struct.pack(">HH", version, numGlyphs) + bytes(buf)

    def toXML(self, writer, ttFont):
        for glyphName in sorted(self.yPels):
            writer.simpletag("yPel", name=glyphName, value=self.yPels[glyphName])
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if not hasattr(self, "yPels"):
            self.yPels = {}
        if name != "yPel":
            return  # ignore unknown tags
        self.yPels[attrs["name"]] = safeEval(attrs["value"])
|
||||
@@ -0,0 +1,13 @@
|
||||
from .otBase import BaseTTXConverter
|
||||
|
||||
|
||||
class table_M_A_T_H_(BaseTTXConverter):
    """Mathematical Typesetting table

    The ``MATH`` table contains a variety of information needed to
    typeset glyphs in mathematical formulas and expressions.

    See also https://learn.microsoft.com/en-us/typography/opentype/spec/math
    """
|
||||
@@ -0,0 +1,352 @@
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.textTools import byteord, safeEval
|
||||
from . import DefaultTable
|
||||
import pdb
|
||||
import struct
|
||||
|
||||
|
||||
METAHeaderFormat = """
    > # big endian
    tableVersionMajor: H
    tableVersionMinor: H
    metaEntriesVersionMajor: H
    metaEntriesVersionMinor: H
    unicodeVersion: L
    metaFlags: H
    nMetaRecs: H
"""
# This record is followed by nMetaRecs of METAGlyphRecordFormat.
# This in turn is followed by as many METAStringRecordFormat entries
# as specified by the METAGlyphRecordFormat entries
# this is followed by the strings specified in the METAStringRecordFormat
METAGlyphRecordFormat = """
    > # big endian
    glyphID: H
    nMetaEntry: H
"""
# This record is followed by a variable data length field:
#     USHORT or ULONG hdrOffset
# Offset from start of META table to the beginning
# of this glyphs array of ns Metadata string entries.
# Size determined by metaFlags field
# METAGlyphRecordFormat entries must be sorted by glyph ID

METAStringRecordFormat = """
    > # big endian
    labelID: H
    stringLen: H
"""
# This record is followed by a variable data length field:
#     USHORT or ULONG stringOffset
# METAStringRecordFormat entries must be sorted in order of labelID
# There may be more than one entry with the same labelID
# There may be more than one string with the same content.

# Strings shall be Unicode UTF-8 encoded, and null-terminated.

# Known labelID values and their human-readable names.
METALabelDict = {
    0: "MojikumiX4051",  # An integer in the range 1-20
    1: "UNIUnifiedBaseChars",
    2: "BaseFontName",
    3: "Language",
    4: "CreationDate",
    5: "FoundryName",
    6: "FoundryCopyright",
    7: "OwnerURI",
    8: "WritingScript",
    10: "StrokeCount",
    11: "IndexingRadical",
}
|
||||
|
||||
|
||||
def getLabelString(labelID):
    """Return the human-readable name for *labelID*, or "Unknown label"."""
    return str(METALabelDict.get(labelID, "Unknown label"))
|
||||
|
||||
|
||||
class table_M_E_T_A_(DefaultTable.DefaultTable):
    """Glyphlets META table

    The ``META`` table is used by Adobe's SING Glyphlets.

    See also https://web.archive.org/web/20080627183635/http://www.adobe.com/devnet/opentype/gdk/topic.html
    """

    dependencies = []

    def decompile(self, data, ttFont):
        """Parse the binary META table into a list of GlyphRecord objects.

        metaFlags selects the offset width: 0 means 16-bit (USHORT) offsets,
        1 means 32-bit (ULONG) offsets.
        """
        dummy, newData = sstruct.unpack2(METAHeaderFormat, data, self)
        self.glyphRecords = []
        for i in range(self.nMetaRecs):
            glyphRecord, newData = sstruct.unpack2(
                METAGlyphRecordFormat, newData, GlyphRecord()
            )
            if self.metaFlags == 0:
                [glyphRecord.offset] = struct.unpack(">H", newData[:2])
                newData = newData[2:]
            elif self.metaFlags == 1:
                # 32-bit offsets: unpack 4 bytes as ULONG (">L"), not ">H".
                [glyphRecord.offset] = struct.unpack(">L", newData[:4])
                newData = newData[4:]
            else:
                assert 0, (
                    "The metaFlags field in the META table header has a value other than 0 or 1 :"
                    + str(self.metaFlags)
                )
            glyphRecord.stringRecs = []
            # Offsets are measured from the start of the whole table.
            newData = data[glyphRecord.offset :]
            for j in range(glyphRecord.nMetaEntry):
                stringRec, newData = sstruct.unpack2(
                    METAStringRecordFormat, newData, StringRecord()
                )
                if self.metaFlags == 0:
                    [stringRec.offset] = struct.unpack(">H", newData[:2])
                    newData = newData[2:]
                else:
                    # 32-bit string offsets: ULONG (">L"), 4 bytes.
                    [stringRec.offset] = struct.unpack(">L", newData[:4])
                    newData = newData[4:]
                stringRec.string = data[
                    stringRec.offset : stringRec.offset + stringRec.stringLen
                ]
                glyphRecord.stringRecs.append(stringRec)
            self.glyphRecords.append(glyphRecord)

    def compile(self, ttFont):
        """Serialize the table, retrying with wider offsets when a 16-bit
        offset overflows (and narrowing back when 32-bit is unnecessary)."""
        offsetOK = 0
        self.nMetaRecs = len(self.glyphRecords)
        count = 0
        while offsetOK != 1:
            count = count + 1
            if count > 4:
                # The flag flip-flop below should settle within a few passes;
                # dropping into the debugger signals a logic error.
                pdb.set_trace()
            metaData = sstruct.pack(METAHeaderFormat, self)
            # Each glyph record is 6 bytes plus a 2- or 4-byte offset field.
            stringRecsOffset = len(metaData) + self.nMetaRecs * (
                6 + 2 * (self.metaFlags & 1)
            )
            stringRecSize = 6 + 2 * (self.metaFlags & 1)
            for glyphRec in self.glyphRecords:
                glyphRec.offset = stringRecsOffset
                if (glyphRec.offset > 65535) and ((self.metaFlags & 1) == 0):
                    # Offset no longer fits in a USHORT: switch to ULONG offsets.
                    self.metaFlags = self.metaFlags + 1
                    offsetOK = -1
                    break
                metaData = metaData + glyphRec.compile(self)
                stringRecsOffset = stringRecsOffset + (
                    glyphRec.nMetaEntry * stringRecSize
                )
                # this will be the String Record offset for the next GlyphRecord.
            if offsetOK == -1:
                offsetOK = 0
                continue

            # metaData now contains the header and all of the GlyphRecords. Its
            # length should be the offset to the first StringRecord.
            stringOffset = stringRecsOffset
            for glyphRec in self.glyphRecords:
                assert glyphRec.offset == len(
                    metaData
                ), "Glyph record offset did not compile correctly! for rec:" + str(
                    glyphRec
                )
                for stringRec in glyphRec.stringRecs:
                    stringRec.offset = stringOffset
                    if (stringRec.offset > 65535) and ((self.metaFlags & 1) == 0):
                        self.metaFlags = self.metaFlags + 1
                        offsetOK = -1
                        break
                    metaData = metaData + stringRec.compile(self)
                    stringOffset = stringOffset + stringRec.stringLen
            if offsetOK == -1:
                offsetOK = 0
                continue

            if ((self.metaFlags & 1) == 1) and (stringOffset < 65536):
                # Everything fits in USHORT offsets after all; narrow back.
                self.metaFlags = self.metaFlags - 1
                continue
            else:
                offsetOK = 1

        # metaData now contains the header and all of the GlyphRecords and all
        # of the String Records. Its length should be the offset to the first
        # string datum.
        for glyphRec in self.glyphRecords:
            for stringRec in glyphRec.stringRecs:
                assert stringRec.offset == len(
                    metaData
                ), "String offset did not compile correctly! for string:" + str(
                    stringRec.string
                )
                metaData = metaData + stringRec.string

        return metaData

    def toXML(self, writer, ttFont):
        """Dump the header fields followed by each GlyphRecord."""
        writer.comment(
            "Lengths and number of entries in this table will be recalculated by the compiler"
        )
        writer.newline()
        formatstring, names, fixes = sstruct.getformat(METAHeaderFormat)
        for name in names:
            value = getattr(self, name)
            writer.simpletag(name, value=value)
            writer.newline()
        for glyphRec in self.glyphRecords:
            glyphRec.toXML(writer, ttFont)

    def fromXML(self, name, attrs, content, ttFont):
        """Rebuild glyphRecords (and header fields) from TTX elements."""
        if name == "GlyphRecord":
            if not hasattr(self, "glyphRecords"):
                self.glyphRecords = []
            glyphRec = GlyphRecord()
            self.glyphRecords.append(glyphRec)
            for element in content:
                if isinstance(element, str):
                    continue
                name, attrs, content = element
                glyphRec.fromXML(name, attrs, content, ttFont)
            # Offsets are recomputed at compile time.
            glyphRec.offset = -1
            glyphRec.nMetaEntry = len(glyphRec.stringRecs)
        else:
            setattr(self, name, safeEval(attrs["value"]))
|
||||
|
||||
|
||||
class GlyphRecord(object):
    """One per-glyph entry of the META table: a glyph ID plus its string records."""

    def __init__(self):
        self.glyphID = -1
        self.nMetaEntry = -1
        self.offset = -1
        self.stringRecs = []

    def toXML(self, writer, ttFont):
        writer.begintag("GlyphRecord")
        writer.newline()
        for tagName, tagValue in (
            ("glyphID", self.glyphID),
            ("nMetaEntry", self.nMetaEntry),
        ):
            writer.simpletag(tagName, value=tagValue)
            writer.newline()
        for rec in self.stringRecs:
            rec.toXML(writer, ttFont)
        writer.endtag("GlyphRecord")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name == "StringRecord":
            rec = StringRecord()
            self.stringRecs.append(rec)
            for element in content:
                if isinstance(element, str):
                    continue
                # StringRecord.fromXML walks `content` itself.
                rec.fromXML(name, attrs, content, ttFont)
                rec.stringLen = len(rec.string)
        else:
            setattr(self, name, safeEval(attrs["value"]))

    def compile(self, parentTable):
        # Fixed-size record followed by a USHORT or ULONG offset, depending
        # on the parent table's metaFlags.
        data = sstruct.pack(METAGlyphRecordFormat, self)
        if parentTable.metaFlags == 0:
            datum = struct.pack(">H", self.offset)
        elif parentTable.metaFlags == 1:
            datum = struct.pack(">L", self.offset)
        return data + datum

    def __repr__(self):
        return (
            f"GlyphRecord[ glyphID: {self.glyphID}, "
            f"nMetaEntry: {self.nMetaEntry}, offset: {self.offset} ]"
        )
|
||||
|
||||
|
||||
# XXX The following two functions are really broken around UTF-8 vs Unicode
|
||||
|
||||
|
||||
def mapXMLToUTF8(string):
    """Decode ``&#x...;`` / ``&amp;#x...;`` hex escapes in *string* and
    return the result encoded as UTF-8 bytes.

    Inverse of mapUTF8toXML for the escape forms it emits.
    """
    uString = str()
    strLen = len(string)
    i = 0
    while i < strLen:
        prefixLen = 0
        if string[i : i + 3] == "&#x":
            prefixLen = 3
        elif string[i : i + 7] == "&amp;#x":
            # XML-escaped ampersand form; the literal must be 7 chars long
            # to ever match a 7-char slice.
            prefixLen = 7
        if prefixLen:
            i = i + prefixLen
            j = i
            while string[i] != ";":
                i = i + 1
            valStr = string[j:i]

            # Parse the hex digits directly instead of eval()'ing them.
            uString = uString + chr(int(valStr, 16))
        else:
            uString = uString + chr(byteord(string[i]))
        i = i + 1

    return uString.encode("utf_8")
|
||||
|
||||
|
||||
def mapUTF8toXML(string):
    """Decode *string* (UTF-8 bytes) to text, escaping control and
    non-ASCII characters as ``&#x...;`` hex references."""
    pieces = []
    for ch in string.decode("utf_8"):
        code = ord(ch)
        if 0x1F < code < 0x80:
            # Printable ASCII passes through unchanged.
            pieces.append(ch)
        else:
            pieces.append("&#x" + hex(code)[2:] + ";")
    return "".join(pieces)
|
||||
|
||||
|
||||
class StringRecord(object):
    """One metadata string entry: a label ID plus its UTF-8 string payload."""

    def toXML(self, writer, ttFont):
        writer.begintag("StringRecord")
        writer.newline()
        writer.simpletag("labelID", value=self.labelID)
        writer.comment(getLabelString(self.labelID))
        writer.newline()
        writer.newline()
        writer.simpletag("string", value=mapUTF8toXML(self.string))
        writer.newline()
        writer.endtag("StringRecord")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        for element in content:
            if isinstance(element, str):
                continue
            name, attrs, content = element
            value = attrs["value"]
            if name == "string":
                # Stored as UTF-8 bytes, matching what decompile produces.
                self.string = mapXMLToUTF8(value)
            else:
                setattr(self, name, safeEval(value))

    def compile(self, parentTable):
        # Fixed-size record followed by a USHORT or ULONG offset, depending
        # on the parent table's metaFlags.
        data = sstruct.pack(METAStringRecordFormat, self)
        if parentTable.metaFlags == 0:
            datum = struct.pack(">H", self.offset)
        elif parentTable.metaFlags == 1:
            datum = struct.pack(">L", self.offset)
        data = data + datum
        return data

    def __repr__(self):
        # self.string holds UTF-8 bytes; decode it for display instead of
        # concatenating bytes onto str (which raises TypeError on Python 3).
        string = self.string
        if isinstance(string, bytes):
            string = string.decode("utf_8", "replace")
        return (
            "StringRecord [ labelID: "
            + str(self.labelID)
            + " aka "
            + getLabelString(self.labelID)
            + ", offset: "
            + str(self.offset)
            + ", length: "
            + str(self.stringLen)
            + ", string: "
            + string
            + " ]"
        )
|
||||
@@ -0,0 +1,13 @@
|
||||
from .otBase import BaseTTXConverter
|
||||
|
||||
|
||||
class table_M_V_A_R_(BaseTTXConverter):
    """Metrics Variations table

    The ``MVAR`` table contains variation information for font-wide
    metrics in a variable font.

    See also https://learn.microsoft.com/en-us/typography/opentype/spec/mvar
    """
|
||||
@@ -0,0 +1,752 @@
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.roundTools import otRound
|
||||
from fontTools.misc.textTools import safeEval, num2binary, binary2num
|
||||
from fontTools.ttLib.tables import DefaultTable
|
||||
import bisect
|
||||
import logging
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)

# panose classification
# sstruct format for the ten single-byte PANOSE fields embedded in the
# OS/2 table's 10-byte 'panose' field.

panoseFormat = """
    bFamilyType: B
    bSerifStyle: B
    bWeight: B
    bProportion: B
    bContrast: B
    bStrokeVariation: B
    bArmStyle: B
    bLetterForm: B
    bMidline: B
    bXHeight: B
"""
|
||||
|
||||
|
||||
class Panose(object):
    """Container for the ten PANOSE classification bytes of the OS/2 table."""

    def __init__(self, **kwargs):
        _, fieldNames, _ = sstruct.getformat(panoseFormat)
        # Every field defaults to 0 and may be overridden by keyword.
        for fieldName in fieldNames:
            setattr(self, fieldName, kwargs.pop(fieldName, 0))
        for k in kwargs:
            raise TypeError(f"Panose() got an unexpected keyword argument {k!r}")

    def toXML(self, writer, ttFont):
        _, fieldNames, _ = sstruct.getformat(panoseFormat)
        for fieldName in fieldNames:
            writer.simpletag(fieldName, value=getattr(self, fieldName))
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        setattr(self, name, safeEval(attrs["value"]))
|
||||
|
||||
|
||||
# 'sfnt' OS/2 and Windows Metrics table - 'OS/2'
|
||||
|
||||
# sstruct formats for each OS/2 table version. Versions 1, 2-4 and 5 extend
# format 0 with additional trailing fields.
OS2_format_0 = """
    > # big endian
    version: H # version
    xAvgCharWidth: h # average character width
    usWeightClass: H # degree of thickness of strokes
    usWidthClass: H # aspect ratio
    fsType: H # type flags
    ySubscriptXSize: h # subscript horizontal font size
    ySubscriptYSize: h # subscript vertical font size
    ySubscriptXOffset: h # subscript x offset
    ySubscriptYOffset: h # subscript y offset
    ySuperscriptXSize: h # superscript horizontal font size
    ySuperscriptYSize: h # superscript vertical font size
    ySuperscriptXOffset: h # superscript x offset
    ySuperscriptYOffset: h # superscript y offset
    yStrikeoutSize: h # strikeout size
    yStrikeoutPosition: h # strikeout position
    sFamilyClass: h # font family class and subclass
    panose: 10s # panose classification number
    ulUnicodeRange1: L # character range
    ulUnicodeRange2: L # character range
    ulUnicodeRange3: L # character range
    ulUnicodeRange4: L # character range
    achVendID: 4s # font vendor identification
    fsSelection: H # font selection flags
    usFirstCharIndex: H # first unicode character index
    usLastCharIndex: H # last unicode character index
    sTypoAscender: h # typographic ascender
    sTypoDescender: h # typographic descender
    sTypoLineGap: h # typographic line gap
    usWinAscent: H # Windows ascender
    usWinDescent: H # Windows descender
"""

OS2_format_1_addition = """
    ulCodePageRange1: L
    ulCodePageRange2: L
"""

OS2_format_2_addition = (
    OS2_format_1_addition
    + """
    sxHeight: h
    sCapHeight: h
    usDefaultChar: H
    usBreakChar: H
    usMaxContext: H
"""
)

OS2_format_5_addition = (
    OS2_format_2_addition
    + """
    usLowerOpticalPointSize: H
    usUpperOpticalPointSize: H
"""
)

bigendian = " > # big endian\n"

# Full per-version formats: base format 0 plus the version's additions.
OS2_format_1 = OS2_format_0 + OS2_format_1_addition
OS2_format_2 = OS2_format_0 + OS2_format_2_addition
OS2_format_5 = OS2_format_0 + OS2_format_5_addition
# The standalone additions need their own endianness marker so they can be
# unpacked separately (after format 0) in decompile.
OS2_format_1_addition = bigendian + OS2_format_1_addition
OS2_format_2_addition = bigendian + OS2_format_2_addition
OS2_format_5_addition = bigendian + OS2_format_5_addition
|
||||
|
||||
|
||||
class table_O_S_2f_2(DefaultTable.DefaultTable):
    """OS/2 and Windows Metrics table

    The ``OS/2`` table contains a variety of font-wide metrics and
    parameters that may be useful to an operating system or other
    software for system-integration purposes.

    See also https://learn.microsoft.com/en-us/typography/opentype/spec/os2
    """

    dependencies = ["head"]

    def decompile(self, data, ttFont):
        """Unpack the binary table; the version field selects the format."""
        dummy, data = sstruct.unpack2(OS2_format_0, data, self)

        if self.version == 1:
            dummy, data = sstruct.unpack2(OS2_format_1_addition, data, self)
        elif self.version in (2, 3, 4):
            dummy, data = sstruct.unpack2(OS2_format_2_addition, data, self)
        elif self.version == 5:
            dummy, data = sstruct.unpack2(OS2_format_5_addition, data, self)
            # v5 optical point sizes are stored in twentieths of a point.
            self.usLowerOpticalPointSize /= 20
            self.usUpperOpticalPointSize /= 20
        elif self.version != 0:
            from fontTools import ttLib

            raise ttLib.TTLibError(
                "unknown format for OS/2 table: version %s" % self.version
            )
        if len(data):
            log.warning("too much 'OS/2' table data")

        self.panose = sstruct.unpack(panoseFormat, self.panose, Panose())

    def compile(self, ttFont):
        """Serialize the table, warning about inconsistent style flags."""
        self.updateFirstAndLastCharIndex(ttFont)
        panose = self.panose
        head = ttFont["head"]
        if (self.fsSelection & 1) and not (head.macStyle & 1 << 1):
            log.warning(
                "fsSelection bit 0 (italic) and "
                "head table macStyle bit 1 (italic) should match"
            )
        if (self.fsSelection & 1 << 5) and not (head.macStyle & 1):
            log.warning(
                "fsSelection bit 5 (bold) and "
                "head table macStyle bit 0 (bold) should match"
            )
        # 1 + (1 << 5) == 0b100001, i.e. the italic and bold bits together.
        if (self.fsSelection & 1 << 6) and (self.fsSelection & 1 + (1 << 5)):
            log.warning(
                "fsSelection bit 6 (regular) is set, "
                "bits 0 (italic) and 5 (bold) must be clear"
            )
        if self.version < 4 and self.fsSelection & 0b1110000000:
            log.warning(
                "fsSelection bits 7, 8 and 9 are only defined in "
                "OS/2 table version 4 and up: version %s",
                self.version,
            )
        # Temporarily replace the Panose object with its packed bytes so the
        # whole struct can be packed; restored below.
        self.panose = sstruct.pack(panoseFormat, self.panose)
        if self.version == 0:
            data = sstruct.pack(OS2_format_0, self)
        elif self.version == 1:
            data = sstruct.pack(OS2_format_1, self)
        elif self.version in (2, 3, 4):
            data = sstruct.pack(OS2_format_2, self)
        elif self.version == 5:
            d = self.__dict__.copy()
            # Convert back to twentieths of a point for storage.
            d["usLowerOpticalPointSize"] = round(self.usLowerOpticalPointSize * 20)
            d["usUpperOpticalPointSize"] = round(self.usUpperOpticalPointSize * 20)
            data = sstruct.pack(OS2_format_5, d)
        else:
            from fontTools import ttLib

            raise ttLib.TTLibError(
                "unknown format for OS/2 table: version %s" % self.version
            )
        self.panose = panose
        return data

    def toXML(self, writer, ttFont):
        """Dump all fields of the version-appropriate format as TTX."""
        writer.comment(
            "The fields 'usFirstCharIndex' and 'usLastCharIndex'\n"
            "will be recalculated by the compiler"
        )
        writer.newline()
        if self.version == 1:
            format = OS2_format_1
        elif self.version in (2, 3, 4):
            format = OS2_format_2
        elif self.version == 5:
            format = OS2_format_5
        else:
            format = OS2_format_0
        formatstring, names, fixes = sstruct.getformat(format)
        for name in names:
            value = getattr(self, name)
            if name == "panose":
                writer.begintag("panose")
                writer.newline()
                value.toXML(writer, ttFont)
                writer.endtag("panose")
            elif name in (
                "ulUnicodeRange1",
                "ulUnicodeRange2",
                "ulUnicodeRange3",
                "ulUnicodeRange4",
                "ulCodePageRange1",
                "ulCodePageRange2",
            ):
                # Bitfields are written in binary notation for readability.
                writer.simpletag(name, value=num2binary(value))
            elif name in ("fsType", "fsSelection"):
                writer.simpletag(name, value=num2binary(value, 16))
            elif name == "achVendID":
                writer.simpletag(name, value=repr(value)[1:-1])
            else:
                writer.simpletag(name, value=value)
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Set a field from its TTX element, mirroring toXML's encodings."""
        if name == "panose":
            self.panose = panose = Panose()
            for element in content:
                if isinstance(element, tuple):
                    name, attrs, content = element
                    panose.fromXML(name, attrs, content, ttFont)
        elif name in (
            "ulUnicodeRange1",
            "ulUnicodeRange2",
            "ulUnicodeRange3",
            "ulUnicodeRange4",
            "ulCodePageRange1",
            "ulCodePageRange2",
            "fsType",
            "fsSelection",
        ):
            setattr(self, name, binary2num(attrs["value"]))
        elif name == "achVendID":
            setattr(self, name, safeEval("'''" + attrs["value"] + "'''"))
        else:
            setattr(self, name, safeEval(attrs["value"]))

    def updateFirstAndLastCharIndex(self, ttFont):
        """Recompute usFirstCharIndex/usLastCharIndex from the Unicode cmaps."""
        if "cmap" not in ttFont:
            return
        codes = set()
        for table in getattr(ttFont["cmap"], "tables", []):
            if table.isUnicode():
                codes.update(table.cmap.keys())
        if codes:
            minCode = min(codes)
            maxCode = max(codes)
            # USHORT cannot hold codepoints greater than 0xFFFF
            self.usFirstCharIndex = min(0xFFFF, minCode)
            self.usLastCharIndex = min(0xFFFF, maxCode)

    # misspelled attributes kept for legacy reasons

    @property
    def usMaxContex(self):
        return self.usMaxContext

    @usMaxContex.setter
    def usMaxContex(self, value):
        self.usMaxContext = value

    @property
    def fsFirstCharIndex(self):
        return self.usFirstCharIndex

    @fsFirstCharIndex.setter
    def fsFirstCharIndex(self, value):
        self.usFirstCharIndex = value

    @property
    def fsLastCharIndex(self):
        return self.usLastCharIndex

    @fsLastCharIndex.setter
    def fsLastCharIndex(self, value):
        self.usLastCharIndex = value

    def getUnicodeRanges(self):
        """Return the set of 'ulUnicodeRange*' bits currently enabled."""
        bits = set()
        ul1, ul2 = self.ulUnicodeRange1, self.ulUnicodeRange2
        ul3, ul4 = self.ulUnicodeRange3, self.ulUnicodeRange4
        for i in range(32):
            if ul1 & (1 << i):
                bits.add(i)
            if ul2 & (1 << i):
                bits.add(i + 32)
            if ul3 & (1 << i):
                bits.add(i + 64)
            if ul4 & (1 << i):
                bits.add(i + 96)
        return bits

    def setUnicodeRanges(self, bits):
        """Set the 'ulUnicodeRange*' fields to the specified 'bits'."""
        ul1, ul2, ul3, ul4 = 0, 0, 0, 0
        for bit in bits:
            if 0 <= bit < 32:
                ul1 |= 1 << bit
            elif 32 <= bit < 64:
                ul2 |= 1 << (bit - 32)
            elif 64 <= bit < 96:
                ul3 |= 1 << (bit - 64)
            elif 96 <= bit < 123:
                ul4 |= 1 << (bit - 96)
            else:
                raise ValueError("expected 0 <= int <= 122, found: %r" % bit)
        self.ulUnicodeRange1, self.ulUnicodeRange2 = ul1, ul2
        self.ulUnicodeRange3, self.ulUnicodeRange4 = ul3, ul4

    def recalcUnicodeRanges(self, ttFont, pruneOnly=False):
        """Intersect the codepoints in the font's Unicode cmap subtables with
        the Unicode block ranges defined in the OpenType specification (v1.7),
        and set the respective 'ulUnicodeRange*' bits if there is at least ONE
        intersection.
        If 'pruneOnly' is True, only clear unused bits with NO intersection.
        """
        unicodes = set()
        for table in ttFont["cmap"].tables:
            if table.isUnicode():
                unicodes.update(table.cmap.keys())
        if pruneOnly:
            empty = intersectUnicodeRanges(unicodes, inverse=True)
            bits = self.getUnicodeRanges() - empty
        else:
            bits = intersectUnicodeRanges(unicodes)
        self.setUnicodeRanges(bits)
        return bits

    def getCodePageRanges(self):
        """Return the set of 'ulCodePageRange*' bits currently enabled."""
        bits = set()
        if self.version < 1:
            # Format 0 has no code page range fields.
            return bits
        ul1, ul2 = self.ulCodePageRange1, self.ulCodePageRange2
        for i in range(32):
            if ul1 & (1 << i):
                bits.add(i)
            if ul2 & (1 << i):
                bits.add(i + 32)
        return bits

    def setCodePageRanges(self, bits):
        """Set the 'ulCodePageRange*' fields to the specified 'bits'.

        Upgrades the table to version 1 if needed, since format 0 has no
        code page range fields.

        Raises:
            ValueError: if any bit is outside the range 0-63.
        """
        ul1, ul2 = 0, 0
        for bit in bits:
            if 0 <= bit < 32:
                ul1 |= 1 << bit
            elif 32 <= bit < 64:
                ul2 |= 1 << (bit - 32)
            else:
                # !r (conversion), not :r (invalid format spec which would
                # itself raise "Unknown format code 'r'").
                raise ValueError(f"expected 0 <= int <= 63, found: {bit!r}")
        if self.version < 1:
            self.version = 1
        self.ulCodePageRange1, self.ulCodePageRange2 = ul1, ul2

    def recalcCodePageRanges(self, ttFont, pruneOnly=False):
        """Recompute the code page range bits from the font's Unicode cmaps."""
        unicodes = set()
        for table in ttFont["cmap"].tables:
            if table.isUnicode():
                unicodes.update(table.cmap.keys())
        bits = calcCodePageRanges(unicodes)
        if pruneOnly:
            bits &= self.getCodePageRanges()
        # when no codepage ranges can be enabled, fall back to enabling bit 0
        # (Latin 1) so that the font works in MS Word:
        # https://github.com/googlei18n/fontmake/issues/468
        if not bits:
            bits = {0}
        self.setCodePageRanges(bits)
        return bits

    def recalcAvgCharWidth(self, ttFont):
        """Recalculate xAvgCharWidth using metrics from ttFont's 'hmtx' table.

        Set it to 0 if the unlikely event 'hmtx' table is not found.
        """
        avg_width = 0
        hmtx = ttFont.get("hmtx")
        if hmtx is not None:
            widths = [width for width, _ in hmtx.metrics.values() if width > 0]
            if widths:
                avg_width = otRound(sum(widths) / len(widths))
        self.xAvgCharWidth = avg_width
        return avg_width
|
||||
|
||||
|
||||
# Unicode ranges data from the OpenType OS/2 table specification v1.7
|
||||
|
||||
# Unicode ranges data from the OpenType OS/2 table specification v1.7
# Index in the outer tuple == the ulUnicodeRange bit number; each entry is a
# tuple of (block name, (first codepoint, last codepoint)) pairs covered by
# that bit.
OS2_UNICODE_RANGES = (
    (("Basic Latin", (0x0000, 0x007F)),),
    (("Latin-1 Supplement", (0x0080, 0x00FF)),),
    (("Latin Extended-A", (0x0100, 0x017F)),),
    (("Latin Extended-B", (0x0180, 0x024F)),),
    (
        ("IPA Extensions", (0x0250, 0x02AF)),
        ("Phonetic Extensions", (0x1D00, 0x1D7F)),
        ("Phonetic Extensions Supplement", (0x1D80, 0x1DBF)),
    ),
    (
        ("Spacing Modifier Letters", (0x02B0, 0x02FF)),
        ("Modifier Tone Letters", (0xA700, 0xA71F)),
    ),
    (
        ("Combining Diacritical Marks", (0x0300, 0x036F)),
        ("Combining Diacritical Marks Supplement", (0x1DC0, 0x1DFF)),
    ),
    (("Greek and Coptic", (0x0370, 0x03FF)),),
    (("Coptic", (0x2C80, 0x2CFF)),),
    (
        ("Cyrillic", (0x0400, 0x04FF)),
        ("Cyrillic Supplement", (0x0500, 0x052F)),
        ("Cyrillic Extended-A", (0x2DE0, 0x2DFF)),
        ("Cyrillic Extended-B", (0xA640, 0xA69F)),
    ),
    (("Armenian", (0x0530, 0x058F)),),
    (("Hebrew", (0x0590, 0x05FF)),),
    (("Vai", (0xA500, 0xA63F)),),
    (("Arabic", (0x0600, 0x06FF)), ("Arabic Supplement", (0x0750, 0x077F))),
    (("NKo", (0x07C0, 0x07FF)),),
    (("Devanagari", (0x0900, 0x097F)),),
    (("Bengali", (0x0980, 0x09FF)),),
    (("Gurmukhi", (0x0A00, 0x0A7F)),),
    (("Gujarati", (0x0A80, 0x0AFF)),),
    (("Oriya", (0x0B00, 0x0B7F)),),
    (("Tamil", (0x0B80, 0x0BFF)),),
    (("Telugu", (0x0C00, 0x0C7F)),),
    (("Kannada", (0x0C80, 0x0CFF)),),
    (("Malayalam", (0x0D00, 0x0D7F)),),
    (("Thai", (0x0E00, 0x0E7F)),),
    (("Lao", (0x0E80, 0x0EFF)),),
    (("Georgian", (0x10A0, 0x10FF)), ("Georgian Supplement", (0x2D00, 0x2D2F))),
    (("Balinese", (0x1B00, 0x1B7F)),),
    (("Hangul Jamo", (0x1100, 0x11FF)),),
    (
        ("Latin Extended Additional", (0x1E00, 0x1EFF)),
        ("Latin Extended-C", (0x2C60, 0x2C7F)),
        ("Latin Extended-D", (0xA720, 0xA7FF)),
    ),
    (("Greek Extended", (0x1F00, 0x1FFF)),),
    (
        ("General Punctuation", (0x2000, 0x206F)),
        ("Supplemental Punctuation", (0x2E00, 0x2E7F)),
    ),
    (("Superscripts And Subscripts", (0x2070, 0x209F)),),
    (("Currency Symbols", (0x20A0, 0x20CF)),),
    (("Combining Diacritical Marks For Symbols", (0x20D0, 0x20FF)),),
    (("Letterlike Symbols", (0x2100, 0x214F)),),
    (("Number Forms", (0x2150, 0x218F)),),
    (
        ("Arrows", (0x2190, 0x21FF)),
        ("Supplemental Arrows-A", (0x27F0, 0x27FF)),
        ("Supplemental Arrows-B", (0x2900, 0x297F)),
        ("Miscellaneous Symbols and Arrows", (0x2B00, 0x2BFF)),
    ),
    (
        ("Mathematical Operators", (0x2200, 0x22FF)),
        ("Supplemental Mathematical Operators", (0x2A00, 0x2AFF)),
        ("Miscellaneous Mathematical Symbols-A", (0x27C0, 0x27EF)),
        ("Miscellaneous Mathematical Symbols-B", (0x2980, 0x29FF)),
    ),
    (("Miscellaneous Technical", (0x2300, 0x23FF)),),
    (("Control Pictures", (0x2400, 0x243F)),),
    (("Optical Character Recognition", (0x2440, 0x245F)),),
    (("Enclosed Alphanumerics", (0x2460, 0x24FF)),),
    (("Box Drawing", (0x2500, 0x257F)),),
    (("Block Elements", (0x2580, 0x259F)),),
    (("Geometric Shapes", (0x25A0, 0x25FF)),),
    (("Miscellaneous Symbols", (0x2600, 0x26FF)),),
    (("Dingbats", (0x2700, 0x27BF)),),
    (("CJK Symbols And Punctuation", (0x3000, 0x303F)),),
    (("Hiragana", (0x3040, 0x309F)),),
    (
        ("Katakana", (0x30A0, 0x30FF)),
        ("Katakana Phonetic Extensions", (0x31F0, 0x31FF)),
    ),
    (("Bopomofo", (0x3100, 0x312F)), ("Bopomofo Extended", (0x31A0, 0x31BF))),
    (("Hangul Compatibility Jamo", (0x3130, 0x318F)),),
    (("Phags-pa", (0xA840, 0xA87F)),),
    (("Enclosed CJK Letters And Months", (0x3200, 0x32FF)),),
    (("CJK Compatibility", (0x3300, 0x33FF)),),
    (("Hangul Syllables", (0xAC00, 0xD7AF)),),
    (("Non-Plane 0 *", (0xD800, 0xDFFF)),),
    (("Phoenician", (0x10900, 0x1091F)),),
    (
        ("CJK Unified Ideographs", (0x4E00, 0x9FFF)),
        ("CJK Radicals Supplement", (0x2E80, 0x2EFF)),
        ("Kangxi Radicals", (0x2F00, 0x2FDF)),
        ("Ideographic Description Characters", (0x2FF0, 0x2FFF)),
        ("CJK Unified Ideographs Extension A", (0x3400, 0x4DBF)),
        ("CJK Unified Ideographs Extension B", (0x20000, 0x2A6DF)),
        ("Kanbun", (0x3190, 0x319F)),
    ),
    (("Private Use Area (plane 0)", (0xE000, 0xF8FF)),),
    (
        ("CJK Strokes", (0x31C0, 0x31EF)),
        ("CJK Compatibility Ideographs", (0xF900, 0xFAFF)),
        ("CJK Compatibility Ideographs Supplement", (0x2F800, 0x2FA1F)),
    ),
    (("Alphabetic Presentation Forms", (0xFB00, 0xFB4F)),),
    (("Arabic Presentation Forms-A", (0xFB50, 0xFDFF)),),
    (("Combining Half Marks", (0xFE20, 0xFE2F)),),
    (
        ("Vertical Forms", (0xFE10, 0xFE1F)),
        ("CJK Compatibility Forms", (0xFE30, 0xFE4F)),
    ),
    (("Small Form Variants", (0xFE50, 0xFE6F)),),
    (("Arabic Presentation Forms-B", (0xFE70, 0xFEFF)),),
    (("Halfwidth And Fullwidth Forms", (0xFF00, 0xFFEF)),),
    (("Specials", (0xFFF0, 0xFFFF)),),
    (("Tibetan", (0x0F00, 0x0FFF)),),
    (("Syriac", (0x0700, 0x074F)),),
    (("Thaana", (0x0780, 0x07BF)),),
    (("Sinhala", (0x0D80, 0x0DFF)),),
    (("Myanmar", (0x1000, 0x109F)),),
    (
        ("Ethiopic", (0x1200, 0x137F)),
        ("Ethiopic Supplement", (0x1380, 0x139F)),
        ("Ethiopic Extended", (0x2D80, 0x2DDF)),
    ),
    (("Cherokee", (0x13A0, 0x13FF)),),
    (("Unified Canadian Aboriginal Syllabics", (0x1400, 0x167F)),),
    (("Ogham", (0x1680, 0x169F)),),
    (("Runic", (0x16A0, 0x16FF)),),
    (("Khmer", (0x1780, 0x17FF)), ("Khmer Symbols", (0x19E0, 0x19FF))),
    (("Mongolian", (0x1800, 0x18AF)),),
    (("Braille Patterns", (0x2800, 0x28FF)),),
    (("Yi Syllables", (0xA000, 0xA48F)), ("Yi Radicals", (0xA490, 0xA4CF))),
    (
        ("Tagalog", (0x1700, 0x171F)),
        ("Hanunoo", (0x1720, 0x173F)),
        ("Buhid", (0x1740, 0x175F)),
        ("Tagbanwa", (0x1760, 0x177F)),
    ),
    (("Old Italic", (0x10300, 0x1032F)),),
    (("Gothic", (0x10330, 0x1034F)),),
    (("Deseret", (0x10400, 0x1044F)),),
    (
        ("Byzantine Musical Symbols", (0x1D000, 0x1D0FF)),
        ("Musical Symbols", (0x1D100, 0x1D1FF)),
        ("Ancient Greek Musical Notation", (0x1D200, 0x1D24F)),
    ),
    (("Mathematical Alphanumeric Symbols", (0x1D400, 0x1D7FF)),),
    (
        ("Private Use (plane 15)", (0xF0000, 0xFFFFD)),
        ("Private Use (plane 16)", (0x100000, 0x10FFFD)),
    ),
    (
        ("Variation Selectors", (0xFE00, 0xFE0F)),
        ("Variation Selectors Supplement", (0xE0100, 0xE01EF)),
    ),
    (("Tags", (0xE0000, 0xE007F)),),
    (("Limbu", (0x1900, 0x194F)),),
    (("Tai Le", (0x1950, 0x197F)),),
    (("New Tai Lue", (0x1980, 0x19DF)),),
    (("Buginese", (0x1A00, 0x1A1F)),),
    (("Glagolitic", (0x2C00, 0x2C5F)),),
    (("Tifinagh", (0x2D30, 0x2D7F)),),
    (("Yijing Hexagram Symbols", (0x4DC0, 0x4DFF)),),
    (("Syloti Nagri", (0xA800, 0xA82F)),),
    (
        ("Linear B Syllabary", (0x10000, 0x1007F)),
        ("Linear B Ideograms", (0x10080, 0x100FF)),
        ("Aegean Numbers", (0x10100, 0x1013F)),
    ),
    (("Ancient Greek Numbers", (0x10140, 0x1018F)),),
    (("Ugaritic", (0x10380, 0x1039F)),),
    (("Old Persian", (0x103A0, 0x103DF)),),
    (("Shavian", (0x10450, 0x1047F)),),
    (("Osmanya", (0x10480, 0x104AF)),),
    (("Cypriot Syllabary", (0x10800, 0x1083F)),),
    (("Kharoshthi", (0x10A00, 0x10A5F)),),
    (("Tai Xuan Jing Symbols", (0x1D300, 0x1D35F)),),
    (
        ("Cuneiform", (0x12000, 0x123FF)),
        ("Cuneiform Numbers and Punctuation", (0x12400, 0x1247F)),
    ),
    (("Counting Rod Numerals", (0x1D360, 0x1D37F)),),
    (("Sundanese", (0x1B80, 0x1BBF)),),
    (("Lepcha", (0x1C00, 0x1C4F)),),
    (("Ol Chiki", (0x1C50, 0x1C7F)),),
    (("Saurashtra", (0xA880, 0xA8DF)),),
    (("Kayah Li", (0xA900, 0xA92F)),),
    (("Rejang", (0xA930, 0xA95F)),),
    (("Cham", (0xAA00, 0xAA5F)),),
    (("Ancient Symbols", (0x10190, 0x101CF)),),
    (("Phaistos Disc", (0x101D0, 0x101FF)),),
    (
        ("Carian", (0x102A0, 0x102DF)),
        ("Lycian", (0x10280, 0x1029F)),
        ("Lydian", (0x10920, 0x1093F)),
    ),
    (("Domino Tiles", (0x1F030, 0x1F09F)), ("Mahjong Tiles", (0x1F000, 0x1F02F))),
)
|
||||
|
||||
|
||||
_unicodeStarts = []
|
||||
_unicodeValues = [None]
|
||||
|
||||
|
||||
def _getUnicodeRanges():
    """Build (once) and return the sorted lookup tables for the OS/2
    'ulUnicodeRanges' bits.

    Returns the module-level pair (_unicodeStarts, _unicodeValues): a sorted
    list of range start codepoints and the parallel list of (stop, bit)
    pairs. The lists are filled in place on first call and reused afterwards.
    """
    if not _unicodeStarts:
        flattened = sorted(
            (rangeStart, (rangeStop, bit))
            for bit, blocks in enumerate(OS2_UNICODE_RANGES)
            for _, (rangeStart, rangeStop) in blocks
        )
        for rangeStart, stopAndBit in flattened:
            _unicodeStarts.append(rangeStart)
            _unicodeValues.append(stopAndBit)
    return _unicodeStarts, _unicodeValues
|
||||
|
||||
|
||||
def intersectUnicodeRanges(unicodes, inverse=False):
    """Intersect a sequence of (int) Unicode codepoints with the Unicode block
    ranges defined in the OpenType specification v1.7, and return the set of
    'ulUnicodeRanges' bits for which there is at least ONE intersection.
    If 'inverse' is True, return the bits for which there is NO intersection.

    >>> intersectUnicodeRanges([0x0410]) == {9}
    True
    >>> intersectUnicodeRanges([0x0410, 0x1F000]) == {9, 57, 122}
    True
    >>> intersectUnicodeRanges([0x0410, 0x1F000], inverse=True) == (
    ... set(range(len(OS2_UNICODE_RANGES))) - {9, 57, 122})
    True
    """
    codepoints = set(unicodes)
    starts, values = _getUnicodeRanges()
    found = set()
    for cp in codepoints:
        # Locate the last range starting at or below cp, then verify cp
        # actually falls inside it (ranges are not contiguous).
        stop, bit = values[bisect.bisect(starts, cp)]
        if cp <= stop:
            found.add(bit)
    # The spec says that bit 57 ("Non Plane 0") implies at least one
    # codepoint beyond the BMP, so set it for any supplementary codepoint.
    if any(0x10000 <= cp < 0x110000 for cp in codepoints):
        found.add(57)
    if inverse:
        return set(range(len(OS2_UNICODE_RANGES))) - found
    return found
|
||||
|
||||
|
||||
def calcCodePageRanges(unicodes):
    """Given a set of Unicode codepoints (integers), calculate the
    corresponding OS/2 CodePage range bits.

    This is a direct translation of the FontForge implementation:
    https://github.com/fontforge/fontforge/blob/7b2c074/fontforge/tottf.c#L3158
    """
    bits = set()
    # Preconditions shared by several code pages: full printable-ASCII
    # coverage, the box-drawing char U+2524, and the square-root sign.
    hasAscii = set(range(0x20, 0x7E)).issubset(unicodes)
    hasLineart = ord("┤") in unicodes
    hasSqrt = ord("√") in unicodes

    for cp in unicodes:
        if cp == ord("Þ") and hasAscii:
            bits.add(0)  # Latin 1
        elif cp == ord("Ľ") and hasAscii:
            bits.add(1)  # Latin 2: Eastern Europe
            if hasLineart:
                bits.add(58)  # Latin 2
        elif cp == ord("Б"):
            bits.add(2)  # Cyrillic
            if ord("Ѕ") in unicodes and hasLineart:
                bits.add(57)  # IBM Cyrillic
            if ord("╜") in unicodes and hasLineart:
                bits.add(49)  # MS-DOS Russian
        elif cp == ord("Ά"):
            bits.add(3)  # Greek
            if hasLineart and ord("½") in unicodes:
                bits.add(48)  # IBM Greek
            if hasLineart and hasSqrt:
                bits.add(60)  # Greek, former 437 G
        elif cp == ord("İ") and hasAscii:
            bits.add(4)  # Turkish
            if hasLineart:
                bits.add(56)  # IBM turkish
        elif cp == ord("א"):
            bits.add(5)  # Hebrew
            if hasLineart and hasSqrt:
                bits.add(53)  # Hebrew
        elif cp == ord("ر"):
            bits.add(6)  # Arabic
            if hasSqrt:
                bits.add(51)  # Arabic
            if hasLineart:
                bits.add(61)  # Arabic; ASMO 708
        elif cp == ord("ŗ") and hasAscii:
            bits.add(7)  # Windows Baltic
            if hasLineart:
                bits.add(59)  # MS-DOS Baltic
        elif cp == ord("₫") and hasAscii:
            bits.add(8)  # Vietnamese
        elif cp == ord("ๅ"):
            bits.add(16)  # Thai
        elif cp == ord("エ"):
            bits.add(17)  # JIS/Japan
        elif cp == ord("ㄅ"):
            bits.add(18)  # Chinese: Simplified
        elif cp == ord("ㄱ"):
            bits.add(19)  # Korean wansung
        elif cp == ord("央"):
            bits.add(20)  # Chinese: Traditional
        elif cp == ord("곴"):
            bits.add(21)  # Korean Johab
        elif cp == ord("♥") and hasAscii:
            bits.add(30)  # OEM Character Set
        # TODO: Symbol bit has a special meaning (check the spec), we need
        # to confirm if this is wanted by default.
        # elif chr(0xF000) <= char <= chr(0xF0FF):
        #    codepageRanges.add(31)          # Symbol Character Set
        elif cp == ord("þ") and hasAscii and hasLineart:
            bits.add(54)  # MS-DOS Icelandic
        elif cp == ord("╚") and hasAscii:
            bits.add(62)  # WE/Latin 1
            bits.add(63)  # US
        elif hasAscii and hasLineart and hasSqrt:
            if cp == ord("Å"):
                bits.add(50)  # MS-DOS Nordic
            elif cp == ord("é"):
                bits.add(52)  # MS-DOS Canadian French
            elif cp == ord("õ"):
                bits.add(55)  # MS-DOS Portuguese

    if hasAscii and ord("‰") in unicodes and ord("∑") in unicodes:
        bits.add(29)  # Macintosh Character Set (US Roman)

    return bits
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Run this module's doctests (e.g. in intersectUnicodeRanges) when the
    # file is executed directly; exit status is the number of failed examples.
    import doctest, sys

    sys.exit(doctest.testmod().failed)
|
||||
@@ -0,0 +1,99 @@
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.textTools import bytechr, byteord, tobytes, tostr, safeEval
|
||||
from . import DefaultTable
|
||||
|
||||
SINGFormat = """
|
||||
> # big endian
|
||||
tableVersionMajor: H
|
||||
tableVersionMinor: H
|
||||
glyphletVersion: H
|
||||
permissions: h
|
||||
mainGID: H
|
||||
unitsPerEm: H
|
||||
vertAdvance: h
|
||||
vertOrigin: h
|
||||
uniqueName: 28s
|
||||
METAMD5: 16s
|
||||
nameLength: 1s
|
||||
"""
|
||||
# baseGlyphName is a byte string which follows the record above.
|
||||
|
||||
|
||||
class table_S_I_N_G_(DefaultTable.DefaultTable):
    """Glyphlets SING table

    The ``SING`` table is used by Adobe's SING Glyphlets.

    See also https://web.archive.org/web/20080627183635/http://www.adobe.com/devnet/opentype/gdk/topic.html
    """

    dependencies = []

    def decompile(self, data, ttFont):
        """Unpack the fixed-size header, then the trailing baseGlyphName."""
        dummy, rest = sstruct.unpack2(SINGFormat, data, self)
        self.uniqueName = self.decompileUniqueName(self.uniqueName)
        self.nameLength = byteord(self.nameLength)
        assert len(rest) == self.nameLength
        self.baseGlyphName = tostr(rest)

        # Render the 16-byte MD5 digest as a "[0x.., 0x.., ...]" string of
        # hex byte values so it round-trips through TTX as text.
        rawMETAMD5 = self.METAMD5
        self.METAMD5 = "[" + hex(byteord(self.METAMD5[0]))
        for char in rawMETAMD5[1:]:
            self.METAMD5 = self.METAMD5 + ", " + hex(byteord(char))
        self.METAMD5 = self.METAMD5 + "]"

    def decompileUniqueName(self, data):
        """Decode the NUL-terminated uniqueName field to text.

        Printable ASCII bytes are kept as-is; any other byte is rendered as
        a backslash followed by a zero-padded 3-digit octal escape.
        """
        name = ""
        for char in data:
            val = byteord(char)
            if val == 0:
                break
            # Fixed: the original condition used `or`, which was always true
            # and made the octal-escape branch unreachable; the intent (per
            # the escape code below) is the printable ASCII range.
            if 31 < val < 128:
                name += chr(val)
            else:
                # format(val, "03o") yields a zero-padded 3-digit octal
                # string with no "0o" prefix (the original oct()/zfill code
                # discarded the zfill result and kept Python 3's prefix).
                name += "\\" + format(val, "03o")
        return name

    def compile(self, ttFont):
        """Pack the header fields followed by baseGlyphName."""
        d = self.__dict__.copy()
        d["nameLength"] = bytechr(len(self.baseGlyphName))
        d["uniqueName"] = self.compilecompileUniqueName(self.uniqueName, 28)
        # METAMD5 was decompiled to a "[0x.., ...]" literal; parse it back
        # with safeEval (literals only) rather than bare eval.
        METAMD5List = safeEval(self.METAMD5)
        d["METAMD5"] = b""
        for val in METAMD5List:
            d["METAMD5"] += bytechr(val)
        assert len(d["METAMD5"]) == 16, "Failed to pack 16 byte MD5 hash in SING table"
        data = sstruct.pack(SINGFormat, d)
        data = data + tobytes(self.baseGlyphName)
        return data

    def compilecompileUniqueName(self, name, length):
        """Truncate or NUL-pad `name` to exactly `length` characters.

        A truncated name keeps a trailing NUL terminator; a short name is
        padded with NULs up to the fixed field length.
        """
        nameLen = len(name)
        if length <= nameLen:
            name = name[: length - 1] + "\000"
        else:
            # Fixed: the original computed the pad count as (nameLen - length),
            # a negative number that always produced an empty pad string, so
            # short names were never padded to the 28-byte struct field.
            name += (length - nameLen) * "\000"
        return name

    def toXML(self, writer, ttFont):
        writer.comment("Most of this table will be recalculated by the compiler")
        writer.newline()
        formatstring, names, fixes = sstruct.getformat(SINGFormat)
        for name in names:
            value = getattr(self, name)
            writer.simpletag(name, value=value)
            writer.newline()
        writer.simpletag("baseGlyphName", value=self.baseGlyphName)
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        value = attrs["value"]
        # String-valued fields are stored verbatim; all others are numeric.
        if name in ["uniqueName", "METAMD5", "baseGlyphName"]:
            setattr(self, name, value)
        else:
            setattr(self, name, safeEval(value))
|
||||
@@ -0,0 +1,15 @@
|
||||
from .otBase import BaseTTXConverter
|
||||
|
||||
|
||||
class table_S_T_A_T_(BaseTTXConverter):
    """Style Attributes table

    The ``STAT`` table records stylistic or typeface-design attributes that
    differentiate the individual fonts within a font family from one another.
    Those attributes can be used to assist users when navigating the style
    variations of a variable font or a family of static fonts.

    All compile/decompile behavior is inherited from BaseTTXConverter.

    See also https://learn.microsoft.com/en-us/typography/opentype/spec/stat
    """
|
||||
@@ -0,0 +1,223 @@
|
||||
"""Compiles/decompiles SVG table.
|
||||
|
||||
https://docs.microsoft.com/en-us/typography/opentype/spec/svg
|
||||
|
||||
The XML format is:
|
||||
|
||||
.. code-block:: xml
|
||||
|
||||
<SVG>
|
||||
<svgDoc endGlyphID="1" startGlyphID="1">
|
||||
<![CDATA[ <complete SVG doc> ]]
|
||||
</svgDoc>
|
||||
...
|
||||
<svgDoc endGlyphID="n" startGlyphID="m">
|
||||
<![CDATA[ <complete SVG doc> ]]
|
||||
</svgDoc>
|
||||
</SVG>
|
||||
"""
|
||||
|
||||
from fontTools.misc.textTools import bytesjoin, safeEval, strjoin, tobytes, tostr
|
||||
from fontTools.misc import sstruct
|
||||
from . import DefaultTable
|
||||
from collections.abc import Sequence
|
||||
from dataclasses import dataclass, astuple
|
||||
from io import BytesIO
|
||||
import struct
|
||||
import logging
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
SVG_format_0 = """
|
||||
> # big endian
|
||||
version: H
|
||||
offsetToSVGDocIndex: L
|
||||
reserved: L
|
||||
"""
|
||||
|
||||
SVG_format_0Size = sstruct.calcsize(SVG_format_0)
|
||||
|
||||
doc_index_entry_format_0 = """
|
||||
> # big endian
|
||||
startGlyphID: H
|
||||
endGlyphID: H
|
||||
svgDocOffset: L
|
||||
svgDocLength: L
|
||||
"""
|
||||
|
||||
doc_index_entry_format_0Size = sstruct.calcsize(doc_index_entry_format_0)
|
||||
|
||||
|
||||
class table_S_V_G_(DefaultTable.DefaultTable):
    """Scalable Vector Graphics table

    The ``SVG`` table contains representations for glyphs in the SVG
    image format.

    See also https://learn.microsoft.com/en-us/typography/opentype/spec/svg
    """

    def decompile(self, data, ttFont):
        # Result: self.docList, a list of SVGDocument entries.
        self.docList = []
        # Version 0 is the standardized version of the table; and current.
        # https://www.microsoft.com/typography/otspec/svg.htm
        sstruct.unpack(SVG_format_0, data[:SVG_format_0Size], self)
        if self.version != 0:
            log.warning(
                "Unknown SVG table version '%s'. Decompiling as version 0.",
                self.version,
            )
        # read in SVG Documents Index
        # data starts with the first entry of the entry list.
        pos = subTableStart = self.offsetToSVGDocIndex
        self.numEntries = struct.unpack(">H", data[pos : pos + 2])[0]
        pos += 2
        if self.numEntries > 0:
            data2 = data[pos:]
            entries = []
            # Each index entry is a fixed-size (startGID, endGID, offset,
            # length) record; slice them out one by one.
            for i in range(self.numEntries):
                record_data = data2[
                    i * doc_index_entry_format_0Size : (i + 1)
                    * doc_index_entry_format_0Size
                ]
                docIndexEntry = sstruct.unpack(
                    doc_index_entry_format_0, record_data, DocumentIndexEntry()
                )
                entries.append(docIndexEntry)

            for entry in entries:
                # Document offsets are relative to the start of the index,
                # not to the start of the table.
                start = entry.svgDocOffset + subTableStart
                end = start + entry.svgDocLength
                doc = data[start:end]
                compressed = False
                # A gzip magic number marks a gzip-compressed document.
                if doc.startswith(b"\x1f\x8b"):
                    import gzip

                    bytesIO = BytesIO(doc)
                    with gzip.GzipFile(None, "r", fileobj=bytesIO) as gunzipper:
                        doc = gunzipper.read()
                    del bytesIO
                    compressed = True
                doc = tostr(doc, "utf_8")
                self.docList.append(
                    SVGDocument(doc, entry.startGlyphID, entry.endGlyphID, compressed)
                )

    def compile(self, ttFont):
        version = 0
        offsetToSVGDocIndex = (
            SVG_format_0Size  # I start the SVGDocIndex right after the header.
        )
        # get SGVDoc info.
        docList = []
        entryList = []
        numEntries = len(self.docList)
        datum = struct.pack(">H", numEntries)
        entryList.append(datum)
        # First document byte comes after the count and all index entries.
        curOffset = len(datum) + doc_index_entry_format_0Size * numEntries
        # Identical documents are stored once and shared between entries.
        seenDocs = {}
        allCompressed = getattr(self, "compressed", False)
        for i, doc in enumerate(self.docList):
            # Accept legacy 3-item [doc, startGID, endGID] entries.
            if isinstance(doc, (list, tuple)):
                doc = SVGDocument(*doc)
                self.docList[i] = doc
            docBytes = tobytes(doc.data, encoding="utf_8")
            if (allCompressed or doc.compressed) and not docBytes.startswith(
                b"\x1f\x8b"
            ):
                import gzip

                bytesIO = BytesIO()
                # mtime=0 strips the useless timestamp and makes gzip output reproducible;
                # equivalent to `gzip -n`
                with gzip.GzipFile(None, "w", fileobj=bytesIO, mtime=0) as gzipper:
                    gzipper.write(docBytes)
                gzipped = bytesIO.getvalue()
                # Only keep the compressed form if it is actually smaller.
                if len(gzipped) < len(docBytes):
                    docBytes = gzipped
                del gzipped, bytesIO
            docLength = len(docBytes)
            if docBytes in seenDocs:
                docOffset = seenDocs[docBytes]
            else:
                docOffset = curOffset
                curOffset += docLength
                seenDocs[docBytes] = docOffset
                docList.append(docBytes)
            entry = struct.pack(
                ">HHLL", doc.startGlyphID, doc.endGlyphID, docOffset, docLength
            )
            entryList.append(entry)
        entryList.extend(docList)
        svgDocData = bytesjoin(entryList)

        reserved = 0
        header = struct.pack(">HLL", version, offsetToSVGDocIndex, reserved)
        data = [header, svgDocData]
        data = bytesjoin(data)
        return data

    def toXML(self, writer, ttFont):
        for i, doc in enumerate(self.docList):
            # Accept legacy 3-item [doc, startGID, endGID] entries.
            if isinstance(doc, (list, tuple)):
                doc = SVGDocument(*doc)
                self.docList[i] = doc
            attrs = {"startGlyphID": doc.startGlyphID, "endGlyphID": doc.endGlyphID}
            if doc.compressed:
                attrs["compressed"] = 1
            writer.begintag("svgDoc", **attrs)
            writer.newline()
            writer.writecdata(doc.data)
            writer.newline()
            writer.endtag("svgDoc")
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name == "svgDoc":
            if not hasattr(self, "docList"):
                self.docList = []
            doc = strjoin(content)
            doc = doc.strip()
            startGID = int(attrs["startGlyphID"])
            endGID = int(attrs["endGlyphID"])
            compressed = bool(safeEval(attrs.get("compressed", "0")))
            self.docList.append(SVGDocument(doc, startGID, endGID, compressed))
        else:
            log.warning("Unknown %s %s", name, content)
|
||||
|
||||
|
||||
class DocumentIndexEntry(object):
    """A single record of the SVG Documents Index: the glyph ID range an
    SVG document covers, plus the document's offset and length within the
    table. All fields are filled in during decompilation."""

    def __init__(self):
        self.startGlyphID = None  # USHORT
        self.endGlyphID = None  # USHORT
        self.svgDocOffset = None  # ULONG
        self.svgDocLength = None  # ULONG

    def __repr__(self):
        fields = (
            self.startGlyphID,
            self.endGlyphID,
            self.svgDocOffset,
            self.svgDocLength,
        )
        return (
            "startGlyphID: %s, endGlyphID: %s, svgDocOffset: %s, svgDocLength: %s"
            % fields
        )
|
||||
|
||||
|
||||
@dataclass
class SVGDocument(Sequence):
    """An SVG document plus the (inclusive) glyph ID range it covers.

    Previously, the SVG table's docList attribute contained plain lists of
    3 items: [doc, startGlyphID, endGlyphID]; the `compressed` attribute
    was added later. To stay backward compatible with code that expects
    fixed length-3 sequences, this dataclass subclasses the Sequence ABC
    and exposes only the first three fields positionally; `compressed` is
    reachable solely by attribute (``doc.compressed``, never ``doc[3]``).
    """

    data: str
    startGlyphID: int
    endGlyphID: int
    compressed: bool = False

    def __getitem__(self, index):
        # Present only the legacy triple to positional access.
        return (self.data, self.startGlyphID, self.endGlyphID)[index]

    def __len__(self):
        return 3
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,92 @@
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.fixedTools import floatToFixedToStr
|
||||
from fontTools.misc.textTools import safeEval
|
||||
from . import DefaultTable
|
||||
from . import grUtils
|
||||
import struct
|
||||
|
||||
Sill_hdr = """
|
||||
>
|
||||
version: 16.16F
|
||||
"""
|
||||
|
||||
|
||||
class table_S__i_l_l(DefaultTable.DefaultTable):
    """Graphite Languages table

    See also https://graphite.sil.org/graphite_techAbout#graphite-font-tables
    """

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        # Maps language code (str) -> list of (featureId, value) pairs.
        self.langs = {}

    def decompile(self, data, ttFont):
        (_, data) = sstruct.unpack2(Sill_hdr, data, self)
        # 16.16 fixed-point version -> rounded float.
        self.version = float(floatToFixedToStr(self.version, precisionBits=16))
        (numLangs,) = struct.unpack(">H", data[:2])
        # Skip the remaining header fields (the 8-byte search header written
        # by grUtils.bininfo in compile() below).
        data = data[8:]
        maxsetting = 0
        langinfo = []
        for i in range(numLangs):
            (langcode, numsettings, offset) = struct.unpack(
                ">4sHH", data[i * 8 : (i + 1) * 8]
            )
            # Convert the byte offset into an index into the settings array
            # (each record is 8 bytes; the language list occupies the first
            # numLangs + 1 records).
            offset = int(offset / 8) - (numLangs + 1)
            # Language codes are NUL-padded 4-byte tags.
            langcode = langcode.replace(b"\000", b"")
            langinfo.append((langcode.decode("utf-8"), numsettings, offset))
            maxsetting = max(maxsetting, offset + numsettings)
        data = data[numLangs * 8 :]
        # Read the flat (featureId, value) settings array.
        finfo = []
        for i in range(maxsetting):
            (fid, val, _) = struct.unpack(">LHH", data[i * 8 : (i + 1) * 8])
            finfo.append((fid, val))
        # Regroup the settings per language via each language's slice.
        self.langs = {}
        for c, n, o in langinfo:
            self.langs[c] = []
            for i in range(o, o + n):
                self.langs[c].append(finfo[i])

    def compile(self, ttFont):
        ldat = b""  # language records
        fdat = b""  # feature-setting records
        # Running record index into the settings array; starts past the
        # language list itself.
        offset = len(self.langs)
        for c, inf in sorted(self.langs.items()):
            ldat += struct.pack(">4sHH", c.encode("utf8"), len(inf), 8 * offset + 20)
            for fid, val in inf:
                fdat += struct.pack(">LHH", fid, val, 0)
            offset += len(inf)
        # Terminating sentinel language record.
        ldat += struct.pack(">LHH", 0x80808080, 0, 8 * offset + 20)
        return (
            sstruct.pack(Sill_hdr, self)
            + grUtils.bininfo(len(self.langs))
            + ldat
            + fdat
        )

    def toXML(self, writer, ttFont):
        writer.simpletag("version", version=self.version)
        writer.newline()
        for c, inf in sorted(self.langs.items()):
            writer.begintag("lang", name=c)
            writer.newline()
            for fid, val in inf:
                writer.simpletag("feature", fid=grUtils.num2tag(fid), val=val)
                writer.newline()
            writer.endtag("lang")
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name == "version":
            self.version = float(safeEval(attrs["version"]))
        elif name == "lang":
            c = attrs["name"]
            self.langs[c] = []
            for element in content:
                if not isinstance(element, tuple):
                    continue
                tag, a, subcontent = element
                if tag == "feature":
                    self.langs[c].append(
                        (grUtils.tag2num(a["fid"]), int(safeEval(a["val"])))
                    )
|
||||
@@ -0,0 +1,13 @@
|
||||
""" TSI{B,C,D,J,P,S,V} are private tables used by Microsoft Visual TrueType (VTT)
|
||||
tool to store its table source data.
|
||||
|
||||
TSIB contains the source text for the ``BASE`` table.
|
||||
|
||||
See also https://learn.microsoft.com/en-us/typography/tools/vtt/tsi-tables
|
||||
"""
|
||||
|
||||
from .T_S_I_V_ import table_T_S_I_V_
|
||||
|
||||
|
||||
class table_T_S_I_B_(table_T_S_I_V_):
    """VTT source text for the ``BASE`` table; all behavior is inherited
    unchanged from table_T_S_I_V_."""

    pass
|
||||
@@ -0,0 +1,14 @@
|
||||
""" TSI{B,C,D,J,P,S,V} are private tables used by Microsoft Visual TrueType (VTT)
|
||||
tool to store its table source data.
|
||||
|
||||
TSIC contains the source text for the Variation CVT window and data for
|
||||
the ``cvar`` table.
|
||||
|
||||
See also https://learn.microsoft.com/en-us/typography/tools/vtt/tsi-tables
|
||||
"""
|
||||
|
||||
from .otBase import BaseTTXConverter
|
||||
|
||||
|
||||
class table_T_S_I_C_(BaseTTXConverter):
    """VTT source for the Variation CVT window / ``cvar`` data; all
    compile/decompile behavior is inherited from BaseTTXConverter."""

    pass
|
||||
@@ -0,0 +1,13 @@
|
||||
""" TSI{B,C,D,J,P,S,V} are private tables used by Microsoft Visual TrueType (VTT)
|
||||
tool to store its table source data.
|
||||
|
||||
TSID contains the source text for the ``GDEF`` table.
|
||||
|
||||
See also https://learn.microsoft.com/en-us/typography/tools/vtt/tsi-tables
|
||||
"""
|
||||
|
||||
from .T_S_I_V_ import table_T_S_I_V_
|
||||
|
||||
|
||||
class table_T_S_I_D_(table_T_S_I_V_):
    """VTT source text for the ``GDEF`` table; all behavior is inherited
    unchanged from table_T_S_I_V_."""

    pass
|
||||
@@ -0,0 +1,13 @@
|
||||
""" TSI{B,C,D,J,P,S,V} are private tables used by Microsoft Visual TrueType (VTT)
|
||||
tool to store its table source data.
|
||||
|
||||
TSIJ contains the source text for the ``JSTF`` table.
|
||||
|
||||
See also https://learn.microsoft.com/en-us/typography/tools/vtt/tsi-tables
|
||||
"""
|
||||
|
||||
from .T_S_I_V_ import table_T_S_I_V_
|
||||
|
||||
|
||||
class table_T_S_I_J_(table_T_S_I_V_):
    """VTT source text for the ``JSTF`` table; all behavior is inherited
    unchanged from table_T_S_I_V_."""

    pass
|
||||
@@ -0,0 +1,13 @@
|
||||
""" TSI{B,C,D,J,P,S,V} are private tables used by Microsoft Visual TrueType (VTT)
|
||||
tool to store its table source data.
|
||||
|
||||
TSIP contains the source text for the ``GPOS`` table.
|
||||
|
||||
See also https://learn.microsoft.com/en-us/typography/tools/vtt/tsi-tables
|
||||
"""
|
||||
|
||||
from .T_S_I_V_ import table_T_S_I_V_
|
||||
|
||||
|
||||
class table_T_S_I_P_(table_T_S_I_V_):
    """VTT source text for the ``GPOS`` table; all behavior is inherited
    unchanged from table_T_S_I_V_."""

    pass
|
||||
@@ -0,0 +1,13 @@
|
||||
""" TSI{B,C,D,J,P,S,V} are private tables used by Microsoft Visual TrueType (VTT)
|
||||
tool to store its table source data.
|
||||
|
||||
TSIS contains the source text for the ``GSUB`` table.
|
||||
|
||||
See also https://learn.microsoft.com/en-us/typography/tools/vtt/tsi-tables
|
||||
"""
|
||||
|
||||
from .T_S_I_V_ import table_T_S_I_V_
|
||||
|
||||
|
||||
class table_T_S_I_S_(table_T_S_I_V_):
    """VTT source text for the ``GSUB`` table; all behavior is inherited
    unchanged from table_T_S_I_V_."""

    pass
|
||||
@@ -0,0 +1,26 @@
|
||||
""" TSI{B,C,D,J,P,S,V} are private tables used by Microsoft Visual TrueType (VTT)
|
||||
tool to store its table source data.
|
||||
|
||||
See also https://learn.microsoft.com/en-us/typography/tools/vtt/tsi-tables
|
||||
"""
|
||||
|
||||
from fontTools.misc.textTools import strjoin, tobytes, tostr
|
||||
from . import asciiTable
|
||||
|
||||
|
||||
class table_T_S_I_V_(asciiTable.asciiTable):
    """Shared base for the plain-text VTT source tables (subclassed by
    TSIB, TSID, TSIJ, TSIP and TSIS): round-trips the raw source text in
    ``self.data`` through a <source> element in TTX."""

    def toXML(self, writer, ttFont):
        data = tostr(self.data)
        # removing null bytes. XXX needed??
        data = data.split("\0")
        data = strjoin(data)
        writer.begintag("source")
        writer.newline()
        # Show '\r' line endings as '\n' in the XML output.
        writer.write_noindent(data.replace("\r", "\n"))
        writer.newline()
        writer.endtag("source")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        # Drop the first and last lines (the newlines emitted around the
        # <source> tag by toXML) and restore '\r' line endings.
        lines = strjoin(content).split("\n")
        self.data = tobytes("\r".join(lines[1:-1]))
|
||||
@@ -0,0 +1,70 @@
|
||||
"""TSI{0,1,2,3,5} are private tables used by Microsoft Visual TrueType (VTT)
|
||||
tool to store its hinting source data.
|
||||
|
||||
TSI0 is the index table containing the lengths and offsets for the glyph
|
||||
programs and 'extra' programs ('fpgm', 'prep', and 'cvt') that are contained
|
||||
in the TSI1 table.
|
||||
|
||||
See also https://learn.microsoft.com/en-us/typography/tools/vtt/tsi-tables
|
||||
"""
|
||||
|
||||
import logging
|
||||
import struct
|
||||
|
||||
from . import DefaultTable
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
tsi0Format = ">HHL"
|
||||
|
||||
|
||||
def fixlongs(glyphID, textLength, textOffset):
    """Coerce glyphID and textLength to plain ints; textOffset is passed
    through unchanged. (Helper kept from the Python 2 era, when struct
    fields could come back as longs.)"""
    return int(glyphID), int(textLength), textOffset
|
||||
|
||||
|
||||
class table_T_S_I__0(DefaultTable.DefaultTable):
    """Index for the TSI1 glyph-program table: a list of
    (glyphID, textLength, textOffset) records, one per glyph, followed by a
    magic-number record and 4 'extra' program records."""

    # The index data is derived from TSI1 during compilation (see set()).
    dependencies = ["TSI1"]

    def decompile(self, data, ttFont):
        numGlyphs = ttFont["maxp"].numGlyphs
        indices = []
        size = struct.calcsize(tsi0Format)
        numEntries = len(data) // size
        # Expected layout: one record per glyph + 5 trailing records
        # (magic + 4 extras).
        if numEntries != numGlyphs + 5:
            diff = numEntries - numGlyphs - 5
            log.warning(
                "Number of glyphPrograms differs from the number of glyphs in the font "
                f"by {abs(diff)} ({numEntries - 5} programs vs. {numGlyphs} glyphs)."
            )
        for _ in range(numEntries):
            glyphID, textLength, textOffset = fixlongs(
                *struct.unpack(tsi0Format, data[:size])
            )
            indices.append((glyphID, textLength, textOffset))
            data = data[size:]
        assert len(data) == 0
        # The 5th record from the end separates glyph programs from extras.
        assert indices[-5] == (0xFFFE, 0, 0xABFC1F34), "bad magic number"
        self.indices = indices[:-5]
        self.extra_indices = indices[-4:]

    def compile(self, ttFont):
        if not hasattr(self, "indices"):
            # We have no corresponding table (TSI1 or TSI3); let's return
            # no data, which effectively means "ignore us".
            return b""
        data = b""
        for index, textLength, textOffset in self.indices:
            data = data + struct.pack(tsi0Format, index, textLength, textOffset)
        # Magic-number separator record between glyph and extra programs.
        data = data + struct.pack(tsi0Format, 0xFFFE, 0, 0xABFC1F34)
        for index, textLength, textOffset in self.extra_indices:
            data = data + struct.pack(tsi0Format, index, textLength, textOffset)
        return data

    def set(self, indices, extra_indices):
        # gets called by 'TSI1' or 'TSI3'
        self.indices = indices
        self.extra_indices = extra_indices

    def toXML(self, writer, ttFont):
        writer.comment("This table will be calculated by the compiler")
        writer.newline()
|
||||
@@ -0,0 +1,163 @@
|
||||
""" TSI{0,1,2,3,5} are private tables used by Microsoft Visual TrueType (VTT)
|
||||
tool to store its hinting source data.
|
||||
|
||||
TSI1 contains the text of the glyph programs in the form of low-level assembly
|
||||
code, as well as the 'extra' programs 'fpgm', 'ppgm' (i.e. 'prep'), and 'cvt'.
|
||||
|
||||
See also https://learn.microsoft.com/en-us/typography/tools/vtt/tsi-tables
|
||||
"""
|
||||
|
||||
from . import DefaultTable
|
||||
from fontTools.misc.loggingTools import LogMixin
|
||||
from fontTools.misc.textTools import strjoin, tobytes, tostr
|
||||
|
||||
|
||||
class table_T_S_I__1(LogMixin, DefaultTable.DefaultTable):
    """VTT glyph-program source text, indexed by the table named in
    `indextable`. Decompiles into two dicts: `glyphPrograms` (keyed by
    glyph name) and `extraPrograms` (keyed by the names in `extras`)."""

    # Pseudo-glyph IDs of the 'extra' program records.
    extras = {0xFFFA: "ppgm", 0xFFFB: "cvt", 0xFFFC: "reserved", 0xFFFD: "fpgm"}

    indextable = "TSI0"

    def decompile(self, data, ttFont):
        totalLength = len(data)
        indextable = ttFont[self.indextable]
        # First pass handles the per-glyph records, second the extras.
        for indices, isExtra in zip(
            (indextable.indices, indextable.extra_indices), (False, True)
        ):
            programs = {}
            for i, (glyphID, textLength, textOffset) in enumerate(indices):
                if isExtra:
                    name = self.extras[glyphID]
                else:
                    name = ttFont.getGlyphName(glyphID)
                if textOffset > totalLength:
                    self.log.warning("textOffset > totalLength; %r skipped" % name)
                    continue
                if textLength < 0x8000:
                    # If the length stored in the record is less than 32768, then use
                    # that as the length of the record.
                    pass
                elif textLength == 0x8000:
                    # If the length is 32768, compute the actual length as follows:
                    isLast = i == (len(indices) - 1)
                    if isLast:
                        if isExtra:
                            # For the last "extra" record (the very last record of the
                            # table), the length is the difference between the total
                            # length of the TSI1 table and the textOffset of the final
                            # record.
                            nextTextOffset = totalLength
                        else:
                            # For the last "normal" record (the last record just prior
                            # to the record containing the "magic number"), the length
                            # is the difference between the textOffset of the record
                            # following the "magic number" (0xFFFE) record (i.e. the
                            # first "extra" record), and the textOffset of the last
                            # "normal" record.
                            nextTextOffset = indextable.extra_indices[0][2]
                    else:
                        # For all other records with a length of 0x8000, the length is
                        # the difference between the textOffset of the record in
                        # question and the textOffset of the next record.
                        nextTextOffset = indices[i + 1][2]
                    assert nextTextOffset >= textOffset, "entries not sorted by offset"
                    if nextTextOffset > totalLength:
                        self.log.warning(
                            "nextTextOffset > totalLength; %r truncated" % name
                        )
                        nextTextOffset = totalLength
                    textLength = nextTextOffset - textOffset
                else:
                    from fontTools import ttLib

                    raise ttLib.TTLibError(
                        "%r textLength (%d) must not be > 32768" % (name, textLength)
                    )
                text = data[textOffset : textOffset + textLength]
                assert len(text) == textLength
                text = tostr(text, encoding="utf-8")
                # Empty programs are simply omitted from the dict.
                if text:
                    programs[name] = text
            if isExtra:
                self.extraPrograms = programs
            else:
                self.glyphPrograms = programs

    def compile(self, ttFont):
        if not hasattr(self, "glyphPrograms"):
            self.glyphPrograms = {}
            self.extraPrograms = {}
        data = b""
        # The index records are pushed into the companion index table
        # (TSI0/TSI2) via indextable.set() at the end.
        indextable = ttFont[self.indextable]
        glyphNames = ttFont.getGlyphOrder()

        indices = []
        for i, name in enumerate(glyphNames):
            if len(data) % 2:
                data = (
                    data + b"\015"
                )  # align on 2-byte boundaries, fill with return chars. Yum.
            if name in self.glyphPrograms:
                text = tobytes(self.glyphPrograms[name], encoding="utf-8")
            else:
                text = b""
            textLength = len(text)
            # Lengths >= 32768 are stored as the 0x8000 marker; the real
            # length is reconstructed from offsets at decompile time.
            if textLength >= 0x8000:
                textLength = 0x8000
            indices.append((i, textLength, len(data)))
            data = data + text

        extra_indices = []
        for code, name in sorted(self.extras.items()):
            if len(data) % 2:
                data = (
                    data + b"\015"
                )  # align on 2-byte boundaries, fill with return chars.
            if name in self.extraPrograms:
                text = tobytes(self.extraPrograms[name], encoding="utf-8")
            else:
                text = b""
            textLength = len(text)
            if textLength >= 0x8000:
                textLength = 0x8000
            extra_indices.append((code, textLength, len(data)))
            data = data + text
        indextable.set(indices, extra_indices)
        return data

    def toXML(self, writer, ttFont):
        names = sorted(self.glyphPrograms.keys())
        writer.newline()
        for name in names:
            text = self.glyphPrograms[name]
            if not text:
                continue
            writer.begintag("glyphProgram", name=name)
            writer.newline()
            writer.write_noindent(text.replace("\r", "\n"))
            writer.newline()
            writer.endtag("glyphProgram")
            writer.newline()
            writer.newline()
        extra_names = sorted(self.extraPrograms.keys())
        for name in extra_names:
            text = self.extraPrograms[name]
            if not text:
                continue
            writer.begintag("extraProgram", name=name)
            writer.newline()
            writer.write_noindent(text.replace("\r", "\n"))
            writer.newline()
            writer.endtag("extraProgram")
            writer.newline()
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if not hasattr(self, "glyphPrograms"):
            self.glyphPrograms = {}
            self.extraPrograms = {}
        # Drop the first/last writer-added newlines and restore '\r' endings.
        lines = strjoin(content).replace("\r", "\n").split("\n")
        text = "\r".join(lines[1:-1])
        if name == "glyphProgram":
            self.glyphPrograms[attrs["name"]] = text
        elif name == "extraProgram":
            self.extraPrograms[attrs["name"]] = text
|
||||
@@ -0,0 +1,17 @@
|
||||
""" TSI{0,1,2,3,5} are private tables used by Microsoft Visual TrueType (VTT)
|
||||
tool to store its hinting source data.
|
||||
|
||||
TSI2 is the index table containing the lengths and offsets for the glyph
|
||||
programs that are contained in the TSI3 table. It uses the same format as
|
||||
the TSI0 table.
|
||||
|
||||
See also https://learn.microsoft.com/en-us/typography/tools/vtt/tsi-tables
|
||||
"""
|
||||
|
||||
from fontTools import ttLib
|
||||
|
||||
superclass = ttLib.getTableClass("TSI0")
|
||||
|
||||
|
||||
class table_T_S_I__2(superclass):
    """VTT glyph-program index table

    Index (lengths and offsets) for the glyph programs stored in the
    ``TSI3`` table; it uses the same format as ``TSI0`` (its superclass).

    See also https://learn.microsoft.com/en-us/typography/tools/vtt/tsi-tables
    """

    # Must be compiled after TSI3, whose contents this index describes.
    dependencies = ["TSI3"]
@@ -0,0 +1,22 @@
|
||||
""" TSI{0,1,2,3,5} are private tables used by Microsoft Visual TrueType (VTT)
|
||||
tool to store its hinting source data.
|
||||
|
||||
TSI3 contains the text of the glyph programs in the form of 'VTTTalk' code.
|
||||
|
||||
See also https://learn.microsoft.com/en-us/typography/tools/vtt/tsi-tables
|
||||
"""
|
||||
|
||||
from fontTools import ttLib
|
||||
|
||||
superclass = ttLib.getTableClass("TSI1")
|
||||
|
||||
|
||||
class table_T_S_I__3(superclass):
    """VTT 'VTTTalk' glyph-program table

    Contains the text of the glyph programs in the form of 'VTTTalk' code;
    storage logic is inherited from ``TSI1`` (its superclass).

    See also https://learn.microsoft.com/en-us/typography/tools/vtt/tsi-tables
    """

    # Non-glyph program slots, keyed by their reserved pseudo glyph codes.
    extras = {
        0xFFFA: "reserved0",
        0xFFFB: "reserved1",
        0xFFFC: "reserved2",
        0xFFFD: "reserved3",
    }

    # Companion table holding this table's lengths and offsets.
    indextable = "TSI2"
||||
@@ -0,0 +1,60 @@
|
||||
"""TSI{0,1,2,3,5} are private tables used by Microsoft Visual TrueType (VTT)
|
||||
tool to store its hinting source data.
|
||||
|
||||
TSI5 contains the VTT character groups.
|
||||
|
||||
See also https://learn.microsoft.com/en-us/typography/tools/vtt/tsi-tables
|
||||
"""
|
||||
|
||||
import array
|
||||
import logging
|
||||
import sys
|
||||
|
||||
from fontTools.misc.textTools import safeEval
|
||||
|
||||
from . import DefaultTable
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class table_T_S_I__5(DefaultTable.DefaultTable):
    """VTT character-group table

    Stores one uint16 group number per glyph (the VTT character groups),
    kept in ``self.glyphGrouping`` as a glyph-name -> int mapping.
    """

    def decompile(self, data, ttFont):
        # The binary data is a plain array of big-endian uint16s, one per glyph.
        numGlyphs = ttFont["maxp"].numGlyphs
        a = array.array("H")
        a.frombytes(data)
        if sys.byteorder != "big":
            a.byteswap()
        self.glyphGrouping = {}
        numEntries = len(data) // 2
        # Tolerate a mismatch with maxp.numGlyphs: warn, then decode
        # whatever entries are actually present.
        if numEntries != numGlyphs:
            diff = numEntries - numGlyphs
            log.warning(
                "Number of entries differs from the number of glyphs in the font "
                f"by {abs(diff)} ({numEntries} entries vs. {numGlyphs} glyphs)."
            )
        for i in range(numEntries):
            self.glyphGrouping[ttFont.getGlyphName(i)] = a[i]

    def compile(self, ttFont):
        # Emit one entry per glyph in glyph order; glyphs with no recorded
        # group default to 0.
        glyphNames = ttFont.getGlyphOrder()
        a = array.array("H")
        for glyphName in glyphNames:
            a.append(self.glyphGrouping.get(glyphName, 0))
        if sys.byteorder != "big":
            a.byteswap()
        return a.tobytes()

    def toXML(self, writer, ttFont):
        # One <glyphgroup> element per glyph, sorted by glyph name for
        # deterministic output.
        names = sorted(self.glyphGrouping.keys())
        for glyphName in names:
            writer.simpletag(
                "glyphgroup", name=glyphName, value=self.glyphGrouping[glyphName]
            )
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        # Elements other than <glyphgroup> are silently ignored.
        if not hasattr(self, "glyphGrouping"):
            self.glyphGrouping = {}
        if name != "glyphgroup":
            return
        self.glyphGrouping[attrs["name"]] = safeEval(attrs["value"])
||||
@@ -0,0 +1,14 @@
|
||||
from . import asciiTable
|
||||
|
||||
|
||||
class table_T_T_F_A_(asciiTable.asciiTable):
    """ttfautohint parameters table

    The ``TTFA`` table is used by the free-software `ttfautohint` program
    to record the parameters that `ttfautohint` was called with when it
    was used to auto-hint the font.

    See also http://freetype.org/ttfautohint/doc/ttfautohint.html#miscellaneous-1
    """

    # Plain ASCII text; all (de)compile behavior is inherited from asciiTable.
    pass
||||
@@ -0,0 +1,884 @@
|
||||
from fontTools.misc.fixedTools import (
|
||||
fixedToFloat as fi2fl,
|
||||
floatToFixed as fl2fi,
|
||||
floatToFixedToStr as fl2str,
|
||||
strToFixedToFloat as str2fl,
|
||||
otRound,
|
||||
)
|
||||
from fontTools.misc.textTools import safeEval
|
||||
import array
|
||||
from collections import Counter, defaultdict
|
||||
import io
|
||||
import logging
|
||||
import struct
|
||||
import sys
|
||||
|
||||
|
||||
# https://www.microsoft.com/typography/otspec/otvarcommonformats.htm
|
||||
|
||||
EMBEDDED_PEAK_TUPLE = 0x8000
|
||||
INTERMEDIATE_REGION = 0x4000
|
||||
PRIVATE_POINT_NUMBERS = 0x2000
|
||||
|
||||
DELTAS_ARE_ZERO = 0x80
|
||||
DELTAS_ARE_WORDS = 0x40
|
||||
DELTAS_ARE_LONGS = 0xC0
|
||||
DELTAS_SIZE_MASK = 0xC0
|
||||
DELTA_RUN_COUNT_MASK = 0x3F
|
||||
|
||||
POINTS_ARE_WORDS = 0x80
|
||||
POINT_RUN_COUNT_MASK = 0x7F
|
||||
|
||||
TUPLES_SHARE_POINT_NUMBERS = 0x8000
|
||||
TUPLE_COUNT_MASK = 0x0FFF
|
||||
TUPLE_INDEX_MASK = 0x0FFF
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class TupleVariation(object):
|
||||
def __init__(self, axes, coordinates):
    # axes: dict mapping axis tag -> (minValue, peak, maxValue) triple
    # (see toXML/fromXML); copied so callers can't mutate it behind our back.
    self.axes = axes.copy()
    # coordinates: per-point deltas -- (x, y) tuples for gvar, numbers for
    # cvar, or None meaning "no explicit delta stored for this point".
    self.coordinates = list(coordinates)
||||
|
||||
def __repr__(self):
|
||||
axes = ",".join(
|
||||
sorted(["%s=%s" % (name, value) for (name, value) in self.axes.items()])
|
||||
)
|
||||
return "<TupleVariation %s %s>" % (axes, self.coordinates)
|
||||
|
||||
def __eq__(self, other):
|
||||
return self.coordinates == other.coordinates and self.axes == other.axes
|
||||
|
||||
def getUsedPoints(self):
|
||||
# Empty set means "all points used".
|
||||
if None not in self.coordinates:
|
||||
return frozenset()
|
||||
used = frozenset([i for i, p in enumerate(self.coordinates) if p is not None])
|
||||
# Return None if no points used.
|
||||
return used if used else None
|
||||
|
||||
def hasImpact(self):
|
||||
"""Returns True if this TupleVariation has any visible impact.
|
||||
|
||||
If the result is False, the TupleVariation can be omitted from the font
|
||||
without making any visible difference.
|
||||
"""
|
||||
return any(c is not None for c in self.coordinates)
|
||||
|
||||
def toXML(self, writer, axisTags):
    """Write this variation as a <tuple> element: coords, then deltas."""
    writer.begintag("tuple")
    writer.newline()
    for axis in axisTags:
        value = self.axes.get(axis)
        if value is not None:
            minValue, value, maxValue = value
            # When the (min, max) pair equals the defaults implied by the
            # peak, only the peak value needs to be written out.
            defaultMinValue = min(value, 0.0)  # -0.3 --> -0.3; 0.7 --> 0.0
            defaultMaxValue = max(value, 0.0)  # -0.3 --> 0.0; 0.7 --> 0.7
            if minValue == defaultMinValue and maxValue == defaultMaxValue:
                writer.simpletag("coord", axis=axis, value=fl2str(value, 14))
            else:
                attrs = [
                    ("axis", axis),
                    ("min", fl2str(minValue, 14)),
                    ("value", fl2str(value, 14)),
                    ("max", fl2str(maxValue, 14)),
                ]
                writer.simpletag("coord", attrs)
            writer.newline()
    wrote_any_deltas = False
    for i, delta in enumerate(self.coordinates):
        if type(delta) == tuple and len(delta) == 2:
            # gvar-style per-point (x, y) delta.
            writer.simpletag("delta", pt=i, x=delta[0], y=delta[1])
            writer.newline()
            wrote_any_deltas = True
        elif type(delta) == int:
            # cvar-style scalar CVT delta.
            writer.simpletag("delta", cvt=i, value=delta)
            writer.newline()
            wrote_any_deltas = True
        elif delta is not None:
            # Unrecognized delta type: log it and leave an XML comment marker.
            log.error("bad delta format")
            writer.comment("bad delta #%d" % i)
            writer.newline()
            wrote_any_deltas = True
    if not wrote_any_deltas:
        writer.comment("no deltas")
        writer.newline()
    writer.endtag("tuple")
    writer.newline()
||||
|
||||
def fromXML(self, name, attrs, _content):
    """Parse one child element (<coord> or <delta>) of a <tuple>."""
    if name == "coord":
        axis = attrs["axis"]
        value = str2fl(attrs["value"], 14)
        # Missing min/max attributes mean the defaults implied by the peak
        # (mirrors the omission logic in toXML).
        defaultMinValue = min(value, 0.0)  # -0.3 --> -0.3; 0.7 --> 0.0
        defaultMaxValue = max(value, 0.0)  # -0.3 --> 0.0; 0.7 --> 0.7
        minValue = str2fl(attrs.get("min", defaultMinValue), 14)
        maxValue = str2fl(attrs.get("max", defaultMaxValue), 14)
        self.axes[axis] = (minValue, value, maxValue)
    elif name == "delta":
        if "pt" in attrs:
            # gvar-style (x, y) point delta.
            point = safeEval(attrs["pt"])
            x = safeEval(attrs["x"])
            y = safeEval(attrs["y"])
            self.coordinates[point] = (x, y)
        elif "cvt" in attrs:
            # cvar-style scalar CVT delta.
            cvt = safeEval(attrs["cvt"])
            value = safeEval(attrs["value"])
            self.coordinates[cvt] = value
        else:
            log.warning("bad delta format: %s" % ", ".join(sorted(attrs.keys())))
||||
|
||||
def compile(
    self, axisTags, sharedCoordIndices=None, pointData=None, *, optimizeSize=True
):
    """Compile this variation into its binary form.

    Returns a (tupleData, auxData) pair: tupleData is the tuple variation
    header (data size, flags, and any embedded peak/intermediate coords),
    auxData is the serialized private point numbers plus the deltas.
    Returns (b"", b"") when there is nothing to encode.

    sharedCoordIndices maps compiled peak coords to shared-tuple indices.
    Fix: its default used to be a mutable `{}`; although never mutated
    here, that is the classic shared-default pitfall -- replaced by None.
    """
    if sharedCoordIndices is None:
        sharedCoordIndices = {}
    assert set(self.axes.keys()) <= set(axisTags), (
        "Unknown axis tag found.",
        self.axes.keys(),
        axisTags,
    )

    tupleData = []
    auxData = []

    if pointData is None:
        usedPoints = self.getUsedPoints()
        if usedPoints is None:  # Nothing to encode
            return b"", b""
        pointData = self.compilePoints(usedPoints)

    coord = self.compileCoord(axisTags)
    flags = sharedCoordIndices.get(coord)
    if flags is None:
        # Peak tuple is not shared: embed it in the header.
        flags = EMBEDDED_PEAK_TUPLE
        tupleData.append(coord)

    intermediateCoord = self.compileIntermediateCoord(axisTags)
    if intermediateCoord is not None:
        flags |= INTERMEDIATE_REGION
        tupleData.append(intermediateCoord)

    # pointData of b'' implies "use shared points".
    if pointData:
        flags |= PRIVATE_POINT_NUMBERS
        auxData.append(pointData)

    auxData.append(self.compileDeltas(optimizeSize=optimizeSize))
    auxData = b"".join(auxData)

    tupleData.insert(0, struct.pack(">HH", len(auxData), flags))
    return b"".join(tupleData), auxData
||||
|
||||
def compileCoord(self, axisTags):
    """Pack the peak coordinates, one big-endian F2Dot14 per axis in axisTags.

    Axes this variation does not mention are written as zero.
    """
    packed = bytearray()
    for tag in axisTags:
        triple = self.axes.get(tag)
        if triple is None:
            packed += b"\0\0"
        else:
            packed += struct.pack(">h", fl2fi(triple[1], 14))
    return bytes(packed)
||||
|
||||
def compileIntermediateCoord(self, axisTags):
    """Pack the start/end tuples, or return None when they are implied.

    The intermediate region only needs to be stored when some axis has a
    (min, max) pair differing from the defaults implied by its peak.
    """
    needed = False
    for tag in axisTags:
        minValue, peak, maxValue = self.axes.get(tag, (0.0, 0.0, 0.0))
        # Implied defaults: min(peak, 0) .. max(peak, 0).
        if minValue != min(peak, 0.0) or maxValue != max(peak, 0.0):
            needed = True
            break
    if not needed:
        return None
    starts = bytearray()
    ends = bytearray()
    for tag in axisTags:
        minValue, _, maxValue = self.axes.get(tag, (0.0, 0.0, 0.0))
        starts += struct.pack(">h", fl2fi(minValue, 14))
        ends += struct.pack(">h", fl2fi(maxValue, 14))
    return bytes(starts + ends)
||||
|
||||
@staticmethod
def decompileCoord_(axisTags, data, offset):
    """Read one F2Dot14 per axis; return ({axis: float}, newOffset)."""
    end = offset + 2 * len(axisTags)
    # Unpack all axis values in one go instead of per-axis slices.
    values = struct.unpack(">%dh" % len(axisTags), data[offset:end])
    coord = {tag: fi2fl(raw, 14) for tag, raw in zip(axisTags, values)}
    return coord, end
||||
|
||||
@staticmethod
def compilePoints(points):
    """Serialize a set of point numbers into the packed 'gvar' format."""
    # If the set consists of all points in the glyph, it gets encoded with
    # a special encoding: a single zero byte.
    #
    # To use this optimization, points passed in must be empty set.
    # The following two lines are not strictly necessary as the main code
    # below would emit the same. But this is most common and faster.
    if not points:
        return b"\0"

    # In the 'gvar' table, the packing of point numbers is a little surprising.
    # It consists of multiple runs, each being a delta-encoded list of integers.
    # For example, the point set {17, 18, 19, 20, 21, 22, 23} gets encoded as
    # [6, 17, 1, 1, 1, 1, 1, 1]. The first value (6) is the run length minus 1.
    # There are two types of runs, with values being either 8 or 16 bit unsigned
    # integers.
    points = list(points)
    points.sort()
    numPoints = len(points)

    result = bytearray()
    # The binary representation starts with the total number of points in the set,
    # encoded into one or two bytes depending on the value.
    if numPoints < 0x80:
        result.append(numPoints)
    else:
        result.append((numPoints >> 8) | 0x80)
        result.append(numPoints & 0xFF)

    MAX_RUN_LENGTH = 127
    pos = 0
    lastValue = 0
    while pos < numPoints:
        runLength = 0

        # Reserve the run-header byte; it is patched below once the run
        # length (and encoding width) is known.
        headerPos = len(result)
        result.append(0)

        useByteEncoding = None
        while pos < numPoints and runLength <= MAX_RUN_LENGTH:
            curValue = points[pos]
            delta = curValue - lastValue
            # The first delta of a run decides the encoding width.
            if useByteEncoding is None:
                useByteEncoding = 0 <= delta <= 0xFF
            if useByteEncoding and (delta > 0xFF or delta < 0):
                # we need to start a new run (which will not use byte encoding)
                break
            # TODO This never switches back to a byte-encoding from a short-encoding.
            # That's suboptimal.
            if useByteEncoding:
                result.append(delta)
            else:
                result.append(delta >> 8)
                result.append(delta & 0xFF)
            lastValue = curValue
            pos += 1
            runLength += 1
        if useByteEncoding:
            result[headerPos] = runLength - 1
        else:
            result[headerPos] = (runLength - 1) | POINTS_ARE_WORDS

    return result
||||
|
||||
@staticmethod
def decompilePoints_(numPoints, data, offset, tableTag):
    """(numPoints, data, offset, tableTag) --> ([point1, point2, ...], newOffset)"""
    assert tableTag in ("cvar", "gvar")
    pos = offset
    # Total point count: one byte, or two when the high bit is set.
    numPointsInData = data[pos]
    pos += 1
    if (numPointsInData & POINTS_ARE_WORDS) != 0:
        numPointsInData = (numPointsInData & POINT_RUN_COUNT_MASK) << 8 | data[pos]
        pos += 1
    # A stored count of zero is the special "all points used" encoding.
    if numPointsInData == 0:
        return (range(numPoints), pos)

    result = []
    while len(result) < numPointsInData:
        runHeader = data[pos]
        pos += 1
        numPointsInRun = (runHeader & POINT_RUN_COUNT_MASK) + 1
        point = 0  # NOTE(review): unused local, kept as-is
        # The run header's high bit selects 16- vs 8-bit point deltas.
        if (runHeader & POINTS_ARE_WORDS) != 0:
            points = array.array("H")
            pointsSize = numPointsInRun * 2
        else:
            points = array.array("B")
            pointsSize = numPointsInRun
        points.frombytes(data[pos : pos + pointsSize])
        if sys.byteorder != "big":
            points.byteswap()

        assert len(points) == numPointsInRun
        pos += pointsSize

        result.extend(points)

    # Convert relative to absolute
    absolute = []
    current = 0
    for delta in result:
        current += delta
        absolute.append(current)
    result = absolute
    del absolute

    # Out-of-range point numbers are reported but still returned to the caller.
    badPoints = {str(p) for p in result if p < 0 or p >= numPoints}
    if badPoints:
        log.warning(
            "point %s out of range in '%s' table"
            % (",".join(sorted(badPoints)), tableTag)
        )
    return (result, pos)
||||
|
||||
def compileDeltas(self, optimizeSize=True):
|
||||
deltaX = []
|
||||
deltaY = []
|
||||
if self.getCoordWidth() == 2:
|
||||
for c in self.coordinates:
|
||||
if c is None:
|
||||
continue
|
||||
deltaX.append(c[0])
|
||||
deltaY.append(c[1])
|
||||
else:
|
||||
for c in self.coordinates:
|
||||
if c is None:
|
||||
continue
|
||||
deltaX.append(c)
|
||||
bytearr = bytearray()
|
||||
self.compileDeltaValues_(deltaX, bytearr, optimizeSize=optimizeSize)
|
||||
self.compileDeltaValues_(deltaY, bytearr, optimizeSize=optimizeSize)
|
||||
return bytearr
|
||||
|
||||
@staticmethod
def compileDeltaValues_(deltas, bytearr=None, *, optimizeSize=True):
    """[value1, value2, value3, ...] --> bytearray

    Emits a sequence of runs. Each run starts with a
    byte-sized header whose 6 least significant bits
    (header & 0x3F) indicate how many values are encoded
    in this run. The stored length is the actual length
    minus one; run lengths are thus in the range [1..64].
    If the header byte has its most significant bit (0x80)
    set, all values in this run are zero, and no data
    follows. Otherwise, the header byte is followed by
    ((header & 0x3F) + 1) signed values. If (header &
    0x40) is clear, the delta values are stored as signed
    bytes; if (header & 0x40) is set, the delta values are
    signed 16-bit integers.
    """  # Explaining the format because the 'gvar' spec is hard to understand.
    if bytearr is None:
        bytearr = bytearray()

    pos = 0
    numDeltas = len(deltas)

    if optimizeSize:
        # Greedy per-run dispatch: each helper consumes as many values as
        # its encoding can hold and returns the new position.
        while pos < numDeltas:
            value = deltas[pos]
            if value == 0:
                pos = TupleVariation.encodeDeltaRunAsZeroes_(deltas, pos, bytearr)
            elif -128 <= value <= 127:
                pos = TupleVariation.encodeDeltaRunAsBytes_(deltas, pos, bytearr)
            elif -32768 <= value <= 32767:
                pos = TupleVariation.encodeDeltaRunAsWords_(deltas, pos, bytearr)
            else:
                pos = TupleVariation.encodeDeltaRunAsLongs_(deltas, pos, bytearr)
    else:
        # Single-width encoding: pick the narrowest width that fits the
        # whole list and emit it in one pass.
        minVal, maxVal = min(deltas), max(deltas)
        if minVal == 0 == maxVal:
            pos = TupleVariation.encodeDeltaRunAsZeroes_(deltas, pos, bytearr)
        elif -128 <= minVal <= maxVal <= 127:
            pos = TupleVariation.encodeDeltaRunAsBytes_(
                deltas, pos, bytearr, optimizeSize=False
            )
        elif -32768 <= minVal <= maxVal <= 32767:
            pos = TupleVariation.encodeDeltaRunAsWords_(
                deltas, pos, bytearr, optimizeSize=False
            )
        else:
            pos = TupleVariation.encodeDeltaRunAsLongs_(
                deltas, pos, bytearr, optimizeSize=False
            )

    assert pos == numDeltas, (pos, numDeltas)

    return bytearr
||||
|
||||
@staticmethod
def encodeDeltaRunAsZeroes_(deltas, offset, bytearr):
    """Encode the run of zeroes starting at `offset`; return the index
    just past the run. Zero runs store headers only, no payload."""
    end = offset
    total = len(deltas)
    while end < total and deltas[end] == 0:
        end += 1
    remaining = end - offset
    # Each header byte can cover at most 64 zeroes.
    while remaining >= 64:
        bytearr.append(DELTAS_ARE_ZERO | 63)
        remaining -= 64
    if remaining:
        bytearr.append(DELTAS_ARE_ZERO | (remaining - 1))
    return end
||||
|
||||
@staticmethod
|
||||
def encodeDeltaRunAsBytes_(deltas, offset, bytearr, optimizeSize=True):
|
||||
pos = offset
|
||||
numDeltas = len(deltas)
|
||||
while pos < numDeltas:
|
||||
value = deltas[pos]
|
||||
if not (-128 <= value <= 127):
|
||||
break
|
||||
# Within a byte-encoded run of deltas, a single zero
|
||||
# is best stored literally as 0x00 value. However,
|
||||
# if are two or more zeroes in a sequence, it is
|
||||
# better to start a new run. For example, the sequence
|
||||
# of deltas [15, 15, 0, 15, 15] becomes 6 bytes
|
||||
# (04 0F 0F 00 0F 0F) when storing the zero value
|
||||
# literally, but 7 bytes (01 0F 0F 80 01 0F 0F)
|
||||
# when starting a new run.
|
||||
if (
|
||||
optimizeSize
|
||||
and value == 0
|
||||
and pos + 1 < numDeltas
|
||||
and deltas[pos + 1] == 0
|
||||
):
|
||||
break
|
||||
pos += 1
|
||||
runLength = pos - offset
|
||||
while runLength >= 64:
|
||||
bytearr.append(63)
|
||||
bytearr.extend(array.array("b", deltas[offset : offset + 64]))
|
||||
offset += 64
|
||||
runLength -= 64
|
||||
if runLength:
|
||||
bytearr.append(runLength - 1)
|
||||
bytearr.extend(array.array("b", deltas[offset:pos]))
|
||||
return pos
|
||||
|
||||
@staticmethod
def encodeDeltaRunAsWords_(deltas, offset, bytearr, optimizeSize=True):
    """Encode a run of 16-bit deltas starting at `offset`; return the
    index of the first value not consumed."""
    pos = offset
    numDeltas = len(deltas)
    while pos < numDeltas:
        value = deltas[pos]

        # Within a word-encoded run of deltas, it is easiest
        # to start a new run (with a different encoding)
        # whenever we encounter a zero value. For example,
        # the sequence [0x6666, 0, 0x7777] needs 7 bytes when
        # storing the zero literally (42 66 66 00 00 77 77),
        # and equally 7 bytes when starting a new run
        # (40 66 66 80 40 77 77).
        if optimizeSize and value == 0:
            break

        # Within a word-encoded run of deltas, a single value
        # in the range (-128..127) should be encoded literally
        # because it is more compact. For example, the sequence
        # [0x6666, 2, 0x7777] becomes 7 bytes when storing
        # the value literally (42 66 66 00 02 77 77), but 8 bytes
        # when starting a new run (40 66 66 00 02 40 77 77).
        if (
            optimizeSize
            and (-128 <= value <= 127)
            and pos + 1 < numDeltas
            and (-128 <= deltas[pos + 1] <= 127)
        ):
            break

        # A value outside the 16-bit range forces a wider run.
        if not (-32768 <= value <= 32767):
            break

        pos += 1
    runLength = pos - offset
    # Emit in chunks of at most 64 values per header; words are stored
    # big-endian, hence the byteswap on little-endian hosts.
    while runLength >= 64:
        bytearr.append(DELTAS_ARE_WORDS | 63)
        a = array.array("h", deltas[offset : offset + 64])
        if sys.byteorder != "big":
            a.byteswap()
        bytearr.extend(a)
        offset += 64
        runLength -= 64
    if runLength:
        bytearr.append(DELTAS_ARE_WORDS | (runLength - 1))
        a = array.array("h", deltas[offset:pos])
        if sys.byteorder != "big":
            a.byteswap()
        bytearr.extend(a)
    return pos
||||
|
||||
@staticmethod
def encodeDeltaRunAsLongs_(deltas, offset, bytearr, optimizeSize=True):
    """Encode a run of 32-bit deltas starting at `offset`; return the
    index of the first value not consumed."""
    pos = offset
    numDeltas = len(deltas)
    while pos < numDeltas:
        value = deltas[pos]
        # A value that fits in 16 bits is cheaper in a word run.
        if optimizeSize and -32768 <= value <= 32767:
            break
        pos += 1
    runLength = pos - offset
    # Emit in chunks of at most 64 values per header; longs are stored
    # big-endian, hence the byteswap on little-endian hosts.
    while runLength >= 64:
        bytearr.append(DELTAS_ARE_LONGS | 63)
        a = array.array("i", deltas[offset : offset + 64])
        if sys.byteorder != "big":
            a.byteswap()
        bytearr.extend(a)
        offset += 64
        runLength -= 64
    if runLength:
        bytearr.append(DELTAS_ARE_LONGS | (runLength - 1))
        a = array.array("i", deltas[offset:pos])
        if sys.byteorder != "big":
            a.byteswap()
        bytearr.extend(a)
    return pos
||||
|
||||
@staticmethod
def decompileDeltas_(numDeltas, data, offset=0):
    """(numDeltas, data, offset) --> ([delta, delta, ...], newOffset)"""
    result = []
    pos = offset
    # numDeltas of None means "consume all of data" instead of a count.
    while len(result) < numDeltas if numDeltas is not None else pos < len(data):
        runHeader = data[pos]
        pos += 1
        numDeltasInRun = (runHeader & DELTA_RUN_COUNT_MASK) + 1
        if (runHeader & DELTAS_SIZE_MASK) == DELTAS_ARE_ZERO:
            # Zero runs carry no payload.
            result.extend([0] * numDeltasInRun)
        else:
            # Width selected by the header's size bits: long, word, or byte.
            if (runHeader & DELTAS_SIZE_MASK) == DELTAS_ARE_LONGS:
                deltas = array.array("i")
                deltasSize = numDeltasInRun * 4
            elif (runHeader & DELTAS_SIZE_MASK) == DELTAS_ARE_WORDS:
                deltas = array.array("h")
                deltasSize = numDeltasInRun * 2
            else:
                deltas = array.array("b")
                deltasSize = numDeltasInRun
            deltas.frombytes(data[pos : pos + deltasSize])
            # Stored big-endian; swap on little-endian hosts.
            if sys.byteorder != "big":
                deltas.byteswap()
            assert len(deltas) == numDeltasInRun, (len(deltas), numDeltasInRun)
            pos += deltasSize
            result.extend(deltas)
    assert numDeltas is None or len(result) == numDeltas
    return (result, pos)
||||
|
||||
@staticmethod
def getTupleSize_(flags, axisCount):
    """Return the byte size of a tuple variation header with these flags.

    The fixed part is 4 bytes; an embedded peak adds one F2Dot14 per
    axis, an intermediate region adds two (start and end tuples).
    """
    peak = axisCount * 2 if flags & EMBEDDED_PEAK_TUPLE else 0
    intermediate = axisCount * 4 if flags & INTERMEDIATE_REGION else 0
    return 4 + peak + intermediate
||||
|
||||
def getCoordWidth(self):
|
||||
"""Return 2 if coordinates are (x, y) as in gvar, 1 if single values
|
||||
as in cvar, or 0 if empty.
|
||||
"""
|
||||
firstDelta = next((c for c in self.coordinates if c is not None), None)
|
||||
if firstDelta is None:
|
||||
return 0 # empty or has no impact
|
||||
if type(firstDelta) in (int, float):
|
||||
return 1
|
||||
if type(firstDelta) is tuple and len(firstDelta) == 2:
|
||||
return 2
|
||||
raise TypeError(
|
||||
"invalid type of delta; expected (int or float) number, or "
|
||||
"Tuple[number, number]: %r" % firstDelta
|
||||
)
|
||||
|
||||
def scaleDeltas(self, scalar):
|
||||
if scalar == 1.0:
|
||||
return # no change
|
||||
coordWidth = self.getCoordWidth()
|
||||
self.coordinates = [
|
||||
(
|
||||
None
|
||||
if d is None
|
||||
else d * scalar if coordWidth == 1 else (d[0] * scalar, d[1] * scalar)
|
||||
)
|
||||
for d in self.coordinates
|
||||
]
|
||||
|
||||
def roundDeltas(self):
    """Round every stored delta to an integer with otRound, in place."""
    if self.getCoordWidth() == 1:
        self.coordinates = [
            None if d is None else otRound(d) for d in self.coordinates
        ]
    else:
        self.coordinates = [
            None if d is None else (otRound(d[0]), otRound(d[1]))
            for d in self.coordinates
        ]
||||
|
||||
def calcInferredDeltas(self, origCoords, endPts):
    """Replace None deltas with IUP-interpolated ones (gvar only)."""
    from fontTools.varLib.iup import iup_delta

    if self.getCoordWidth() == 1:
        raise TypeError("Only 'gvar' TupleVariation can have inferred deltas")
    if None not in self.coordinates:
        return  # every point already has an explicit delta
    if len(self.coordinates) != len(origCoords):
        raise ValueError(
            "Expected len(origCoords) == %d; found %d"
            % (len(self.coordinates), len(origCoords))
        )
    self.coordinates = iup_delta(self.coordinates, origCoords, endPts)
||||
|
||||
def optimize(self, origCoords, endPts, tolerance=0.5, isComposite=False):
    """Drop deltas that IUP can infer within `tolerance`, if that is smaller.

    The optimized coordinate list is only adopted when its compiled form
    is actually shorter than the current one.
    """
    from fontTools.varLib.iup import iup_delta_optimize

    if None in self.coordinates:
        return  # already optimized

    deltaOpt = iup_delta_optimize(
        self.coordinates, origCoords, endPts, tolerance=tolerance
    )
    if None in deltaOpt:
        if isComposite and all(d is None for d in deltaOpt):
            # Fix for macOS composites
            # https://github.com/fonttools/fonttools/issues/1381
            deltaOpt = [(0, 0)] + [None] * (len(deltaOpt) - 1)
        # Use "optimized" version only if smaller...
        varOpt = TupleVariation(self.axes, deltaOpt)

        # Shouldn't matter that this is different from fvar...?
        axisTags = sorted(self.axes.keys())
        tupleData, auxData = self.compile(axisTags)
        unoptimizedLength = len(tupleData) + len(auxData)
        tupleData, auxData = varOpt.compile(axisTags)
        optimizedLength = len(tupleData) + len(auxData)

        if optimizedLength < unoptimizedLength:
            self.coordinates = varOpt.coordinates
||||
def __imul__(self, scalar):
    """In-place scaling: `tv *= s` multiplies all deltas by s."""
    self.scaleDeltas(scalar)
    return self
||||
|
||||
def __iadd__(self, other):
    """In-place element-wise sum of two delta lists of equal length."""
    if not isinstance(other, TupleVariation):
        return NotImplemented
    deltas1 = self.coordinates
    length = len(deltas1)
    deltas2 = other.coordinates
    if len(deltas2) != length:
        raise ValueError("cannot sum TupleVariation deltas with different lengths")
    # 'None' values have different meanings in gvar vs cvar TupleVariations:
    # within the gvar, when deltas are not provided explicitly for some points,
    # they need to be inferred; whereas for the 'cvar' table, if deltas are not
    # provided for some CVT values, then no adjustments are made (i.e. None == 0).
    # Thus, we cannot sum deltas for gvar TupleVariations if they contain
    # inferred deltas (the latter need to be computed first using the
    # 'calcInferredDeltas' method), but we can treat 'None' values in cvar
    # deltas as if they are zeros.
    if self.getCoordWidth() == 2:
        for i, d2 in zip(range(length), deltas2):
            d1 = deltas1[i]
            try:
                deltas1[i] = (d1[0] + d2[0], d1[1] + d2[1])
            except TypeError:
                # A None on either side means an inferred gvar delta.
                raise ValueError("cannot sum gvar deltas with inferred points")
    else:
        for i, d2 in zip(range(length), deltas2):
            d1 = deltas1[i]
            if d1 is not None and d2 is not None:
                deltas1[i] = d1 + d2
            elif d1 is None and d2 is not None:
                deltas1[i] = d2
            # elif d2 is None do nothing
    return self
||||
|
||||
|
||||
def decompileSharedTuples(axisTags, sharedTupleCount, data, offset):
    """Read `sharedTupleCount` consecutive coordinate tuples from `data`."""
    tuples = []
    for _ in range(sharedTupleCount):
        coord, offset = TupleVariation.decompileCoord_(axisTags, data, offset)
        tuples.append(coord)
    return tuples
||||
|
||||
|
||||
def compileSharedTuples(
    axisTags, variations, MAX_NUM_SHARED_COORDS=TUPLE_INDEX_MASK + 1
):
    """Pick peak tuples worth sharing: those used by more than one variation.

    Candidates are capped at MAX_NUM_SHARED_COORDS and ordered by
    descending use count, ties broken by the compiled bytes, so the
    output is deterministic.
    """
    coordCount = Counter(var.compileCoord(axisTags) for var in variations)
    # In python < 3.7, most_common() ordering is non-deterministic
    # so apply a sort to make sure the ordering is consistent.
    sharedCoords = sorted(
        coordCount.most_common(MAX_NUM_SHARED_COORDS),
        key=lambda item: (-item[1], item[0]),
    )
    return [coord for coord, count in sharedCoords if count > 1]
||||
|
||||
|
||||
def compileTupleVariationStore(
    variations,
    pointCount,
    axisTags,
    sharedTupleIndices,
    useSharedPoints=True,
    *,
    optimizeSize=True,
):
    """Compile a list of TupleVariations into (tupleVariationCount, tuples, data).

    Empty variations are dropped; when useSharedPoints is true, the
    point-set whose sharing saves the most bytes is hoisted into the
    shared-points slot and flagged via TUPLES_SHARE_POINT_NUMBERS.
    """
    # pointCount is actually unused. Keeping for API compat.
    del pointCount
    newVariations = []
    pointDatas = []
    # Compile all points and figure out sharing if desired
    sharedPoints = None

    # Collect, count, and compile point-sets for all variation sets
    pointSetCount = defaultdict(int)
    for v in variations:
        points = v.getUsedPoints()
        if points is None:  # Empty variations
            continue
        pointSetCount[points] += 1
        newVariations.append(v)
        pointDatas.append(points)
    variations = newVariations
    del newVariations

    if not variations:
        return (0, b"", b"")

    n = len(variations[0].coordinates)
    assert all(
        len(v.coordinates) == n for v in variations
    ), "Variation sets have different sizes"

    # Compile each distinct point-set once, however many variations use it.
    compiledPoints = {
        pointSet: TupleVariation.compilePoints(pointSet) for pointSet in pointSetCount
    }

    tupleVariationCount = len(variations)
    tuples = []
    data = []

    if useSharedPoints:
        # Find point-set which saves most bytes.
        def key(pn):
            # Savings = compiled size times the number of duplicate uses.
            pointSet = pn[0]
            count = pn[1]
            return len(compiledPoints[pointSet]) * (count - 1)

        sharedPoints = max(pointSetCount.items(), key=key)[0]

        data.append(compiledPoints[sharedPoints])
        tupleVariationCount |= TUPLES_SHARE_POINT_NUMBERS

    # b'' implies "use shared points"
    pointDatas = [
        compiledPoints[points] if points != sharedPoints else b""
        for points in pointDatas
    ]

    for v, p in zip(variations, pointDatas):
        thisTuple, thisData = v.compile(
            axisTags, sharedTupleIndices, pointData=p, optimizeSize=optimizeSize
        )

        tuples.append(thisTuple)
        data.append(thisData)

    tuples = b"".join(tuples)
    data = b"".join(data)
    return tupleVariationCount, tuples, data
||||
|
||||
|
||||
def decompileTupleVariationStore(
    tableTag,
    axisTags,
    tupleVariationCount,
    pointCount,
    sharedTuples,
    data,
    pos,
    dataPos,
):
    """Parse a tuple variation store into a list of TupleVariations.

    `pos` walks the headers, `dataPos` walks the serialized
    points-and-deltas section; the two advance independently.
    """
    numAxes = len(axisTags)
    result = []
    if (tupleVariationCount & TUPLES_SHARE_POINT_NUMBERS) != 0:
        # Shared point numbers precede the per-tuple data.
        sharedPoints, dataPos = TupleVariation.decompilePoints_(
            pointCount, data, dataPos, tableTag
        )
    else:
        sharedPoints = []
    for _ in range(tupleVariationCount & TUPLE_COUNT_MASK):
        dataSize, flags = struct.unpack(">HH", data[pos : pos + 4])
        tupleSize = TupleVariation.getTupleSize_(flags, numAxes)
        tupleData = data[pos : pos + tupleSize]
        pointDeltaData = data[dataPos : dataPos + dataSize]
        result.append(
            decompileTupleVariation_(
                pointCount,
                sharedTuples,
                sharedPoints,
                tableTag,
                axisTags,
                tupleData,
                pointDeltaData,
            )
        )
        pos += tupleSize
        dataPos += dataSize
    return result
||||
|
||||
|
||||
def decompileTupleVariation_(
    pointCount, sharedTuples, sharedPoints, tableTag, axisTags, data, tupleData
):
    """Decode one TupleVariationHeader plus its serialized payload.

    ``data`` holds the header (flags and optional embedded peak/intermediate
    coordinates); ``tupleData`` holds the private point numbers and the packed
    delta stream(s).
    """
    assert tableTag in ("cvar", "gvar"), tableTag
    (flags,) = struct.unpack(">H", data[2:4])
    pos = 4
    # Peak tuple: either embedded in the header or taken from the shared pool.
    if flags & EMBEDDED_PEAK_TUPLE:
        peak, pos = TupleVariation.decompileCoord_(axisTags, data, pos)
    else:
        peak = sharedTuples[flags & TUPLE_INDEX_MASK]
    # Start/end tuples: explicit for intermediate regions, inferred otherwise.
    if flags & INTERMEDIATE_REGION:
        start, pos = TupleVariation.decompileCoord_(axisTags, data, pos)
        end, pos = TupleVariation.decompileCoord_(axisTags, data, pos)
    else:
        start, end = inferRegion_(peak)
    # Keep only the axes along which this tuple actually varies.
    axes = {}
    for axisTag in axisTags:
        region = start[axisTag], peak[axisTag], end[axisTag]
        if region != (0.0, 0.0, 0.0):
            axes[axisTag] = region
    pos = 0
    if flags & PRIVATE_POINT_NUMBERS:
        points, pos = TupleVariation.decompilePoints_(
            pointCount, tupleData, pos, tableTag
        )
    else:
        points = sharedPoints

    deltas = [None] * pointCount

    if tableTag == "cvar":
        # Single delta stream: CVT value adjustments.
        cvtDeltas, pos = TupleVariation.decompileDeltas_(len(points), tupleData, pos)
        for point, delta in zip(points, cvtDeltas):
            if 0 <= point < pointCount:
                deltas[point] = delta

    elif tableTag == "gvar":
        # Two delta streams: all x deltas first, then all y deltas.
        xDeltas, pos = TupleVariation.decompileDeltas_(len(points), tupleData, pos)
        yDeltas, pos = TupleVariation.decompileDeltas_(len(points), tupleData, pos)
        for point, dx, dy in zip(points, xDeltas, yDeltas):
            if 0 <= point < pointCount:
                deltas[point] = (dx, dy)

    return TupleVariation(axes, deltas)
|
||||
|
||||
|
||||
def inferRegion_(peak):
    """Infer start and end for a (non-intermediate) region

    This helper function computes the applicability region for
    variation tuples whose INTERMEDIATE_REGION flag is not set in the
    TupleVariationHeader structure. Variation tuples apply only to
    certain regions of the variation space; outside that region, the
    tuple has no effect. To make the binary encoding more compact,
    TupleVariationHeaders can omit the intermediateStartTuple and
    intermediateEndTuple fields.
    """
    # For each axis the region runs from min(peak, 0) to max(peak, 0),
    # e.g. peak -0.3 -> (-0.3, 0.0); peak 0.7 -> (0.0, 0.7).
    start = {axis: min(value, 0.0) for axis, value in peak.items()}
    end = {axis: max(value, 0.0) for axis, value in peak.items()}
    return (start, end)
|
||||
@@ -0,0 +1,12 @@
|
||||
from .otBase import BaseTTXConverter
|
||||
|
||||
|
||||
class table_V_A_R_C_(BaseTTXConverter):
    """Variable Components table

    The ``VARC`` table stores the data needed to construct composite
    glyphs whose components can vary in a variable font.

    See also https://github.com/harfbuzz/boring-expansion-spec/blob/main/VARC.md
    """

    pass
|
||||
@@ -0,0 +1,249 @@
|
||||
from . import DefaultTable
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.textTools import safeEval
|
||||
import struct
|
||||
|
||||
VDMX_HeaderFmt = """
|
||||
> # big endian
|
||||
version: H # Version number (0 or 1)
|
||||
numRecs: H # Number of VDMX groups present
|
||||
numRatios: H # Number of aspect ratio groupings
|
||||
"""
|
||||
# the VMDX header is followed by an array of RatRange[numRatios] (i.e. aspect
|
||||
# ratio ranges);
|
||||
VDMX_RatRangeFmt = """
|
||||
> # big endian
|
||||
bCharSet: B # Character set
|
||||
xRatio: B # Value to use for x-Ratio
|
||||
yStartRatio: B # Starting y-Ratio value
|
||||
yEndRatio: B # Ending y-Ratio value
|
||||
"""
|
||||
# followed by an array of offset[numRatios] from start of VDMX table to the
|
||||
# VDMX Group for this ratio range (offsets will be re-calculated on compile);
|
||||
# followed by an array of Group[numRecs] records;
|
||||
VDMX_GroupFmt = """
|
||||
> # big endian
|
||||
recs: H # Number of height records in this group
|
||||
startsz: B # Starting yPelHeight
|
||||
endsz: B # Ending yPelHeight
|
||||
"""
|
||||
# followed by an array of vTable[recs] records.
|
||||
VDMX_vTableFmt = """
|
||||
> # big endian
|
||||
yPelHeight: H # yPelHeight to which values apply
|
||||
yMax: h # Maximum value (in pels) for this yPelHeight
|
||||
yMin: h # Minimum value (in pels) for this yPelHeight
|
||||
"""
|
||||
|
||||
|
||||
class table_V_D_M_X_(DefaultTable.DefaultTable):
    """Vertical Device Metrics table

    The ``VDMX`` table records changes to the vertical glyph minima
    and maxima that result from Truetype instructions.

    See also https://learn.microsoft.com/en-us/typography/opentype/spec/vdmx
    """

    def decompile(self, data, ttFont):
        """Parse the binary VDMX table into ``self.ratRanges`` and ``self.groups``."""
        pos = 0  # track current position from start of VDMX table
        dummy, data = sstruct.unpack2(VDMX_HeaderFmt, data, self)
        pos += sstruct.calcsize(VDMX_HeaderFmt)
        self.ratRanges = []
        for i in range(self.numRatios):
            ratio, data = sstruct.unpack2(VDMX_RatRangeFmt, data)
            pos += sstruct.calcsize(VDMX_RatRangeFmt)
            # the mapping between a ratio and a group is established below
            ratio["groupIndex"] = None
            self.ratRanges.append(ratio)
        lenOffset = struct.calcsize(">H")
        _offsets = []  # temporarily store offsets to groups
        for i in range(self.numRatios):
            offset = struct.unpack(">H", data[0:lenOffset])[0]
            data = data[lenOffset:]
            pos += lenOffset
            _offsets.append(offset)
        self.groups = []
        for groupIndex in range(self.numRecs):
            # the offset to this group from beginning of the VDMX table
            currOffset = pos
            group, data = sstruct.unpack2(VDMX_GroupFmt, data)
            # the group length and bounding sizes are re-calculated on compile
            recs = group.pop("recs")
            startsz = group.pop("startsz")
            endsz = group.pop("endsz")
            pos += sstruct.calcsize(VDMX_GroupFmt)
            for j in range(recs):
                vTable, data = sstruct.unpack2(VDMX_vTableFmt, data)
                vTableLength = sstruct.calcsize(VDMX_vTableFmt)
                pos += vTableLength
                # group is a dict of (yMax, yMin) tuples keyed by yPelHeight
                group[vTable["yPelHeight"]] = (vTable["yMax"], vTable["yMin"])
            # make sure startsz and endsz match the calculated values.
            # NOTE: the messages previously formatted ``group.startsz`` /
            # ``group.endsz``; ``group`` is a plain dict (those keys were
            # popped into locals above), so a failing assert would have
            # raised AttributeError instead of the intended message.
            minSize = min(group.keys())
            maxSize = max(group.keys())
            assert (
                startsz == minSize
            ), "startsz (%s) must equal min yPelHeight (%s): group %d" % (
                startsz,
                minSize,
                groupIndex,
            )
            assert (
                endsz == maxSize
            ), "endsz (%s) must equal max yPelHeight (%s): group %d" % (
                endsz,
                maxSize,
                groupIndex,
            )
            self.groups.append(group)
            # match the defined offsets with the current group's offset
            for offsetIndex, offsetValue in enumerate(_offsets):
                # when numRecs < numRatios there can be more than one ratio
                # range sharing the same VDMX group
                if currOffset == offsetValue:
                    # map the group to the ratio range that has the same
                    # index as the offset to that group
                    self.ratRanges[offsetIndex]["groupIndex"] = groupIndex
        # check that all ratio ranges have a group
        for i in range(self.numRatios):
            ratio = self.ratRanges[i]
            if ratio["groupIndex"] is None:
                from fontTools import ttLib

                raise ttLib.TTLibError("no group defined for ratRange %d" % i)

    def _getOffsets(self):
        """
        Calculate offsets to VDMX_Group records.
        For each ratRange return a list of offset values from the beginning of
        the VDMX table to a VDMX_Group.
        """
        lenHeader = sstruct.calcsize(VDMX_HeaderFmt)
        lenRatRange = sstruct.calcsize(VDMX_RatRangeFmt)
        lenOffset = struct.calcsize(">H")
        lenGroupHeader = sstruct.calcsize(VDMX_GroupFmt)
        lenVTable = sstruct.calcsize(VDMX_vTableFmt)
        # offset to the first group
        pos = lenHeader + self.numRatios * lenRatRange + self.numRatios * lenOffset
        groupOffsets = []
        for group in self.groups:
            groupOffsets.append(pos)
            lenGroup = lenGroupHeader + len(group) * lenVTable
            pos += lenGroup  # offset to next group
        offsets = []
        for ratio in self.ratRanges:
            groupIndex = ratio["groupIndex"]
            offsets.append(groupOffsets[groupIndex])
        return offsets

    def compile(self, ttFont):
        """Serialize the table; raises TTLibError for unknown versions."""
        if not (self.version == 0 or self.version == 1):
            from fontTools import ttLib

            raise ttLib.TTLibError(
                "unknown format for VDMX table: version %s" % self.version
            )
        data = sstruct.pack(VDMX_HeaderFmt, self)
        for ratio in self.ratRanges:
            data += sstruct.pack(VDMX_RatRangeFmt, ratio)
        # recalculate offsets to VDMX groups
        for offset in self._getOffsets():
            data += struct.pack(">H", offset)
        for group in self.groups:
            # the group header fields are derived from the group contents
            recs = len(group)
            startsz = min(group.keys())
            endsz = max(group.keys())
            gHeader = {"recs": recs, "startsz": startsz, "endsz": endsz}
            data += sstruct.pack(VDMX_GroupFmt, gHeader)
            for yPelHeight, (yMax, yMin) in sorted(group.items()):
                vTable = {"yPelHeight": yPelHeight, "yMax": yMax, "yMin": yMin}
                data += sstruct.pack(VDMX_vTableFmt, vTable)
        return data

    def toXML(self, writer, ttFont):
        """Write the table to TTX XML."""
        writer.simpletag("version", value=self.version)
        writer.newline()
        writer.begintag("ratRanges")
        writer.newline()
        for ratio in self.ratRanges:
            groupIndex = ratio["groupIndex"]
            writer.simpletag(
                "ratRange",
                bCharSet=ratio["bCharSet"],
                xRatio=ratio["xRatio"],
                yStartRatio=ratio["yStartRatio"],
                yEndRatio=ratio["yEndRatio"],
                groupIndex=groupIndex,
            )
            writer.newline()
        writer.endtag("ratRanges")
        writer.newline()
        writer.begintag("groups")
        writer.newline()
        for groupIndex in range(self.numRecs):
            group = self.groups[groupIndex]
            recs = len(group)
            startsz = min(group.keys())
            endsz = max(group.keys())
            writer.begintag("group", index=groupIndex)
            writer.newline()
            writer.comment("recs=%d, startsz=%d, endsz=%d" % (recs, startsz, endsz))
            writer.newline()
            for yPelHeight, (yMax, yMin) in sorted(group.items()):
                writer.simpletag(
                    "record",
                    [("yPelHeight", yPelHeight), ("yMax", yMax), ("yMin", yMin)],
                )
                writer.newline()
            writer.endtag("group")
            writer.newline()
        writer.endtag("groups")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Rebuild the table from TTX XML elements."""
        if name == "version":
            self.version = safeEval(attrs["value"])
        elif name == "ratRanges":
            if not hasattr(self, "ratRanges"):
                self.ratRanges = []
            for element in content:
                if not isinstance(element, tuple):
                    continue
                name, attrs, content = element
                if name == "ratRange":
                    # numRatios is kept in sync with the parsed elements
                    if not hasattr(self, "numRatios"):
                        self.numRatios = 1
                    else:
                        self.numRatios += 1
                    ratio = {
                        "bCharSet": safeEval(attrs["bCharSet"]),
                        "xRatio": safeEval(attrs["xRatio"]),
                        "yStartRatio": safeEval(attrs["yStartRatio"]),
                        "yEndRatio": safeEval(attrs["yEndRatio"]),
                        "groupIndex": safeEval(attrs["groupIndex"]),
                    }
                    self.ratRanges.append(ratio)
        elif name == "groups":
            if not hasattr(self, "groups"):
                self.groups = []
            for element in content:
                if not isinstance(element, tuple):
                    continue
                name, attrs, content = element
                if name == "group":
                    # numRecs is kept in sync with the parsed elements
                    if not hasattr(self, "numRecs"):
                        self.numRecs = 1
                    else:
                        self.numRecs += 1
                    group = {}
                    for element in content:
                        if not isinstance(element, tuple):
                            continue
                        name, attrs, content = element
                        if name == "record":
                            yPelHeight = safeEval(attrs["yPelHeight"])
                            yMax = safeEval(attrs["yMax"])
                            yMin = safeEval(attrs["yMin"])
                            group[yPelHeight] = (yMax, yMin)
                    self.groups.append(group)
|
||||
@@ -0,0 +1,165 @@
|
||||
from fontTools.misc.textTools import bytesjoin, safeEval
|
||||
from . import DefaultTable
|
||||
import struct
|
||||
|
||||
|
||||
class table_V_O_R_G_(DefaultTable.DefaultTable):
    """Vertical Origin table

    The ``VORG`` table contains the vertical origin of each glyph
    in a `CFF` or `CFF2` font.

    This table is structured so that you can treat it like a dictionary keyed by glyph name.

    ``ttFont['VORG'][<glyphName>]`` will return the vertical origin for any glyph.

    ``ttFont['VORG'][<glyphName>] = <value>`` will set the vertical origin for any glyph.

    See also https://learn.microsoft.com/en-us/typography/opentype/spec/vorg
    """

    def decompile(self, data, ttFont):
        """Parse the binary VORG table into ``self.VOriginRecords``."""
        self.getGlyphName = (
            ttFont.getGlyphName
        )  # for use in get/set item functions, for access by GID
        (
            self.majorVersion,
            self.minorVersion,
            self.defaultVertOriginY,
            self.numVertOriginYMetrics,
        ) = struct.unpack(">HHhH", data[:8])
        assert (
            self.majorVersion <= 1
        ), "Major version of VORG table is higher than I know how to handle"
        data = data[8:]
        vOrigins = []
        gids = []
        pos = 0
        for i in range(self.numVertOriginYMetrics):
            gid, vOrigin = struct.unpack(">Hh", data[pos : pos + 4])
            pos += 4
            gids.append(gid)
            vOrigins.append(vOrigin)

        self.VOriginRecords = vOrig = {}
        glyphOrder = ttFont.getGlyphOrder()
        try:
            names = [glyphOrder[gid] for gid in gids]
        except IndexError:
            # a GID is out of range of the glyph order; fall back to the
            # (slower) per-GID lookup which can synthesize names
            getGlyphName = self.getGlyphName
            names = map(getGlyphName, gids)

        for name, vOrigin in zip(names, vOrigins):
            vOrig[name] = vOrigin

    def compile(self, ttFont):
        """Serialize the table; records are sorted in ascending GID order."""
        vorgs = list(self.VOriginRecords.values())
        names = list(self.VOriginRecords.keys())
        nameMap = ttFont.getReverseGlyphMap()
        try:
            gids = [nameMap[name] for name in names]
        except KeyError:
            # stale reverse map; rebuild it and retry once
            nameMap = ttFont.getReverseGlyphMap(rebuild=True)
            gids = [nameMap[name] for name in names]
        vOriginTable = list(zip(gids, vorgs))
        self.numVertOriginYMetrics = len(vorgs)
        vOriginTable.sort()  # must be in ascending GID order
        dataList = [struct.pack(">Hh", rec[0], rec[1]) for rec in vOriginTable]
        header = struct.pack(
            ">HHhH",
            self.majorVersion,
            self.minorVersion,
            self.defaultVertOriginY,
            self.numVertOriginYMetrics,
        )
        dataList.insert(0, header)
        data = bytesjoin(dataList)
        return data

    def toXML(self, writer, ttFont):
        """Write the table to TTX XML."""
        writer.simpletag("majorVersion", value=self.majorVersion)
        writer.newline()
        writer.simpletag("minorVersion", value=self.minorVersion)
        writer.newline()
        writer.simpletag("defaultVertOriginY", value=self.defaultVertOriginY)
        writer.newline()
        writer.simpletag("numVertOriginYMetrics", value=self.numVertOriginYMetrics)
        writer.newline()
        vOriginTable = []
        glyphNames = self.VOriginRecords.keys()
        for glyphName in glyphNames:
            # a bare ``except:`` here would also swallow KeyboardInterrupt
            # and SystemExit; catch Exception instead
            try:
                gid = ttFont.getGlyphID(glyphName)
            except Exception:
                assert 0, (
                    "VORG table contains a glyph name not in ttFont.getGlyphNames(): "
                    + str(glyphName)
                )
            vOriginTable.append([gid, glyphName, self.VOriginRecords[glyphName]])
        vOriginTable.sort()
        for entry in vOriginTable:
            vOriginRec = VOriginRecord(entry[1], entry[2])
            vOriginRec.toXML(writer, ttFont)

    def fromXML(self, name, attrs, content, ttFont):
        """Rebuild the table from TTX XML elements."""
        if not hasattr(self, "VOriginRecords"):
            self.VOriginRecords = {}
        self.getGlyphName = (
            ttFont.getGlyphName
        )  # for use in get/set item functions, for access by GID
        if name == "VOriginRecord":
            vOriginRec = VOriginRecord()
            for element in content:
                if isinstance(element, str):
                    continue
                name, attrs, content = element
                vOriginRec.fromXML(name, attrs, content, ttFont)
            self.VOriginRecords[vOriginRec.glyphName] = vOriginRec.vOrigin
        elif "value" in attrs:
            setattr(self, name, safeEval(attrs["value"]))

    def __getitem__(self, glyphSelector):
        if isinstance(glyphSelector, int):
            # it's a gid, convert to glyph name
            glyphSelector = self.getGlyphName(glyphSelector)

        # glyphs without an explicit record use the table-wide default
        if glyphSelector not in self.VOriginRecords:
            return self.defaultVertOriginY

        return self.VOriginRecords[glyphSelector]

    def __setitem__(self, glyphSelector, value):
        if isinstance(glyphSelector, int):
            # it's a gid, convert to glyph name
            glyphSelector = self.getGlyphName(glyphSelector)

        # storing the default value would be redundant; drop the record
        if value != self.defaultVertOriginY:
            self.VOriginRecords[glyphSelector] = value
        elif glyphSelector in self.VOriginRecords:
            del self.VOriginRecords[glyphSelector]

    def __delitem__(self, glyphSelector):
        del self.VOriginRecords[glyphSelector]
|
||||
|
||||
|
||||
class VOriginRecord(object):
    """A single (glyph name, vertical origin) pair in the VORG table."""

    def __init__(self, name=None, vOrigin=None):
        self.glyphName = name
        self.vOrigin = vOrigin

    def toXML(self, writer, ttFont):
        """Write this record as a ``VOriginRecord`` XML element."""
        writer.begintag("VOriginRecord")
        writer.newline()
        for tag, value in (("glyphName", self.glyphName), ("vOrigin", self.vOrigin)):
            writer.simpletag(tag, value=value)
            writer.newline()
        writer.endtag("VOriginRecord")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Set one attribute from a child XML element."""
        value = attrs["value"]
        # glyph names stay as strings; everything else is evaluated
        setattr(self, name, value if name == "glyphName" else safeEval(value))
|
||||
@@ -0,0 +1,13 @@
|
||||
from .otBase import BaseTTXConverter
|
||||
|
||||
|
||||
class table_V_V_A_R_(BaseTTXConverter):
|
||||
"""Vertical Metrics Variations table
|
||||
|
||||
The ``VVAR`` table contains variation data for per-glyph vertical metrics
|
||||
in a variable font.
|
||||
|
||||
See also https://learn.microsoft.com/en-us/typography/opentype/spec/vvar
|
||||
"""
|
||||
|
||||
pass
|
||||
@@ -0,0 +1,98 @@
|
||||
# DON'T EDIT! This file is generated by MetaTools/buildTableList.py.
|
||||
def _moduleFinderHint():
    """Dummy function to let modulefinder know what tables may be
    dynamically imported. Generated by MetaTools/buildTableList.py.

    >>> _moduleFinderHint()
    """
    # NOTE: this list is machine-generated; regenerate it with
    # MetaTools/buildTableList.py rather than editing it by hand.
    from . import B_A_S_E_
    from . import C_B_D_T_
    from . import C_B_L_C_
    from . import C_F_F_
    from . import C_F_F__2
    from . import C_O_L_R_
    from . import C_P_A_L_
    from . import D_S_I_G_
    from . import D__e_b_g
    from . import E_B_D_T_
    from . import E_B_L_C_
    from . import F_F_T_M_
    from . import F__e_a_t
    from . import G_D_E_F_
    from . import G_M_A_P_
    from . import G_P_K_G_
    from . import G_P_O_S_
    from . import G_S_U_B_
    from . import G_V_A_R_
    from . import G__l_a_t
    from . import G__l_o_c
    from . import H_V_A_R_
    from . import J_S_T_F_
    from . import L_T_S_H_
    from . import M_A_T_H_
    from . import M_E_T_A_
    from . import M_V_A_R_
    from . import O_S_2f_2
    from . import S_I_N_G_
    from . import S_T_A_T_
    from . import S_V_G_
    from . import S__i_l_f
    from . import S__i_l_l
    from . import T_S_I_B_
    from . import T_S_I_C_
    from . import T_S_I_D_
    from . import T_S_I_J_
    from . import T_S_I_P_
    from . import T_S_I_S_
    from . import T_S_I_V_
    from . import T_S_I__0
    from . import T_S_I__1
    from . import T_S_I__2
    from . import T_S_I__3
    from . import T_S_I__5
    from . import T_T_F_A_
    from . import V_A_R_C_
    from . import V_D_M_X_
    from . import V_O_R_G_
    from . import V_V_A_R_
    from . import _a_n_k_r
    from . import _a_v_a_r
    from . import _b_s_l_n
    from . import _c_i_d_g
    from . import _c_m_a_p
    from . import _c_v_a_r
    from . import _c_v_t
    from . import _f_e_a_t
    from . import _f_p_g_m
    from . import _f_v_a_r
    from . import _g_a_s_p
    from . import _g_c_i_d
    from . import _g_l_y_f
    from . import _g_v_a_r
    from . import _h_d_m_x
    from . import _h_e_a_d
    from . import _h_h_e_a
    from . import _h_m_t_x
    from . import _k_e_r_n
    from . import _l_c_a_r
    from . import _l_o_c_a
    from . import _l_t_a_g
    from . import _m_a_x_p
    from . import _m_e_t_a
    from . import _m_o_r_t
    from . import _m_o_r_x
    from . import _n_a_m_e
    from . import _o_p_b_d
    from . import _p_o_s_t
    from . import _p_r_e_p
    from . import _p_r_o_p
    from . import _s_b_i_x
    from . import _t_r_a_k
    from . import _v_h_e_a
    from . import _v_m_t_x
||||
|
||||
|
||||
if __name__ == "__main__":
    # Run the module's doctests (the _moduleFinderHint example above) and
    # exit with the number of failures as the process status.
    import doctest, sys

    sys.exit(doctest.testmod().failed)
|
||||
@@ -0,0 +1,15 @@
|
||||
from .otBase import BaseTTXConverter
|
||||
|
||||
|
||||
class table__a_n_k_r(BaseTTXConverter):
    """Anchor Point table

    The anchor point table provides a way to define anchor points:
    points within the coordinate space of a given glyph, independent
    of the control points used to render the glyph. Anchor points are
    used in conjunction with the ``kerx`` table.

    See also https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6ankr.html
    """

    pass
|
||||
@@ -0,0 +1,193 @@
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.fixedTools import (
|
||||
fixedToFloat as fi2fl,
|
||||
floatToFixed as fl2fi,
|
||||
floatToFixedToStr as fl2str,
|
||||
strToFixedToFloat as str2fl,
|
||||
)
|
||||
from fontTools.misc.textTools import bytesjoin, safeEval
|
||||
from fontTools.misc.roundTools import otRound
|
||||
from fontTools.varLib.models import piecewiseLinearMap
|
||||
from fontTools.varLib.varStore import VarStoreInstancer, NO_VARIATION_INDEX
|
||||
from fontTools.ttLib import TTLibError
|
||||
from . import DefaultTable
|
||||
from . import otTables
|
||||
import struct
|
||||
import logging
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
from .otBase import BaseTTXConverter
|
||||
|
||||
|
||||
class table__a_v_a_r(BaseTTXConverter):
    """Axis Variations table

    This class represents the ``avar`` table of a variable font. The object has one
    substantive attribute, ``segments``, which maps axis tags to a segments dictionary::

        >>> font["avar"].segments   # doctest: +SKIP
        {'wght': {-1.0: -1.0,
          0.0: 0.0,
          0.125: 0.11444091796875,
          0.25: 0.23492431640625,
          0.5: 0.35540771484375,
          0.625: 0.5,
          0.75: 0.6566162109375,
          0.875: 0.81927490234375,
          1.0: 1.0},
         'ital': {-1.0: -1.0, 0.0: 0.0, 1.0: 1.0}}

    Notice that the segments dictionary is made up of normalized values. A valid
    ``avar`` segment mapping must contain the entries ``-1.0: -1.0, 0.0: 0.0, 1.0: 1.0``.
    fontTools does not enforce this, so it is your responsibility to ensure that
    mappings are valid.

    See also https://learn.microsoft.com/en-us/typography/opentype/spec/avar
    """

    # 'avar' remaps coordinates on axes declared in 'fvar', so that table
    # must be compiled/decompiled first.
    dependencies = ["fvar"]

    def __init__(self, tag=None):
        super().__init__(tag)
        # maps axis tag -> {fromCoordinate: toCoordinate} (normalized values)
        self.segments = {}

    def compile(self, ttFont):
        """Build ``self.table`` from ``self.segments`` and serialize it."""
        axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
        if not hasattr(self, "table"):
            self.table = otTables.avar()
        if not hasattr(self.table, "Reserved"):
            self.table.Reserved = 0
        # pack major/minor into the single 32-bit Version field
        self.table.Version = (getattr(self, "majorVersion", 1) << 16) | getattr(
            self, "minorVersion", 0
        )
        self.table.AxisCount = len(axisTags)
        self.table.AxisSegmentMap = []
        for axis in axisTags:
            mappings = self.segments[axis]
            segmentMap = otTables.AxisSegmentMap()
            segmentMap.PositionMapCount = len(mappings)
            segmentMap.AxisValueMap = []
            # value maps must be emitted in ascending FromCoordinate order
            for key, value in sorted(mappings.items()):
                valueMap = otTables.AxisValueMap()
                valueMap.FromCoordinate = key
                valueMap.ToCoordinate = value
                segmentMap.AxisValueMap.append(valueMap)
            self.table.AxisSegmentMap.append(segmentMap)
        return super().compile(ttFont)

    def decompile(self, data, ttFont):
        """Deserialize and populate ``self.segments`` from ``self.table``."""
        super().decompile(data, ttFont)
        self.majorVersion = self.table.Version >> 16
        self.minorVersion = self.table.Version & 0xFFFF
        if self.majorVersion not in (1, 2):
            raise NotImplementedError("Unknown avar table version")
        axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
        # pre-create an (empty) entry for every fvar axis, even those
        # without a segment map in the table
        for axis in axisTags:
            self.segments[axis] = {}
        for axis, segmentMap in zip(axisTags, self.table.AxisSegmentMap):
            segments = self.segments[axis] = {}
            for segment in segmentMap.AxisValueMap:
                segments[segment.FromCoordinate] = segment.ToCoordinate

    def toXML(self, writer, ttFont):
        """Write the segment maps (and v2 variation data) to TTX XML."""
        writer.simpletag(
            "version",
            major=getattr(self, "majorVersion", 1),
            minor=getattr(self, "minorVersion", 0),
        )
        writer.newline()
        axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
        for axis in axisTags:
            writer.begintag("segment", axis=axis)
            writer.newline()
            for key, value in sorted(self.segments[axis].items()):
                # coordinates are stored as F2Dot14 fixed-point values
                key = fl2str(key, 14)
                value = fl2str(value, 14)
                writer.simpletag("mapping", **{"from": key, "to": value})
                writer.newline()
            writer.endtag("segment")
            writer.newline()
        if getattr(self, "majorVersion", 1) >= 2:
            # version 2 adds an optional delta-set index map and variation store
            if self.table.VarIdxMap:
                self.table.VarIdxMap.toXML(writer, ttFont, name="VarIdxMap")
            if self.table.VarStore:
                self.table.VarStore.toXML(writer, ttFont)

    def fromXML(self, name, attrs, content, ttFont):
        """Rebuild the table from TTX XML elements."""
        if not hasattr(self, "table"):
            self.table = otTables.avar()
        if not hasattr(self.table, "Reserved"):
            self.table.Reserved = 0
        if name == "version":
            self.majorVersion = safeEval(attrs["major"])
            self.minorVersion = safeEval(attrs["minor"])
            self.table.Version = (getattr(self, "majorVersion", 1) << 16) | getattr(
                self, "minorVersion", 0
            )
        elif name == "segment":
            axis = attrs["axis"]
            segment = self.segments[axis] = {}
            for element in content:
                if isinstance(element, tuple):
                    elementName, elementAttrs, _ = element
                    if elementName == "mapping":
                        fromValue = str2fl(elementAttrs["from"], 14)
                        toValue = str2fl(elementAttrs["to"], 14)
                        if fromValue in segment:
                            # later entries overwrite earlier ones; warn so the
                            # data loss is visible
                            log.warning(
                                "duplicate entry for %s in axis '%s'", fromValue, axis
                            )
                        segment[fromValue] = toValue
        else:
            # VarIdxMap / VarStore and anything else is handled generically
            super().fromXML(name, attrs, content, ttFont)

    def renormalizeLocation(self, location, font, dropZeroes=True):
        """Apply this table's mappings to an fvar-normalized ``location``.

        Returns a new dict of axisTag -> remapped normalized value. For
        version 2 tables the extra variation-store adjustment is applied
        on top of the piecewise-linear segment maps.
        """

        majorVersion = getattr(self, "majorVersion", 1)

        if majorVersion not in (1, 2):
            raise NotImplementedError("Unknown avar table version")

        # Version 1 behavior: piecewise-linear remap per axis.
        avarSegments = self.segments
        mappedLocation = {}
        for axisTag, value in location.items():
            avarMapping = avarSegments.get(axisTag, None)
            if avarMapping is not None:
                value = piecewiseLinearMap(value, avarMapping)
            mappedLocation[axisTag] = value

        if majorVersion < 2:
            return mappedLocation

        # Version 2

        varIdxMap = self.table.VarIdxMap
        varStore = self.table.VarStore
        axes = font["fvar"].axes
        if varStore is not None:
            instancer = VarStoreInstancer(varStore, axes, mappedLocation)

        # work in F2Dot14 fixed-point, one coordinate per fvar axis
        coords = list(fl2fi(mappedLocation.get(axis.axisTag, 0), 14) for axis in axes)

        out = []
        for varIdx, v in enumerate(coords):

            if varIdxMap is not None:
                varIdx = varIdxMap[varIdx]

            if varStore is not None:
                delta = instancer[varIdx]
                v += otRound(delta)
                # clamp to the valid F2Dot14 normalized range [-1.0, +1.0]
                v = min(max(v, -(1 << 14)), +(1 << 14))

            out.append(v)

        mappedLocation = {
            axis.axisTag: fi2fl(v, 14)
            for v, axis in zip(out, axes)
            if v != 0 or not dropZeroes
        }

        return mappedLocation
|
||||
@@ -0,0 +1,15 @@
|
||||
from .otBase import BaseTTXConverter
|
||||
|
||||
|
||||
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6bsln.html
|
||||
class table__b_s_l_n(BaseTTXConverter):
    """Baseline table

    The AAT ``bsln`` table is similar in purpose to the OpenType ``BASE``
    table; it stores per-script baselines to support automatic alignment
    of lines of text.

    See also https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6bsln.html
    """

    pass
|
||||
@@ -0,0 +1,24 @@
|
||||
# coding: utf-8
|
||||
from .otBase import BaseTTXConverter
|
||||
|
||||
|
||||
class table__c_i_d_g(BaseTTXConverter):
    """CID to Glyph ID table

    The AAT ``cidg`` table has almost the same structure as ``gidc``,
    just mapping CIDs to GlyphIDs instead of the reverse direction.

    It is useful for fonts that may be used by a PDF renderer in lieu of
    a font reference with a known glyph collection but no subsetted
    glyphs. For instance, a PDF can say “please use a font conforming
    to Adobe-Japan-1”; the ``cidg`` mapping is necessary if the font is,
    say, a TrueType font. ``gidc`` is lossy for this purpose and is
    obsoleted by ``cidg``.

    For example, the first font in ``/System/Library/Fonts/PingFang.ttc``
    (which Apple ships pre-installed on MacOS 10.12.6) has a ``cidg`` table.

    See also https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6gcid.html
    """

    pass
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,94 @@
|
||||
from . import DefaultTable
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.textTools import bytesjoin
|
||||
from fontTools.ttLib.tables.TupleVariation import (
|
||||
compileTupleVariationStore,
|
||||
decompileTupleVariationStore,
|
||||
TupleVariation,
|
||||
)
|
||||
|
||||
|
||||
# https://www.microsoft.com/typography/otspec/cvar.htm
|
||||
# https://www.microsoft.com/typography/otspec/otvarcommonformats.htm
|
||||
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6cvar.html
|
||||
|
||||
# Fixed-size header at the start of the 'cvar' table, in sstruct notation.
CVAR_HEADER_FORMAT = """
    > # big endian
    majorVersion: H
    minorVersion: H
    tupleVariationCount: H
    offsetToData: H
"""

# Size in bytes of the header described above.
CVAR_HEADER_SIZE = sstruct.calcsize(CVAR_HEADER_FORMAT)
|
||||
|
||||
|
||||
class table__c_v_a_r(DefaultTable.DefaultTable):
    """Control Value Table (CVT) variations table

    The ``cvar`` table contains variations for the values in a ``cvt``
    table.

    See also https://learn.microsoft.com/en-us/typography/opentype/spec/cvar
    """

    # 'cvt ' supplies the number of values being varied; 'fvar' supplies
    # the axis tags needed to interpret tuple coordinates.
    dependencies = ["cvt ", "fvar"]

    def __init__(self, tag=None):
        # Default to version 1.0 with no variations.
        DefaultTable.DefaultTable.__init__(self, tag)
        self.majorVersion, self.minorVersion = 1, 0
        self.variations = []

    def compile(self, ttFont, useSharedPoints=False):
        """Compile the table to its binary form.

        Variations without any effective deltas are dropped from the output.
        """
        tupleVariationCount, tuples, data = compileTupleVariationStore(
            variations=[v for v in self.variations if v.hasImpact()],
            pointCount=len(ttFont["cvt "].values),
            axisTags=[axis.axisTag for axis in ttFont["fvar"].axes],
            sharedTupleIndices={},
            useSharedPoints=useSharedPoints,
        )
        header = {
            "majorVersion": self.majorVersion,
            "minorVersion": self.minorVersion,
            "tupleVariationCount": tupleVariationCount,
            # Serialized data follows the fixed header and the
            # tuple-variation headers.
            "offsetToData": CVAR_HEADER_SIZE + len(tuples),
        }
        return b"".join([sstruct.pack(CVAR_HEADER_FORMAT, header), tuples, data])

    def decompile(self, data, ttFont):
        """Parse binary table data into ``self.variations``."""
        axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
        header = {}
        sstruct.unpack(CVAR_HEADER_FORMAT, data[0:CVAR_HEADER_SIZE], header)
        self.majorVersion = header["majorVersion"]
        self.minorVersion = header["minorVersion"]
        # Only major version 1 is supported.
        assert self.majorVersion == 1, self.majorVersion
        self.variations = decompileTupleVariationStore(
            tableTag=self.tableTag,
            axisTags=axisTags,
            tupleVariationCount=header["tupleVariationCount"],
            pointCount=len(ttFont["cvt "].values),
            sharedTuples=None,  # 'cvar' has no shared tuples
            data=data,
            pos=CVAR_HEADER_SIZE,
            dataPos=header["offsetToData"],
        )

    def fromXML(self, name, attrs, content, ttFont):
        """Build the table from TTX XML elements."""
        if name == "version":
            self.majorVersion = int(attrs.get("major", "1"))
            self.minorVersion = int(attrs.get("minor", "0"))
        elif name == "tuple":
            # One TupleVariation per <tuple>; deltas default to None for
            # every cvt value.
            valueCount = len(ttFont["cvt "].values)
            var = TupleVariation({}, [None] * valueCount)
            self.variations.append(var)
            for tupleElement in content:
                if isinstance(tupleElement, tuple):
                    tupleName, tupleAttrs, tupleContent = tupleElement
                    var.fromXML(tupleName, tupleAttrs, tupleContent)

    def toXML(self, writer, ttFont):
        """Write the table as TTX XML."""
        axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
        writer.simpletag("version", major=self.majorVersion, minor=self.minorVersion)
        writer.newline()
        for var in self.variations:
            var.toXML(writer, axisTags)
|
||||
@@ -0,0 +1,56 @@
|
||||
from fontTools.misc.textTools import safeEval
|
||||
from . import DefaultTable
|
||||
import sys
|
||||
import array
|
||||
|
||||
|
||||
class table__c_v_t(DefaultTable.DefaultTable):
    """Control Value Table

    The Control Value Table holds a list of values that can be referenced
    by TrueType font instructions.

    See also https://learn.microsoft.com/en-us/typography/opentype/spec/cvt
    """

    def decompile(self, data, ttFont):
        """Parse the binary data as an array of big-endian int16 values."""
        values = array.array("h")
        values.frombytes(data)
        # The file data is big-endian; swap on little-endian hosts.
        if sys.byteorder != "big":
            values.byteswap()
        self.values = values

    def compile(self, ttFont):
        """Serialize the values back to big-endian int16 binary data."""
        if not hasattr(self, "values"):
            return b""
        # Work on a copy so byteswap does not mutate self.values.
        values = self.values[:]
        if sys.byteorder != "big":
            values.byteswap()
        return values.tobytes()

    def toXML(self, writer, ttFont):
        """Write one <cv> element per value."""
        for i, value in enumerate(self.values):
            writer.simpletag("cv", value=value, index=i)
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Read <cv> elements back into the values array."""
        if not hasattr(self, "values"):
            self.values = array.array("h")
        if name == "cv":
            index = safeEval(attrs["index"])
            value = safeEval(attrs["value"])
            # Grow the array with zeros so out-of-order indices are handled.
            for i in range(1 + index - len(self.values)):
                self.values.append(0)
            self.values[index] = value

    # Sequence-style access delegates to the underlying array.
    def __len__(self):
        return len(self.values)

    def __getitem__(self, index):
        return self.values[index]

    def __setitem__(self, index, value):
        self.values[index] = value

    def __delitem__(self, index):
        del self.values[index]
|
||||
@@ -0,0 +1,15 @@
|
||||
from .otBase import BaseTTXConverter
|
||||
|
||||
|
||||
class table__f_e_a_t(BaseTTXConverter):
    """Feature name table

    The feature name table is an AAT (Apple Advanced Typography) table for
    storing font features, settings, and their human-readable names. It should
    not be confused with the ``Feat`` table or the OpenType Layout ``GSUB``/``GPOS``
    tables.

    See also https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6feat.html
    """
|
||||
@@ -0,0 +1,62 @@
|
||||
from . import DefaultTable
|
||||
from . import ttProgram
|
||||
|
||||
|
||||
class table__f_p_g_m(DefaultTable.DefaultTable):
    """Font Program table

    The ``fpgm`` table typically contains function definitions that are
    used by font instructions. This Font Program is similar to the Control
    Value Program that is stored in the ``prep`` table, but
    the ``fpgm`` table is only executed one time, when the font is first
    used.

    See also https://learn.microsoft.com/en-us/typography/opentype/spec/fpgm
    """

    def decompile(self, data, ttFont):
        """Parse the binary bytecode into a ttProgram.Program object."""
        program = ttProgram.Program()
        program.fromBytecode(data)
        self.program = program

    def compile(self, ttFont):
        """Return the program's bytecode, or b"" if no program is set."""
        if hasattr(self, "program"):
            return self.program.getBytecode()
        return b""

    def toXML(self, writer, ttFont):
        """Delegate XML output to the contained program."""
        self.program.toXML(writer, ttFont)

    def fromXML(self, name, attrs, content, ttFont):
        """Rebuild the contained program from TTX XML."""
        program = ttProgram.Program()
        program.fromXML(name, attrs, content, ttFont)
        self.program = program

    def __bool__(self):
        """
        >>> fpgm = table__f_p_g_m()
        >>> bool(fpgm)
        False
        >>> p = ttProgram.Program()
        >>> fpgm.program = p
        >>> bool(fpgm)
        False
        >>> bc = bytearray([0])
        >>> p.fromBytecode(bc)
        >>> bool(fpgm)
        True
        >>> p.bytecode.pop()
        0
        >>> bool(fpgm)
        False
        """
        return hasattr(self, "program") and bool(self.program)

    # Python 2 compatibility alias.
    __nonzero__ = __bool__
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Run this module's doctests when executed directly; the exit status
    # is the number of failing tests (0 on success).
    import sys
    import doctest

    sys.exit(doctest.testmod().failed)
|
||||
@@ -0,0 +1,261 @@
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.fixedTools import (
|
||||
fixedToFloat as fi2fl,
|
||||
floatToFixed as fl2fi,
|
||||
floatToFixedToStr as fl2str,
|
||||
strToFixedToFloat as str2fl,
|
||||
)
|
||||
from fontTools.misc.textTools import Tag, bytesjoin, safeEval
|
||||
from fontTools.ttLib import TTLibError
|
||||
from . import DefaultTable
|
||||
import struct
|
||||
|
||||
|
||||
# Apple's documentation of 'fvar':
|
||||
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6fvar.html
|
||||
|
||||
# Binary layouts of the 'fvar' header, axis records, and the fixed prefix
# of instance records, in sstruct notation (all big-endian).
FVAR_HEADER_FORMAT = """
    > # big endian
    version: L
    offsetToData: H
    countSizePairs: H
    axisCount: H
    axisSize: H
    instanceCount: H
    instanceSize: H
"""

FVAR_AXIS_FORMAT = """
    > # big endian
    axisTag: 4s
    minValue: 16.16F
    defaultValue: 16.16F
    maxValue: 16.16F
    flags: H
    axisNameID: H
"""

# Instance records additionally carry one 16.16 coordinate per axis and an
# optional trailing postScriptNameID; see NamedInstance.compile/decompile.
FVAR_INSTANCE_FORMAT = """
    > # big endian
    subfamilyNameID: H
    flags: H
"""
|
||||
|
||||
|
||||
class table__f_v_a_r(DefaultTable.DefaultTable):
    """Font Variations table

    The ``fvar`` table contains records of the variation axes and of the
    named instances in a variable font.

    See also https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6fvar.html
    """

    # 'name' is needed to resolve axis/instance name IDs (used for the
    # human-readable comments written by toXML).
    dependencies = ["name"]

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        self.axes = []
        self.instances = []

    def compile(self, ttFont):
        """Compile the table to binary.

        The optional per-instance postScriptNameID field is emitted for all
        instances if any instance defines one (i.e. differs from 0xFFFF).
        """
        instanceSize = sstruct.calcsize(FVAR_INSTANCE_FORMAT) + (len(self.axes) * 4)
        includePostScriptNames = any(
            instance.postscriptNameID != 0xFFFF for instance in self.instances
        )
        if includePostScriptNames:
            instanceSize += 2
        header = {
            "version": 0x00010000,
            "offsetToData": sstruct.calcsize(FVAR_HEADER_FORMAT),
            "countSizePairs": 2,
            "axisCount": len(self.axes),
            "axisSize": sstruct.calcsize(FVAR_AXIS_FORMAT),
            "instanceCount": len(self.instances),
            "instanceSize": instanceSize,
        }
        result = [sstruct.pack(FVAR_HEADER_FORMAT, header)]
        result.extend([axis.compile() for axis in self.axes])
        axisTags = [axis.axisTag for axis in self.axes]
        for instance in self.instances:
            result.append(instance.compile(axisTags, includePostScriptNames))
        return bytesjoin(result)

    def decompile(self, data, ttFont):
        """Parse binary data into ``self.axes`` and ``self.instances``."""
        header = {}
        headerSize = sstruct.calcsize(FVAR_HEADER_FORMAT)
        header = sstruct.unpack(FVAR_HEADER_FORMAT, data[0:headerSize])
        if header["version"] != 0x00010000:
            raise TTLibError("unsupported 'fvar' version %04x" % header["version"])
        pos = header["offsetToData"]
        axisSize = header["axisSize"]
        for _ in range(header["axisCount"]):
            axis = Axis()
            axis.decompile(data[pos : pos + axisSize])
            self.axes.append(axis)
            pos += axisSize
        instanceSize = header["instanceSize"]
        axisTags = [axis.axisTag for axis in self.axes]
        for _ in range(header["instanceCount"]):
            instance = NamedInstance()
            instance.decompile(data[pos : pos + instanceSize], axisTags)
            self.instances.append(instance)
            pos += instanceSize

    def toXML(self, writer, ttFont):
        """Write all axes, then all named instances, as TTX XML."""
        for axis in self.axes:
            axis.toXML(writer, ttFont)
        for instance in self.instances:
            instance.toXML(writer, ttFont)

    def fromXML(self, name, attrs, content, ttFont):
        """Rebuild axes and instances from TTX XML elements."""
        if name == "Axis":
            axis = Axis()
            axis.fromXML(name, attrs, content, ttFont)
            self.axes.append(axis)
        elif name == "NamedInstance":
            instance = NamedInstance()
            instance.fromXML(name, attrs, content, ttFont)
            self.instances.append(instance)

    def getAxes(self):
        """Return {axisTag: (minValue, defaultValue, maxValue)} for all axes."""
        return {a.axisTag: (a.minValue, a.defaultValue, a.maxValue) for a in self.axes}
|
||||
|
||||
|
||||
class Axis(object):
    """A single variation-axis record of the ``fvar`` table."""

    def __init__(self):
        self.axisTag = None  # 4-character axis tag, e.g. 'wght'
        self.axisNameID = 0  # 'name' table ID of the axis name
        self.flags = 0
        self.minValue = -1.0
        self.defaultValue = 0.0
        self.maxValue = 1.0

    def compile(self):
        """Pack this record into its binary form."""
        return sstruct.pack(FVAR_AXIS_FORMAT, self)

    def decompile(self, data):
        """Unpack one binary axis record into this object's attributes."""
        sstruct.unpack2(FVAR_AXIS_FORMAT, data, self)

    def toXML(self, writer, ttFont):
        """Write this axis as an <Axis> element with one child per field."""
        # Emit the resolved axis name as a comment, when available.
        name = (
            ttFont["name"].getDebugName(self.axisNameID) if "name" in ttFont else None
        )
        if name is not None:
            writer.newline()
            writer.comment(name)
            writer.newline()
        writer.begintag("Axis")
        writer.newline()
        for tag, value in [
            ("AxisTag", self.axisTag),
            ("Flags", "0x%X" % self.flags),
            ("MinValue", fl2str(self.minValue, 16)),
            ("DefaultValue", fl2str(self.defaultValue, 16)),
            ("MaxValue", fl2str(self.maxValue, 16)),
            ("AxisNameID", str(self.axisNameID)),
        ]:
            writer.begintag(tag)
            writer.write(value)
            writer.endtag(tag)
            writer.newline()
        writer.endtag("Axis")
        writer.newline()

    def fromXML(self, name, _attrs, content, ttFont):
        """Rebuild the axis from the child elements written by toXML."""
        assert name == "Axis"
        for tag, _, value in filter(lambda t: type(t) is tuple, content):
            value = "".join(value)
            if tag == "AxisTag":
                self.axisTag = Tag(value)
            elif tag in {"Flags", "MinValue", "DefaultValue", "MaxValue", "AxisNameID"}:
                # Attribute name is the element tag with the first letter
                # lowercased; *Value fields are 16.16 fixed, the rest ints.
                setattr(
                    self,
                    tag[0].lower() + tag[1:],
                    str2fl(value, 16) if tag.endswith("Value") else safeEval(value),
                )
|
||||
|
||||
|
||||
class NamedInstance(object):
    """A named-instance record of the ``fvar`` table."""

    def __init__(self):
        self.subfamilyNameID = 0
        # 0xFFFF means "no PostScript name"; the optional binary field is
        # then omitted.
        self.postscriptNameID = 0xFFFF
        self.flags = 0
        self.coordinates = {}  # axisTag -> float coordinate

    def compile(self, axisTags, includePostScriptName):
        """Serialize this record; coordinates follow *axisTags* order."""
        result = [sstruct.pack(FVAR_INSTANCE_FORMAT, self)]
        for axis in axisTags:
            # Coordinates are stored as 16.16 fixed-point.
            fixedCoord = fl2fi(self.coordinates[axis], 16)
            result.append(struct.pack(">l", fixedCoord))
        if includePostScriptName:
            result.append(struct.pack(">H", self.postscriptNameID))
        return bytesjoin(result)

    def decompile(self, data, axisTags):
        """Parse one record; the trailing postScriptNameID is optional."""
        sstruct.unpack2(FVAR_INSTANCE_FORMAT, data, self)
        pos = sstruct.calcsize(FVAR_INSTANCE_FORMAT)
        for axis in axisTags:
            value = struct.unpack(">l", data[pos : pos + 4])[0]
            self.coordinates[axis] = fi2fl(value, 16)
            pos += 4
        # Two extra bytes after the coordinates hold the postScriptNameID.
        if pos + 2 <= len(data):
            self.postscriptNameID = struct.unpack(">H", data[pos : pos + 2])[0]
        else:
            self.postscriptNameID = 0xFFFF

    def toXML(self, writer, ttFont):
        """Write this instance as a <NamedInstance> element."""
        # Emit resolved names as comments for readability of the TTX dump.
        name = (
            ttFont["name"].getDebugName(self.subfamilyNameID)
            if "name" in ttFont
            else None
        )
        if name is not None:
            writer.newline()
            writer.comment(name)
            writer.newline()
        psname = (
            ttFont["name"].getDebugName(self.postscriptNameID)
            if "name" in ttFont
            else None
        )
        if psname is not None:
            writer.comment("PostScript: " + psname)
            writer.newline()
        # Only include the postscriptNameID attribute when it is set.
        if self.postscriptNameID == 0xFFFF:
            writer.begintag(
                "NamedInstance",
                flags=("0x%X" % self.flags),
                subfamilyNameID=self.subfamilyNameID,
            )
        else:
            writer.begintag(
                "NamedInstance",
                flags=("0x%X" % self.flags),
                subfamilyNameID=self.subfamilyNameID,
                postscriptNameID=self.postscriptNameID,
            )
        writer.newline()
        for axis in ttFont["fvar"].axes:
            writer.simpletag(
                "coord",
                axis=axis.axisTag,
                value=fl2str(self.coordinates[axis.axisTag], 16),
            )
            writer.newline()
        writer.endtag("NamedInstance")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Rebuild the instance from the XML written by toXML."""
        assert name == "NamedInstance"
        self.subfamilyNameID = safeEval(attrs["subfamilyNameID"])
        self.flags = safeEval(attrs.get("flags", "0"))
        if "postscriptNameID" in attrs:
            self.postscriptNameID = safeEval(attrs["postscriptNameID"])
        else:
            self.postscriptNameID = 0xFFFF

        for tag, elementAttrs, _ in filter(lambda t: type(t) is tuple, content):
            if tag == "coord":
                value = str2fl(elementAttrs["value"], 16)
                self.coordinates[elementAttrs["axis"]] = value
|
||||
@@ -0,0 +1,63 @@
|
||||
from fontTools.misc.textTools import safeEval
|
||||
from . import DefaultTable
|
||||
import struct
|
||||
|
||||
|
||||
# Behavior flag bits for gaspRange entries. The SYMMETRIC_* flags are only
# valid in version 1 of the table (see table__g_a_s_p.compile).
GASP_SYMMETRIC_GRIDFIT = 0x0004
GASP_SYMMETRIC_SMOOTHING = 0x0008
GASP_DOGRAY = 0x0002
GASP_GRIDFIT = 0x0001
|
||||
|
||||
|
||||
class table__g_a_s_p(DefaultTable.DefaultTable):
    """Grid-fitting and Scan-conversion Procedure table

    The ``gasp`` table defines the preferred rasterization settings for
    the font when rendered on monochrome and greyscale output devices.

    See also https://learn.microsoft.com/en-us/typography/opentype/spec/gasp
    """

    def decompile(self, data, ttFont):
        """Parse binary data into ``self.gaspRange`` ({maxPPEM: behavior})."""
        self.version, numRanges = struct.unpack(">HH", data[:4])
        assert 0 <= self.version <= 1, "unknown 'gasp' format: %s" % self.version
        data = data[4:]
        self.gaspRange = {}
        for i in range(numRanges):
            rangeMaxPPEM, rangeGaspBehavior = struct.unpack(">HH", data[:4])
            self.gaspRange[int(rangeMaxPPEM)] = int(rangeGaspBehavior)
            data = data[4:]
        assert not data, "too much data"

    def compile(self, ttFont):
        """Serialize ``self.gaspRange``; the version is derived from the data."""
        version = 0  # ignore self.version
        numRanges = len(self.gaspRange)
        data = b""
        # Ranges must be sorted by maxPPEM.
        items = sorted(self.gaspRange.items())
        for rangeMaxPPEM, rangeGaspBehavior in items:
            data = data + struct.pack(">HH", rangeMaxPPEM, rangeGaspBehavior)
            # Any behavior flag beyond GRIDFIT/DOGRAY requires format 1.
            if rangeGaspBehavior & ~(GASP_GRIDFIT | GASP_DOGRAY):
                version = 1
        data = struct.pack(">HH", version, numRanges) + data
        return data

    def toXML(self, writer, ttFont):
        """Write one <gaspRange> element per range, sorted by maxPPEM."""
        items = sorted(self.gaspRange.items())
        for rangeMaxPPEM, rangeGaspBehavior in items:
            writer.simpletag(
                "gaspRange",
                [
                    ("rangeMaxPPEM", rangeMaxPPEM),
                    ("rangeGaspBehavior", rangeGaspBehavior),
                ],
            )
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Read <gaspRange> elements back into ``self.gaspRange``."""
        if name != "gaspRange":
            return
        if not hasattr(self, "gaspRange"):
            self.gaspRange = {}
        self.gaspRange[safeEval(attrs["rangeMaxPPEM"])] = safeEval(
            attrs["rangeGaspBehavior"]
        )
|
||||
@@ -0,0 +1,13 @@
|
||||
from .otBase import BaseTTXConverter
|
||||
|
||||
|
||||
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6gcid.html
|
||||
class table__g_c_i_d(BaseTTXConverter):
    """Glyph ID to CID table

    The AAT ``gcid`` table stores glyphID-to-CID mappings.

    See also https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6gcid.html
    """
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,340 @@
|
||||
from collections import deque
|
||||
from functools import partial
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.textTools import safeEval
|
||||
from fontTools.misc.lazyTools import LazyDict
|
||||
from fontTools.ttLib import OPTIMIZE_FONT_SPEED
|
||||
from fontTools.ttLib.tables.TupleVariation import TupleVariation
|
||||
from . import DefaultTable
|
||||
import array
|
||||
import itertools
|
||||
import logging
|
||||
import struct
|
||||
import sys
|
||||
import fontTools.ttLib.tables.TupleVariation as tv
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
# https://www.microsoft.com/typography/otspec/gvar.htm
|
||||
# https://www.microsoft.com/typography/otspec/otvarcommonformats.htm
|
||||
#
|
||||
# Apple's documentation of 'gvar':
|
||||
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6gvar.html
|
||||
#
|
||||
# FreeType2 source code for parsing 'gvar':
|
||||
# http://git.savannah.gnu.org/cgit/freetype/freetype2.git/tree/src/truetype/ttgxvar.c
|
||||
|
||||
# The 'gvar' header is split into HEAD and TAIL format strings because the
# glyph-count field that sits between them has a variable size (see below).
GVAR_HEADER_FORMAT_HEAD = """
    > # big endian
    version: H
    reserved: H
    axisCount: H
    sharedTupleCount: H
    offsetToSharedTuples: I
"""
# In between the HEAD and TAIL lies the glyphCount, which is
# of different size: 2 bytes for gvar, and 3 bytes for GVAR.
GVAR_HEADER_FORMAT_TAIL = """
    > # big endian
    flags: H
    offsetToGlyphVariationData: I
"""

GVAR_HEADER_SIZE_HEAD = sstruct.calcsize(GVAR_HEADER_FORMAT_HEAD)
GVAR_HEADER_SIZE_TAIL = sstruct.calcsize(GVAR_HEADER_FORMAT_TAIL)
|
||||
|
||||
|
||||
class table__g_v_a_r(DefaultTable.DefaultTable):
    """Glyph Variations table

    The ``gvar`` table provides the per-glyph variation data that
    describe how glyph outlines in the ``glyf`` table change across
    the variation space that is defined for the font in the ``fvar``
    table.

    See also https://learn.microsoft.com/en-us/typography/opentype/spec/gvar
    """

    dependencies = ["fvar", "glyf"]
    # Byte size of the glyph-count header field and of per-glyph data
    # offsets: 2 for 'gvar' (a subclass may override this).
    gid_size = 2

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        self.version, self.reserved = 1, 0
        self.variations = {}  # glyphName -> list of TupleVariation

    def compile(self, ttFont):
        """Compile to binary: header, offset array, shared tuples, glyph data."""
        axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]

        sharedTuples = tv.compileSharedTuples(
            axisTags, itertools.chain(*self.variations.values())
        )
        sharedTupleIndices = {coord: i for i, coord in enumerate(sharedTuples)}
        sharedTupleSize = sum([len(c) for c in sharedTuples])
        compiledGlyphs = self.compileGlyphs_(ttFont, axisTags, sharedTupleIndices)
        # Offsets are cumulative; the extra final entry marks the data end.
        offset = 0
        offsets = []
        for glyph in compiledGlyphs:
            offsets.append(offset)
            offset += len(glyph)
        offsets.append(offset)
        compiledOffsets, tableFormat = self.compileOffsets_(offsets)

        GVAR_HEADER_SIZE = GVAR_HEADER_SIZE_HEAD + self.gid_size + GVAR_HEADER_SIZE_TAIL
        header = {}
        header["version"] = self.version
        header["reserved"] = self.reserved
        header["axisCount"] = len(axisTags)
        header["sharedTupleCount"] = len(sharedTuples)
        header["offsetToSharedTuples"] = GVAR_HEADER_SIZE + len(compiledOffsets)
        header["flags"] = tableFormat  # bit 0 selects short/long offsets
        header["offsetToGlyphVariationData"] = (
            header["offsetToSharedTuples"] + sharedTupleSize
        )

        # The glyph count is packed by hand between HEAD and TAIL because
        # its width (gid_size) is not expressible in the sstruct format.
        result = [
            sstruct.pack(GVAR_HEADER_FORMAT_HEAD, header),
            len(compiledGlyphs).to_bytes(self.gid_size, "big"),
            sstruct.pack(GVAR_HEADER_FORMAT_TAIL, header),
        ]

        result.append(compiledOffsets)
        result.extend(sharedTuples)
        result.extend(compiledGlyphs)
        return b"".join(result)

    def compileGlyphs_(self, ttFont, axisTags, sharedCoordIndices):
        """Return a list of compiled per-glyph variation blobs, in glyph order."""
        optimizeSpeed = ttFont.cfg[OPTIMIZE_FONT_SPEED]
        result = []
        glyf = ttFont["glyf"]
        for glyphName in ttFont.getGlyphOrder():
            variations = self.variations.get(glyphName, [])
            if not variations:
                # Glyphs without variations still occupy an (empty) slot.
                result.append(b"")
                continue
            pointCountUnused = 0  # pointCount is actually unused by compileGlyph
            result.append(
                compileGlyph_(
                    self.gid_size,
                    variations,
                    pointCountUnused,
                    axisTags,
                    sharedCoordIndices,
                    optimizeSize=not optimizeSpeed,
                )
            )
        return result

    def decompile(self, data, ttFont):
        """Parse binary data; per-glyph decompilation is deferred lazily."""
        axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
        glyphs = ttFont.getGlyphOrder()

        # Parse the header
        GVAR_HEADER_SIZE = GVAR_HEADER_SIZE_HEAD + self.gid_size + GVAR_HEADER_SIZE_TAIL
        sstruct.unpack(GVAR_HEADER_FORMAT_HEAD, data[:GVAR_HEADER_SIZE_HEAD], self)
        self.glyphCount = int.from_bytes(
            data[GVAR_HEADER_SIZE_HEAD : GVAR_HEADER_SIZE_HEAD + self.gid_size], "big"
        )
        sstruct.unpack(
            GVAR_HEADER_FORMAT_TAIL,
            data[GVAR_HEADER_SIZE_HEAD + self.gid_size : GVAR_HEADER_SIZE],
            self,
        )

        assert len(glyphs) == self.glyphCount, (len(glyphs), self.glyphCount)
        assert len(axisTags) == self.axisCount, (
            len(axisTags),
            self.axisCount,
            axisTags,
        )
        sharedCoords = tv.decompileSharedTuples(
            axisTags, self.sharedTupleCount, data, self.offsetToSharedTuples
        )
        variations = {}
        offsetToData = self.offsetToGlyphVariationData
        glyf = ttFont["glyf"]

        def get_read_item():
            # Bind these lookups once; read_item is then invoked per glyph
            # by the LazyDict on first access.
            reverseGlyphMap = ttFont.getReverseGlyphMap()
            tableFormat = self.flags & 1

            def read_item(glyphName):
                gid = reverseGlyphMap[glyphName]
                offsetSize = 2 if tableFormat == 0 else 4
                # Read this glyph's start offset and the next one to
                # delimit its data span.
                startOffset = GVAR_HEADER_SIZE + offsetSize * gid
                endOffset = startOffset + offsetSize * 2
                offsets = table__g_v_a_r.decompileOffsets_(
                    data[startOffset:endOffset],
                    tableFormat=tableFormat,
                    glyphCount=1,
                )
                gvarData = data[offsetToData + offsets[0] : offsetToData + offsets[1]]
                if not gvarData:
                    return []
                glyph = glyf[glyphName]
                numPointsInGlyph = self.getNumPoints_(glyph)
                return decompileGlyph_(
                    self.gid_size, numPointsInGlyph, sharedCoords, axisTags, gvarData
                )

            return read_item

        read_item = get_read_item()
        l = LazyDict({glyphs[gid]: read_item for gid in range(self.glyphCount)})

        self.variations = l

        if ttFont.lazy is False:  # Be lazy for None and True
            self.ensureDecompiled()

    def ensureDecompiled(self, recurse=False):
        # The recurse argument is unused, but part of the signature of
        # ensureDecompiled across the library.
        # Use a zero-length deque to consume the lazy dict
        deque(self.variations.values(), maxlen=0)

    @staticmethod
    def decompileOffsets_(data, tableFormat, glyphCount):
        """Unpack glyphCount+1 data offsets from *data* in the given format."""
        if tableFormat == 0:
            # Short format: array of UInt16
            offsets = array.array("H")
            offsetsSize = (glyphCount + 1) * 2
        else:
            # Long format: array of UInt32
            offsets = array.array("I")
            offsetsSize = (glyphCount + 1) * 4
        offsets.frombytes(data[0:offsetsSize])
        if sys.byteorder != "big":
            offsets.byteswap()

        # In the short format, offsets need to be multiplied by 2.
        # This is not documented in Apple's TrueType specification,
        # but can be inferred from the FreeType implementation, and
        # we could verify it with two sample GX fonts.
        if tableFormat == 0:
            offsets = [off * 2 for off in offsets]

        return offsets

    @staticmethod
    def compileOffsets_(offsets):
        """Packs a list of offsets into a 'gvar' offset table.

        Returns a pair (bytestring, tableFormat). Bytestring is the
        packed offset table. Format indicates whether the table
        uses short (tableFormat=0) or long (tableFormat=1) integers.
        The returned tableFormat should get packed into the flags field
        of the 'gvar' header.
        """
        assert len(offsets) >= 2
        # Offsets must be non-decreasing.
        for i in range(1, len(offsets)):
            assert offsets[i - 1] <= offsets[i]
        if max(offsets) <= 0xFFFF * 2:
            # Short format stores offset // 2 in a UInt16.
            packed = array.array("H", [n >> 1 for n in offsets])
            tableFormat = 0
        else:
            packed = array.array("I", offsets)
            tableFormat = 1
        if sys.byteorder != "big":
            packed.byteswap()
        return (packed.tobytes(), tableFormat)

    def toXML(self, writer, ttFont):
        """Write version/reserved fields and one <glyphVariations> per glyph."""
        writer.simpletag("version", value=self.version)
        writer.newline()
        writer.simpletag("reserved", value=self.reserved)
        writer.newline()
        axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
        for glyphName in ttFont.getGlyphNames():
            variations = self.variations.get(glyphName)
            if not variations:
                continue
            writer.begintag("glyphVariations", glyph=glyphName)
            writer.newline()
            for gvar in variations:
                gvar.toXML(writer, axisTags)
            writer.endtag("glyphVariations")
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Rebuild the table from the TTX XML written by toXML."""
        if name == "version":
            self.version = safeEval(attrs["value"])
        elif name == "reserved":
            self.reserved = safeEval(attrs["value"])
        elif name == "glyphVariations":
            if not hasattr(self, "variations"):
                self.variations = {}
            glyphName = attrs["glyph"]
            # The point count comes from the glyf outline (plus phantoms).
            glyph = ttFont["glyf"][glyphName]
            numPointsInGlyph = self.getNumPoints_(glyph)
            glyphVariations = []
            for element in content:
                if isinstance(element, tuple):
                    name, attrs, content = element
                    if name == "tuple":
                        gvar = TupleVariation({}, [None] * numPointsInGlyph)
                        glyphVariations.append(gvar)
                        for tupleElement in content:
                            if isinstance(tupleElement, tuple):
                                tupleName, tupleAttrs, tupleContent = tupleElement
                                gvar.fromXML(tupleName, tupleAttrs, tupleContent)
            self.variations[glyphName] = glyphVariations

    @staticmethod
    def getNumPoints_(glyph):
        """Return the point count of *glyph*, including the 4 phantom points."""
        NUM_PHANTOM_POINTS = 4

        if glyph.isComposite():
            return len(glyph.components) + NUM_PHANTOM_POINTS
        else:
            # Empty glyphs (eg. space, nonmarkingreturn) have no "coordinates" attribute.
            return len(getattr(glyph, "coordinates", [])) + NUM_PHANTOM_POINTS
|
||||
|
||||
|
||||
def compileGlyph_(
    dataOffsetSize,
    variations,
    pointCount,
    axisTags,
    sharedCoordIndices,
    *,
    optimizeSize=True,
):
    """Compile one glyph's variations into a GlyphVariationData blob.

    Returns b"" when there is nothing to store; otherwise the blob is
    padded with a zero byte to an even length.
    """
    assert dataOffsetSize in (2, 3)
    tupleVariationCount, tuples, data = tv.compileTupleVariationStore(
        variations, pointCount, axisTags, sharedCoordIndices, optimizeSize=optimizeSize
    )
    if tupleVariationCount == 0:
        return b""

    # Serialized data starts after the 2-byte count, the offset field,
    # and the tuple-variation headers.
    offsetToData = 2 + dataOffsetSize + len(tuples)

    result = [
        tupleVariationCount.to_bytes(2, "big"),
        offsetToData.to_bytes(dataOffsetSize, "big"),
        tuples,
        data,
    ]
    if (offsetToData + len(data)) % 2 != 0:
        result.append(b"\0")  # padding
    return b"".join(result)
|
||||
|
||||
|
||||
def decompileGlyph_(dataOffsetSize, pointCount, sharedTuples, axisTags, data):
    """Decompile one glyph's GlyphVariationData blob into TupleVariations.

    *dataOffsetSize* is the width in bytes of the offset field (2 for
    'gvar', 3 for the extended form). Returns a list of TupleVariation
    objects; an empty list for blobs too short to contain any data.
    """
    assert dataOffsetSize in (2, 3)
    # A blob too small to hold even the count and offset has no variations.
    if len(data) < 2 + dataOffsetSize:
        return []

    tupleVariationCount = int.from_bytes(data[:2], "big")
    offsetToData = int.from_bytes(data[2 : 2 + dataOffsetSize], "big")

    return tv.decompileTupleVariationStore(
        "gvar",
        axisTags,
        tupleVariationCount,
        pointCount,
        sharedTuples,
        data,
        2 + dataOffsetSize,
        offsetToData,
    )
|
||||
@@ -0,0 +1,127 @@
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.textTools import bytechr, byteord, strjoin
|
||||
from . import DefaultTable
|
||||
import array
|
||||
from collections.abc import Mapping
|
||||
|
||||
hdmxHeaderFormat = """
|
||||
> # big endian!
|
||||
version: H
|
||||
numRecords: H
|
||||
recordSize: l
|
||||
"""
|
||||
|
||||
|
||||
class _GlyphnamedList(Mapping):
|
||||
def __init__(self, reverseGlyphOrder, data):
|
||||
self._array = data
|
||||
self._map = dict(reverseGlyphOrder)
|
||||
|
||||
def __getitem__(self, k):
|
||||
return self._array[self._map[k]]
|
||||
|
||||
def __len__(self):
|
||||
return len(self._map)
|
||||
|
||||
def __iter__(self):
|
||||
return iter(self._map)
|
||||
|
||||
def keys(self):
|
||||
return self._map.keys()
|
||||
|
||||
|
||||
class table__h_d_m_x(DefaultTable.DefaultTable):
    """Horizontal Device Metrics table

    The ``hdmx`` table is an optional table that stores advance widths for
    glyph outlines at specified pixel sizes.

    See also https://learn.microsoft.com/en-us/typography/opentype/spec/hdmx
    """

    def decompile(self, data, ttFont):
        """Parse binary data into ``self.hdmx``: {ppem: widths mapping}."""
        numGlyphs = ttFont["maxp"].numGlyphs
        glyphOrder = ttFont.getGlyphOrder()
        dummy, data = sstruct.unpack2(hdmxHeaderFormat, data, self)
        self.hdmx = {}
        for i in range(self.numRecords):
            ppem = byteord(data[0])
            maxSize = byteord(data[1])
            # Widths are one byte per glyph in glyph-ID order; expose them
            # keyed by glyph name.
            widths = _GlyphnamedList(
                ttFont.getReverseGlyphMap(), array.array("B", data[2 : 2 + numGlyphs])
            )
            self.hdmx[ppem] = widths
            # Records are padded to recordSize; skip to the next record.
            data = data[self.recordSize :]
        assert len(data) == 0, "too much hdmx data"

    def compile(self, ttFont):
        """Serialize ``self.hdmx``; each record is 4-byte aligned, zero-padded."""
        self.version = 0
        numGlyphs = ttFont["maxp"].numGlyphs
        glyphOrder = ttFont.getGlyphOrder()
        # Record = ppem byte + max-width byte + one byte per glyph, rounded
        # up to a multiple of 4.
        self.recordSize = 4 * ((2 + numGlyphs + 3) // 4)
        pad = (self.recordSize - 2 - numGlyphs) * b"\0"
        self.numRecords = len(self.hdmx)
        data = sstruct.pack(hdmxHeaderFormat, self)
        items = sorted(self.hdmx.items())
        for ppem, widths in items:
            data = data + bytechr(ppem) + bytechr(max(widths.values()))
            for glyphName in glyphOrder:
                width = widths[glyphName]
                data = data + bytechr(width)
            data = data + pad
        return data

    def toXML(self, writer, ttFont):
        """Write the table as a compact text matrix, one row per glyph."""
        writer.begintag("hdmxData")
        writer.newline()
        ppems = sorted(self.hdmx.keys())
        records = []
        format = ""
        for ppem in ppems:
            widths = self.hdmx[ppem]
            records.append(widths)
            format = format + "%4d"
        glyphNames = ttFont.getGlyphOrder()[:]
        glyphNames.sort()
        maxNameLen = max(map(len, glyphNames))
        # Row format: right-aligned glyph name, one column per ppem, then
        # the ';' row terminator.
        format = "%" + repr(maxNameLen) + "s:" + format + " ;"
        writer.write(format % (("ppem",) + tuple(ppems)))
        writer.newline()
        writer.newline()
        for glyphName in glyphNames:
            row = []
            for ppem in ppems:
                widths = self.hdmx[ppem]
                row.append(widths[glyphName])
            # ';' terminates rows in this format, so escape it in names.
            if ";" in glyphName:
                glyphName = "\\x3b".join(glyphName.split(";"))
            writer.write(format % ((glyphName,) + tuple(row)))
            writer.newline()
        writer.endtag("hdmxData")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Parse the text matrix written by toXML back into ``self.hdmx``."""
        if name != "hdmxData":
            return
        content = strjoin(content)
        lines = content.split(";")
        topRow = lines[0].split()
        assert topRow[0] == "ppem:", "illegal hdmx format"
        ppems = list(map(int, topRow[1:]))
        self.hdmx = hdmx = {}
        for ppem in ppems:
            hdmx[ppem] = {}
        lines = (line.split() for line in lines[1:])
        for line in lines:
            if not line:
                continue
            assert line[0][-1] == ":", "illegal hdmx format"
            glyphName = line[0][:-1]
            # Un-escape glyph names that contained ';' (written as \x3b).
            if "\\" in glyphName:
                from fontTools.misc.textTools import safeEval

                glyphName = safeEval('"""' + glyphName + '"""')
            line = list(map(int, line[1:]))
            assert len(line) == len(ppems), "illegal hdmx format"
            for i, ppem in enumerate(ppems):
                hdmx[ppem][glyphName] = line[i]
|
||||
@@ -0,0 +1,130 @@
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.fixedTools import floatToFixedToStr, strToFixedToFloat
|
||||
from fontTools.misc.textTools import safeEval, num2binary, binary2num
|
||||
from fontTools.misc.timeTools import (
|
||||
timestampFromString,
|
||||
timestampToString,
|
||||
timestampNow,
|
||||
)
|
||||
from fontTools.misc.timeTools import epoch_diff as mac_epoch_diff # For backward compat
|
||||
from fontTools.misc.arrayTools import intRect, unionRect
|
||||
from . import DefaultTable
|
||||
import logging
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
headFormat = """
|
||||
> # big endian
|
||||
tableVersion: 16.16F
|
||||
fontRevision: 16.16F
|
||||
checkSumAdjustment: I
|
||||
magicNumber: I
|
||||
flags: H
|
||||
unitsPerEm: H
|
||||
created: Q
|
||||
modified: Q
|
||||
xMin: h
|
||||
yMin: h
|
||||
xMax: h
|
||||
yMax: h
|
||||
macStyle: H
|
||||
lowestRecPPEM: H
|
||||
fontDirectionHint: h
|
||||
indexToLocFormat: h
|
||||
glyphDataFormat: h
|
||||
"""
|
||||
|
||||
|
||||
class table__h_e_a_d(DefaultTable.DefaultTable):
    """Font Header table

    The ``head`` table contains a variety of font-wide information.

    See also https://learn.microsoft.com/en-us/typography/opentype/spec/head
    """

    dependencies = ["maxp", "loca", "CFF ", "CFF2"]

    def decompile(self, data, ttFont):
        """Unpack the fixed-size header into attributes on self, sanitizing
        out-of-range timestamp values."""
        dummy, rest = sstruct.unpack2(headFormat, data, self)
        if rest:
            # this is quite illegal, but there seem to be fonts out there that do this
            log.warning("extra bytes at the end of 'head' table")
            assert rest == b"\0\0"

        # For timestamp fields, ignore the top four bytes. Some fonts have
        # bogus values there. Since till 2038 those bytes only can be zero,
        # ignore them.
        #
        # https://github.com/fonttools/fonttools/issues/99#issuecomment-66776810
        for stamp in "created", "modified":
            value = getattr(self, stamp)
            if value > 0xFFFFFFFF:
                log.warning("'%s' timestamp out of range; ignoring top bytes", stamp)
                value &= 0xFFFFFFFF
                setattr(self, stamp, value)
            if value < 0x7C259DC0:  # January 1, 1970 00:00:00
                # Suspiciously small LONGDATETIME values are assumed to be
                # Unix epoch timestamps; shift them to the Mac 1904 epoch.
                log.warning(
                    "'%s' timestamp seems very low; regarding as unix timestamp", stamp
                )
                value += 0x7C259DC0
                setattr(self, stamp, value)

    def compile(self, ttFont):
        """Pack the header back to binary, optionally recalculating the
        font bounding box (CFF/CFF2 only) and the modified timestamp."""
        if ttFont.recalcBBoxes:
            # For TT-flavored fonts, xMin, yMin, xMax and yMax are set in table__m_a_x_p.recalc().
            if "CFF " in ttFont:
                topDict = ttFont["CFF "].cff.topDictIndex[0]
                self.xMin, self.yMin, self.xMax, self.yMax = intRect(topDict.FontBBox)
            elif "CFF2" in ttFont:
                # CFF2 top dicts carry no FontBBox; compute the union of
                # all charstring bounds instead.
                topDict = ttFont["CFF2"].cff.topDictIndex[0]
                charStrings = topDict.CharStrings
                fontBBox = None
                for charString in charStrings.values():
                    bounds = charString.calcBounds(charStrings)
                    if bounds is not None:
                        if fontBBox is not None:
                            fontBBox = unionRect(fontBBox, bounds)
                        else:
                            fontBBox = bounds
                if fontBBox is not None:
                    self.xMin, self.yMin, self.xMax, self.yMax = intRect(fontBBox)
        if ttFont.recalcTimestamp:
            self.modified = timestampNow()
        data = sstruct.pack(headFormat, self)
        return data

    def toXML(self, writer, ttFont):
        """Write each header field as a simple tag, formatting fixed-point,
        timestamp, hex, and bitfield values appropriately."""
        writer.comment("Most of this table will be recalculated by the compiler")
        writer.newline()
        _, names, fixes = sstruct.getformat(headFormat)
        for name in names:
            value = getattr(self, name)
            if name in fixes:
                value = floatToFixedToStr(value, precisionBits=fixes[name])
            elif name in ("created", "modified"):
                value = timestampToString(value)
            elif name in ("magicNumber", "checkSumAdjustment"):
                if value < 0:
                    value = value + 0x100000000
                value = hex(value)
                # Stripping a trailing "L" is a Python 2 long-literal
                # leftover; on Python 3 this branch never triggers.
                if value[-1:] == "L":
                    value = value[:-1]
            elif name in ("macStyle", "flags"):
                value = num2binary(value, 16)
            writer.simpletag(name, value=value)
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Parse one field tag, inverting the formatting done by toXML."""
        value = attrs["value"]
        fixes = sstruct.getformat(headFormat)[2]
        if name in fixes:
            value = strToFixedToFloat(value, precisionBits=fixes[name])
        elif name in ("created", "modified"):
            value = timestampFromString(value)
        elif name in ("macStyle", "flags"):
            value = binary2num(value)
        else:
            value = safeEval(value)
        setattr(self, name, value)
|
||||
@@ -0,0 +1,147 @@
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.textTools import safeEval
|
||||
from fontTools.misc.fixedTools import (
|
||||
ensureVersionIsLong as fi2ve,
|
||||
versionToFixed as ve2fi,
|
||||
)
|
||||
from . import DefaultTable
|
||||
import math
|
||||
|
||||
|
||||
hheaFormat = """
|
||||
> # big endian
|
||||
tableVersion: L
|
||||
ascent: h
|
||||
descent: h
|
||||
lineGap: h
|
||||
advanceWidthMax: H
|
||||
minLeftSideBearing: h
|
||||
minRightSideBearing: h
|
||||
xMaxExtent: h
|
||||
caretSlopeRise: h
|
||||
caretSlopeRun: h
|
||||
caretOffset: h
|
||||
reserved0: h
|
||||
reserved1: h
|
||||
reserved2: h
|
||||
reserved3: h
|
||||
metricDataFormat: h
|
||||
numberOfHMetrics: H
|
||||
"""
|
||||
|
||||
|
||||
class table__h_h_e_a(DefaultTable.DefaultTable):
    """Horizontal Header table

    The ``hhea`` table contains information needed during horizontal
    text layout.

    .. note::
       This converter class is kept in sync with the :class:`._v_h_e_a.table__v_h_e_a`
       table constructor.

    See also https://learn.microsoft.com/en-us/typography/opentype/spec/hhea
    """

    # Note: Keep in sync with table__v_h_e_a

    dependencies = ["hmtx", "glyf", "CFF ", "CFF2"]

    # OpenType spec renamed these, add aliases for compatibility
    @property
    def ascender(self):
        return self.ascent

    @ascender.setter
    def ascender(self, value):
        self.ascent = value

    @property
    def descender(self):
        return self.descent

    @descender.setter
    def descender(self, value):
        self.descent = value

    def decompile(self, data, ttFont):
        """Unpack the fixed-size binary header into attributes on self."""
        sstruct.unpack(hheaFormat, data, self)

    def compile(self, ttFont):
        """Pack the header; recalculate aggregate metrics first when the
        relevant outline table is loaded and recalcBBoxes is enabled."""
        if ttFont.recalcBBoxes and (
            ttFont.isLoaded("glyf")
            or ttFont.isLoaded("CFF ")
            or ttFont.isLoaded("CFF2")
        ):
            self.recalc(ttFont)
        self.tableVersion = fi2ve(self.tableVersion)
        return sstruct.pack(hheaFormat, self)

    def recalc(self, ttFont):
        """Recompute advanceWidthMax, min side bearings and xMaxExtent
        from the hmtx metrics and the glyph outline bounds."""
        if "hmtx" not in ttFont:
            return

        hmtxTable = ttFont["hmtx"]
        self.advanceWidthMax = max(adv for adv, _ in hmtxTable.metrics.values())

        # Collect per-glyph outline widths (xMax - xMin); glyphs without
        # outlines are skipped and do not affect the extremes below.
        boundsWidthDict = {}
        if "glyf" in ttFont:
            glyfTable = ttFont["glyf"]
            for name in ttFont.getGlyphOrder():
                g = glyfTable[name]
                if g.numberOfContours == 0:
                    continue
                if g.numberOfContours < 0 and not hasattr(g, "xMax"):
                    # Composite glyph without extents set.
                    # Calculate those.
                    g.recalcBounds(glyfTable)
                boundsWidthDict[name] = g.xMax - g.xMin
        elif "CFF " in ttFont or "CFF2" in ttFont:
            if "CFF " in ttFont:
                topDict = ttFont["CFF "].cff.topDictIndex[0]
            else:
                topDict = ttFont["CFF2"].cff.topDictIndex[0]
            charStrings = topDict.CharStrings
            for name in ttFont.getGlyphOrder():
                cs = charStrings[name]
                bounds = cs.calcBounds(charStrings)
                if bounds is not None:
                    boundsWidthDict[name] = int(
                        math.ceil(bounds[2]) - math.floor(bounds[0])
                    )

        if boundsWidthDict:
            minLeftSideBearing = float("inf")
            minRightSideBearing = float("inf")
            xMaxExtent = -float("inf")
            for name, boundsWidth in boundsWidthDict.items():
                advanceWidth, lsb = hmtxTable[name]
                rsb = advanceWidth - lsb - boundsWidth
                extent = lsb + boundsWidth
                minLeftSideBearing = min(minLeftSideBearing, lsb)
                minRightSideBearing = min(minRightSideBearing, rsb)
                xMaxExtent = max(xMaxExtent, extent)
            self.minLeftSideBearing = minLeftSideBearing
            self.minRightSideBearing = minRightSideBearing
            self.xMaxExtent = xMaxExtent

        else:  # No glyph has outlines.
            self.minLeftSideBearing = 0
            self.minRightSideBearing = 0
            self.xMaxExtent = 0

    def toXML(self, writer, ttFont):
        """Write each field as a simple tag; tableVersion is rendered as a
        zero-padded hex Version16Dot16 value."""
        formatstring, names, fixes = sstruct.getformat(hheaFormat)
        for name in names:
            value = getattr(self, name)
            if name == "tableVersion":
                value = fi2ve(value)
                value = "0x%08x" % value
            writer.simpletag(name, value=value)
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Parse one field tag, converting tableVersion from its hex form."""
        if name == "tableVersion":
            setattr(self, name, ve2fi(attrs["value"]))
            return
        setattr(self, name, safeEval(attrs["value"]))
|
||||
@@ -0,0 +1,164 @@
|
||||
from fontTools.misc.roundTools import otRound
|
||||
from fontTools import ttLib
|
||||
from fontTools.misc.textTools import safeEval
|
||||
from . import DefaultTable
|
||||
import sys
|
||||
import struct
|
||||
import array
|
||||
import logging
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class table__h_m_t_x(DefaultTable.DefaultTable):
    """Horizontal Metrics table

    The ``hmtx`` table contains per-glyph metrics for the glyphs in a
    ``glyf``, ``CFF ``, or ``CFF2`` table, as needed for horizontal text
    layout.

    See also https://learn.microsoft.com/en-us/typography/opentype/spec/hmtx
    """

    # These class attributes parameterize the shared implementation so that
    # the vertical-metrics table ('vmtx') can subclass and only override them.
    headerTag = "hhea"
    advanceName = "width"
    sideBearingName = "lsb"
    numberOfMetricsName = "numberOfHMetrics"
    longMetricFormat = "Hh"

    def decompile(self, data, ttFont):
        """Parse the binary metrics into self.metrics:
        glyph name -> (advance, side bearing)."""
        numGlyphs = ttFont["maxp"].numGlyphs
        headerTable = ttFont.get(self.headerTag)
        if headerTable is not None:
            numberOfMetrics = int(getattr(headerTable, self.numberOfMetricsName))
        else:
            # No hhea/vhea: assume a full (advance, sb) pair per glyph.
            numberOfMetrics = numGlyphs
        if numberOfMetrics > numGlyphs:
            log.warning(
                "The %s.%s exceeds the maxp.numGlyphs"
                % (self.headerTag, self.numberOfMetricsName)
            )
            numberOfMetrics = numGlyphs
        numberOfSideBearings = numGlyphs - numberOfMetrics
        tableSize = 4 * numberOfMetrics + 2 * numberOfSideBearings
        if len(data) < tableSize:
            raise ttLib.TTLibError(
                f"not enough '{self.tableTag}' table data: "
                f"expected {tableSize} bytes, got {len(data)}"
            )
        # Note: advanceWidth is unsigned, but some font editors might
        # read/write as signed. We can't be sure whether it was a mistake
        # or not, so we read as unsigned but also issue a warning...
        metricsFmt = ">" + self.longMetricFormat * numberOfMetrics
        metrics = struct.unpack(metricsFmt, data[: 4 * numberOfMetrics])
        data = data[4 * numberOfMetrics :]
        sideBearings = array.array("h", data[: 2 * numberOfSideBearings])
        data = data[2 * numberOfSideBearings :]

        if sys.byteorder != "big":
            sideBearings.byteswap()
        if data:
            log.warning("too much '%s' table data" % self.tableTag)
        self.metrics = {}
        glyphOrder = ttFont.getGlyphOrder()
        for i in range(numberOfMetrics):
            glyphName = glyphOrder[i]
            advanceWidth, lsb = metrics[i * 2 : i * 2 + 2]
            if advanceWidth > 32767:
                log.warning(
                    "Glyph %r has a huge advance %s (%d); is it intentional or "
                    "an (invalid) negative value?",
                    glyphName,
                    self.advanceName,
                    advanceWidth,
                )
            self.metrics[glyphName] = (advanceWidth, lsb)
        # Trailing glyphs share the last stored advance; the file only
        # carries their side bearings.
        lastAdvance = metrics[-2]
        for i in range(numberOfSideBearings):
            glyphName = glyphOrder[i + numberOfMetrics]
            self.metrics[glyphName] = (lastAdvance, sideBearings[i])

    def compile(self, ttFont):
        """Serialize self.metrics, trimming the run of equal trailing
        advances into side-bearing-only records when a header table exists."""
        metrics = []
        hasNegativeAdvances = False
        for glyphName in ttFont.getGlyphOrder():
            advanceWidth, sideBearing = self.metrics[glyphName]
            if advanceWidth < 0:
                log.error(
                    "Glyph %r has negative advance %s" % (glyphName, self.advanceName)
                )
                hasNegativeAdvances = True
            metrics.append([advanceWidth, sideBearing])

        headerTable = ttFont.get(self.headerTag)
        if headerTable is not None:
            # Find how many trailing glyphs share the last advance; those
            # can be stored as bare side bearings.
            lastAdvance = metrics[-1][0]
            lastIndex = len(metrics)
            while metrics[lastIndex - 2][0] == lastAdvance:
                lastIndex -= 1
                if lastIndex <= 1:
                    # all advances are equal
                    lastIndex = 1
                    break
            additionalMetrics = metrics[lastIndex:]
            additionalMetrics = [otRound(sb) for _, sb in additionalMetrics]
            metrics = metrics[:lastIndex]
            numberOfMetrics = len(metrics)
            setattr(headerTable, self.numberOfMetricsName, numberOfMetrics)
        else:
            # no hhea/vhea, can't store numberOfMetrics; assume == numGlyphs
            numberOfMetrics = ttFont["maxp"].numGlyphs
            additionalMetrics = []

        allMetrics = []
        for advance, sb in metrics:
            allMetrics.extend([otRound(advance), otRound(sb)])
        metricsFmt = ">" + self.longMetricFormat * numberOfMetrics
        try:
            data = struct.pack(metricsFmt, *allMetrics)
        except struct.error as e:
            if "out of range" in str(e) and hasNegativeAdvances:
                raise ttLib.TTLibError(
                    "'%s' table can't contain negative advance %ss"
                    % (self.tableTag, self.advanceName)
                )
            else:
                raise
        additionalMetrics = array.array("h", additionalMetrics)
        if sys.byteorder != "big":
            additionalMetrics.byteswap()
        data = data + additionalMetrics.tobytes()
        return data

    def toXML(self, writer, ttFont):
        """Write one <mtx> element per glyph, sorted by glyph name."""
        names = sorted(self.metrics.keys())
        for glyphName in names:
            advance, sb = self.metrics[glyphName]
            writer.simpletag(
                "mtx",
                [
                    ("name", glyphName),
                    (self.advanceName, advance),
                    (self.sideBearingName, sb),
                ],
            )
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Parse one <mtx> element into self.metrics."""
        if not hasattr(self, "metrics"):
            self.metrics = {}
        if name == "mtx":
            self.metrics[attrs["name"]] = (
                safeEval(attrs[self.advanceName]),
                safeEval(attrs[self.sideBearingName]),
            )

    def __delitem__(self, glyphName):
        del self.metrics[glyphName]

    def __getitem__(self, glyphName):
        return self.metrics[glyphName]

    def __setitem__(self, glyphName, advance_sb_pair):
        self.metrics[glyphName] = tuple(advance_sb_pair)
|
||||
@@ -0,0 +1,289 @@
|
||||
from fontTools.ttLib import getSearchRange
|
||||
from fontTools.misc.textTools import safeEval, readHex
|
||||
from fontTools.misc.fixedTools import fixedToFloat as fi2fl, floatToFixed as fl2fi
|
||||
from . import DefaultTable
|
||||
import struct
|
||||
import sys
|
||||
import array
|
||||
import logging
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class table__k_e_r_n(DefaultTable.DefaultTable):
    """Kerning table

    The ``kern`` table contains values that contextually adjust the inter-glyph
    spacing for the glyphs in a ``glyf`` table.

    Note that similar contextual spacing adjustments can also be stored
    in the "kern" feature of a ``GPOS`` table.

    See also https://learn.microsoft.com/en-us/typography/opentype/spec/kern
    """

    def getkern(self, format):
        """Return the first subtable with the given format, or None."""
        for subtable in self.kernTables:
            if subtable.format == format:
                return subtable
        return None  # not found

    def decompile(self, data, ttFont):
        """Parse the table header (OpenType or AAT flavored) and each
        kerning subtable into self.kernTables."""
        version, nTables = struct.unpack(">HH", data[:4])
        apple = False
        if (len(data) >= 8) and (version == 1):
            # AAT Apple's "new" format. Hm.
            version, nTables = struct.unpack(">LL", data[:8])
            self.version = fi2fl(version, 16)
            data = data[8:]
            apple = True
        else:
            self.version = version
            data = data[4:]
        self.kernTables = []
        for i in range(nTables):
            if self.version == 1.0:
                # Apple
                length, coverage, subtableFormat = struct.unpack(">LBB", data[:6])
            else:
                # in OpenType spec the "version" field refers to the common
                # subtable header; the actual subtable format is stored in
                # the 8-15 mask bits of "coverage" field.
                # This "version" is always 0 so we ignore it here
                _, length, subtableFormat, coverage = struct.unpack(">HHBB", data[:6])
                if nTables == 1 and subtableFormat == 0:
                    # The "length" value is ignored since some fonts
                    # (like OpenSans and Calibri) have a subtable larger than
                    # its value.
                    (nPairs,) = struct.unpack(">H", data[6:8])
                    calculated_length = (nPairs * 6) + 14
                    if length != calculated_length:
                        log.warning(
                            "'kern' subtable longer than defined: "
                            "%d bytes instead of %d bytes" % (calculated_length, length)
                        )
                    length = calculated_length
            if subtableFormat not in kern_classes:
                # Unsupported formats are preserved verbatim.
                subtable = KernTable_format_unkown(subtableFormat)
            else:
                subtable = kern_classes[subtableFormat](apple)
            subtable.decompile(data[:length], ttFont)
            self.kernTables.append(subtable)
            data = data[length:]

    def compile(self, ttFont):
        """Serialize the header and all subtables back to binary."""
        if hasattr(self, "kernTables"):
            nTables = len(self.kernTables)
        else:
            nTables = 0
        if self.version == 1.0:
            # AAT Apple's "new" format.
            data = struct.pack(">LL", fl2fi(self.version, 16), nTables)
        else:
            data = struct.pack(">HH", self.version, nTables)
        if hasattr(self, "kernTables"):
            for subtable in self.kernTables:
                data = data + subtable.compile(ttFont)
        return data

    def toXML(self, writer, ttFont):
        """Write the table version followed by each subtable's XML."""
        writer.simpletag("version", value=self.version)
        writer.newline()
        for subtable in self.kernTables:
            subtable.toXML(writer, ttFont)

    def fromXML(self, name, attrs, content, ttFont):
        """Parse the version tag or one <kernsubtable> element."""
        if name == "version":
            self.version = safeEval(attrs["value"])
            return
        if name != "kernsubtable":
            return
        if not hasattr(self, "kernTables"):
            self.kernTables = []
        format = safeEval(attrs["format"])
        if format not in kern_classes:
            subtable = KernTable_format_unkown(format)
        else:
            # version 1.0 means the Apple (AAT) flavor of the table.
            apple = self.version == 1.0
            subtable = kern_classes[format](apple)
        self.kernTables.append(subtable)
        subtable.fromXML(name, attrs, content, ttFont)
|
||||
|
||||
|
||||
class KernTable_format_0(object):
    """Format 0 kerning subtable: a sorted list of (left, right) glyph
    pairs with signed kerning values, in either the OpenType or the Apple
    (AAT) subtable-header flavor."""

    # 'version' is kept for backward compatibility
    version = format = 0

    def __init__(self, apple=False):
        # apple selects the AAT subtable header layout over the OpenType one.
        self.apple = apple

    def decompile(self, data, ttFont):
        """Parse the subtable header and pair list into self.kernTable:
        (leftGlyphName, rightGlyphName) -> signed kern value."""
        if not self.apple:
            version, length, subtableFormat, coverage = struct.unpack(">HHBB", data[:6])
            if version != 0:
                from fontTools.ttLib import TTLibError

                raise TTLibError("unsupported kern subtable version: %d" % version)
            tupleIndex = None
            # Should we also assert length == len(data)?
            data = data[6:]
        else:
            length, coverage, subtableFormat, tupleIndex = struct.unpack(
                ">LBBH", data[:8]
            )
            data = data[8:]
        assert self.format == subtableFormat, "unsupported format"
        self.coverage = coverage
        self.tupleIndex = tupleIndex

        self.kernTable = kernTable = {}

        # The binary-search helper fields are recomputed on compile, so the
        # stored values are only read past here.
        nPairs, searchRange, entrySelector, rangeShift = struct.unpack(
            ">HHHH", data[:8]
        )
        data = data[8:]

        # Read all pairs as one unsigned 16-bit array, then sign-correct
        # each kern value by hand.
        datas = array.array("H", data[: 6 * nPairs])
        if sys.byteorder != "big":
            datas.byteswap()
        it = iter(datas)
        glyphOrder = ttFont.getGlyphOrder()
        for k in range(nPairs):
            left, right, value = next(it), next(it), next(it)
            if value >= 32768:
                value -= 65536
            try:
                kernTable[(glyphOrder[left], glyphOrder[right])] = value
            except IndexError:
                # Slower, but will not throw an IndexError on an invalid
                # glyph id.
                kernTable[(ttFont.getGlyphName(left), ttFont.getGlyphName(right))] = (
                    value
                )
        if len(data) > 6 * nPairs + 4:  # Ignore up to 4 bytes excess
            log.warning(
                "excess data in 'kern' subtable: %d bytes", len(data) - 6 * nPairs
            )

    def compile(self, ttFont):
        """Serialize the pair list, sorted by glyph id, with the subtable
        header matching self.apple's flavor."""
        nPairs = min(len(self.kernTable), 0xFFFF)
        searchRange, entrySelector, rangeShift = getSearchRange(nPairs, 6)
        # Clamp the binary-search fields to their 16-bit storage.
        searchRange &= 0xFFFF
        entrySelector = min(entrySelector, 0xFFFF)
        rangeShift = min(rangeShift, 0xFFFF)
        data = struct.pack(">HHHH", nPairs, searchRange, entrySelector, rangeShift)

        # yeehee! (I mean, turn names into indices)
        try:
            reverseOrder = ttFont.getReverseGlyphMap()
            kernTable = sorted(
                (reverseOrder[left], reverseOrder[right], value)
                for ((left, right), value) in self.kernTable.items()
            )
        except KeyError:
            # Slower, but will not throw KeyError on invalid glyph id.
            getGlyphID = ttFont.getGlyphID
            kernTable = sorted(
                (getGlyphID(left), getGlyphID(right), value)
                for ((left, right), value) in self.kernTable.items()
            )

        for left, right, value in kernTable:
            data = data + struct.pack(">HHh", left, right, value)

        if not self.apple:
            version = 0
            length = len(data) + 6
            if length >= 0x10000:
                log.warning(
                    '"kern" subtable overflow, '
                    "truncating length value while preserving pairs."
                )
                length &= 0xFFFF
            header = struct.pack(">HHBB", version, length, self.format, self.coverage)
        else:
            if self.tupleIndex is None:
                # sensible default when compiling a TTX from an old fonttools
                # or when inserting a Windows-style format 0 subtable into an
                # Apple version=1.0 kern table
                log.warning("'tupleIndex' is None; default to 0")
                self.tupleIndex = 0
            length = len(data) + 8
            header = struct.pack(
                ">LBBH", length, self.coverage, self.format, self.tupleIndex
            )
        return header + data

    def toXML(self, writer, ttFont):
        """Write the subtable as a <kernsubtable> element with one <pair>
        per kerning pair, sorted by glyph-name pair."""
        attrs = dict(coverage=self.coverage, format=self.format)
        if self.apple:
            if self.tupleIndex is None:
                log.warning("'tupleIndex' is None; default to 0")
                attrs["tupleIndex"] = 0
            else:
                attrs["tupleIndex"] = self.tupleIndex
        writer.begintag("kernsubtable", **attrs)
        writer.newline()
        items = sorted(self.kernTable.items())
        for (left, right), value in items:
            writer.simpletag("pair", [("l", left), ("r", right), ("v", value)])
            writer.newline()
        writer.endtag("kernsubtable")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Rebuild the subtable from the XML written by toXML."""
        self.coverage = safeEval(attrs["coverage"])
        subtableFormat = safeEval(attrs["format"])
        if self.apple:
            if "tupleIndex" in attrs:
                self.tupleIndex = safeEval(attrs["tupleIndex"])
            else:
                # previous fontTools versions didn't export tupleIndex
                log.warning("Apple kern subtable is missing 'tupleIndex' attribute")
                self.tupleIndex = None
        else:
            self.tupleIndex = None
        assert subtableFormat == self.format, "unsupported format"
        if not hasattr(self, "kernTable"):
            self.kernTable = {}
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attrs, content = element
            self.kernTable[(attrs["l"], attrs["r"])] = safeEval(attrs["v"])

    def __getitem__(self, pair):
        return self.kernTable[pair]

    def __setitem__(self, pair, value):
        self.kernTable[pair] = value

    def __delitem__(self, pair):
        del self.kernTable[pair]
|
||||
|
||||
|
||||
class KernTable_format_unkown(object):
    """Placeholder for a 'kern' subtable of an unrecognized format.

    The raw subtable bytes are kept verbatim so unknown formats survive a
    decompile/compile (or TTX) round trip without being interpreted.
    Note: the "unkown" spelling is kept for backward compatibility.
    """

    def __init__(self, format):
        self.format = format

    def decompile(self, data, ttFont):
        # Store the undecoded bytes as-is.
        self.data = data

    def compile(self, ttFont):
        # Emit exactly what was read.
        return self.data

    def toXML(self, writer, ttFont):
        writer.begintag("kernsubtable", format=self.format)
        writer.newline()
        writer.comment("unknown 'kern' subtable format")
        writer.newline()
        writer.dumphex(self.data)
        writer.endtag("kernsubtable")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        # The TTX dump is a hex blob; decode it back to raw bytes.
        self.decompile(readHex(content), ttFont)
|
||||
|
||||
|
||||
# Registry mapping 'kern' subtable format numbers to handler classes;
# formats absent here are carried verbatim via KernTable_format_unkown.
kern_classes = {0: KernTable_format_0}
|
||||
@@ -0,0 +1,13 @@
|
||||
from .otBase import BaseTTXConverter
|
||||
|
||||
|
||||
class table__l_c_a_r(BaseTTXConverter):
    """Ligature Caret table

    The AAT ``lcar`` table stores division points within ligatures, which applications
    can use to position carets properly between the logical parts of the ligature.

    See also https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6lcar.html
    """

    # Fully handled by the generic otData-driven BaseTTXConverter machinery.
    pass
|
||||
@@ -0,0 +1,70 @@
|
||||
from . import DefaultTable
|
||||
import sys
|
||||
import array
|
||||
import logging
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class table__l_o_c_a(DefaultTable.DefaultTable):
    """Index to Location table

    The ``loca`` table stores the offsets in the ``glyf`` table that correspond
    to the descriptions of each glyph. The glyphs are references by Glyph ID.

    See also https://learn.microsoft.com/en-us/typography/opentype/spec/loca
    """

    dependencies = ["glyf"]

    def decompile(self, data, ttFont):
        """Read the offsets; the short format stores offset/2, so values
        are scaled up. self.locations always ends up as an "I" array."""
        longFormat = ttFont["head"].indexToLocFormat
        locations = array.array("I" if longFormat else "H")
        locations.frombytes(data)
        if sys.byteorder != "big":
            locations.byteswap()
        if not longFormat:
            # Expand half-offsets to real byte offsets.
            locations = array.array("I", (offset * 2 for offset in locations))
        if len(locations) < (ttFont["maxp"].numGlyphs + 1):
            log.warning(
                "corrupt 'loca' table, or wrong numGlyphs in 'maxp': %d %d",
                len(locations) - 1,
                ttFont["maxp"].numGlyphs,
            )
        self.locations = locations

    def compile(self, ttFont):
        """Write the offsets, picking the short format when all offsets are
        even and below 0x20000, and record the choice in 'head'."""
        try:
            max_location = max(self.locations)
        except AttributeError:
            # Never populated: behave as an empty table.
            self.set([])
            max_location = 0
        fitsShort = max_location < 0x20000 and all(
            offset % 2 == 0 for offset in self.locations
        )
        if fitsShort:
            locations = array.array("H", (offset // 2 for offset in self.locations))
            ttFont["head"].indexToLocFormat = 0
        else:
            locations = array.array("I", self.locations)
            ttFont["head"].indexToLocFormat = 1
        if sys.byteorder != "big":
            locations.byteswap()
        return locations.tobytes()

    def set(self, locations):
        """Replace the offsets with the given sequence (stored as "I")."""
        self.locations = array.array("I", locations)

    def toXML(self, writer, ttFont):
        writer.comment("The 'loca' table will be calculated by the compiler")
        writer.newline()

    def __getitem__(self, index):
        return self.locations[index]

    def __len__(self):
        return len(self.locations)
|
||||
@@ -0,0 +1,72 @@
|
||||
from fontTools.misc.textTools import bytesjoin, tobytes, safeEval
|
||||
from . import DefaultTable
|
||||
import struct
|
||||
|
||||
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6ltag.html
|
||||
|
||||
|
||||
class table__l_t_a_g(DefaultTable.DefaultTable):
    """Language Tag table

    The AAT ``ltag`` table contains mappings between the numeric codes used
    in the language field of the ``name`` table and IETF language tags.

    See also https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6ltag.html
    """

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        # Version 1 is the only defined table version; flags are reserved.
        self.version, self.flags = 1, 0
        self.tags = []

    def addTag(self, tag):
        """Add 'tag' to the list of language tags if not already there.

        Returns the integer index of 'tag' in the list of all tags.
        """
        try:
            return self.tags.index(tag)
        except ValueError:
            self.tags.append(tag)
            return len(self.tags) - 1

    def decompile(self, data, ttFont):
        """Parse the header and the per-tag (offset, length) records,
        resolving each into an ASCII tag string."""
        self.version, self.flags, numTags = struct.unpack(">LLL", data[:12])
        assert self.version == 1
        self.tags = []
        for i in range(numTags):
            pos = 12 + i * 4
            offset, length = struct.unpack(">HH", data[pos : pos + 4])
            tag = data[offset : offset + length].decode("ascii")
            self.tags.append(tag)

    def compile(self, ttFont):
        """Serialize header, tag records, and a shared string pool."""
        dataList = [struct.pack(">LLL", self.version, self.flags, len(self.tags))]
        stringPool = ""
        for tag in self.tags:
            # Reuse an existing occurrence of the tag text in the pool
            # when possible, so duplicate/overlapping tags share bytes.
            offset = stringPool.find(tag)
            if offset < 0:
                offset = len(stringPool)
                stringPool = stringPool + tag
            # Offsets are from the start of the table: 12-byte header plus
            # one 4-byte record per tag.
            offset = offset + 12 + len(self.tags) * 4
            dataList.append(struct.pack(">HH", offset, len(tag)))
        dataList.append(tobytes(stringPool))
        return bytesjoin(dataList)

    def toXML(self, writer, ttFont):
        """Write version, flags, and one <LanguageTag> element per tag."""
        writer.simpletag("version", value=self.version)
        writer.newline()
        writer.simpletag("flags", value=self.flags)
        writer.newline()
        for tag in self.tags:
            writer.simpletag("LanguageTag", tag=tag)
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Parse a <LanguageTag> element or a simple value attribute."""
        if not hasattr(self, "tags"):
            self.tags = []
        if name == "LanguageTag":
            self.tags.append(attrs["tag"])
        elif "value" in attrs:
            value = safeEval(attrs["value"])
            setattr(self, name, value)
|
||||
@@ -0,0 +1,147 @@
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.textTools import safeEval
|
||||
from . import DefaultTable
|
||||
|
||||
# sstruct format for the fields shared by every 'maxp' version (0.5 and up).
maxpFormat_0_5 = """
	> # big endian
	tableVersion: i
	numGlyphs: H
"""

# Additional fields present only in version 1.0 (TrueType-outline) tables.
maxpFormat_1_0_add = """
	> # big endian
	maxPoints: H
	maxContours: H
	maxCompositePoints: H
	maxCompositeContours: H
	maxZones: H
	maxTwilightPoints: H
	maxStorage: H
	maxFunctionDefs: H
	maxInstructionDefs: H
	maxStackElements: H
	maxSizeOfInstructions: H
	maxComponentElements: H
	maxComponentDepth: H
"""
|
||||
|
||||
|
||||
class table__m_a_x_p(DefaultTable.DefaultTable):
    """Maximum Profile table

    The ``maxp`` table contains the memory requirements for the data in
    the font.

    See also https://learn.microsoft.com/en-us/typography/opentype/spec/maxp
    """

    # 'glyf' must be compiled first so recalc() sees the final outlines.
    dependencies = ["glyf"]

    def decompile(self, data, ttFont):
        """Parse the binary table.

        Version 0.5 (0x00005000, CFF fonts) carries only the shared header
        fields; any other version carries the full 1.0 field set as well.
        """
        dummy, data = sstruct.unpack2(maxpFormat_0_5, data, self)
        self.numGlyphs = int(self.numGlyphs)
        if self.tableVersion != 0x00005000:
            dummy, data = sstruct.unpack2(maxpFormat_1_0_add, data, self)
        assert len(data) == 0

    def compile(self, ttFont):
        """Serialize the table, first recalculating the statistics from the
        'glyf' table when it is present, loaded, and recalcBBoxes is set."""
        if "glyf" in ttFont:
            if ttFont.isLoaded("glyf") and ttFont.recalcBBoxes:
                self.recalc(ttFont)
        else:
            pass  # CFF
        self.numGlyphs = len(ttFont.getGlyphOrder())
        if self.tableVersion != 0x00005000:
            # Normalize any non-0.5 version to 1.0 before packing.
            self.tableVersion = 0x00010000
        data = sstruct.pack(maxpFormat_0_5, self)
        if self.tableVersion == 0x00010000:
            data = data + sstruct.pack(maxpFormat_1_0_add, self)
        return data

    def recalc(self, ttFont):
        """Recalculate the font bounding box, and most other maxp values except
        for the TT instructions values. Also recalculate the value of bit 1
        of the flags field and the font bounding box of the 'head' table.
        """
        glyfTable = ttFont["glyf"]
        hmtxTable = ttFont["hmtx"]
        headTable = ttFont["head"]
        self.numGlyphs = len(glyfTable)
        # Sentinel bounds; replaced by real extrema below, or reset to 0 if
        # no glyph contributed any contour.
        INFINITY = 100000
        xMin = +INFINITY
        yMin = +INFINITY
        xMax = -INFINITY
        yMax = -INFINITY
        maxPoints = 0
        maxContours = 0
        maxCompositePoints = 0
        maxCompositeContours = 0
        maxComponentElements = 0
        maxComponentDepth = 0
        allXMinIsLsb = 1
        for glyphName in ttFont.getGlyphOrder():
            g = glyfTable[glyphName]
            if g.numberOfContours:
                # head.flags bit 1 may only be set when every glyph's left
                # sidebearing equals its xMin.
                if hmtxTable[glyphName][1] != g.xMin:
                    allXMinIsLsb = 0
                xMin = min(xMin, g.xMin)
                yMin = min(yMin, g.yMin)
                xMax = max(xMax, g.xMax)
                yMax = max(yMax, g.yMax)
                if g.numberOfContours > 0:
                    # Simple glyph: track per-glyph point/contour maxima.
                    nPoints, nContours = g.getMaxpValues()
                    maxPoints = max(maxPoints, nPoints)
                    maxContours = max(maxContours, nContours)
                elif g.isComposite():
                    # Composite glyph: maxima of the flattened component tree.
                    nPoints, nContours, componentDepth = g.getCompositeMaxpValues(
                        glyfTable
                    )
                    maxCompositePoints = max(maxCompositePoints, nPoints)
                    maxCompositeContours = max(maxCompositeContours, nContours)
                    maxComponentElements = max(maxComponentElements, len(g.components))
                    maxComponentDepth = max(maxComponentDepth, componentDepth)
        if xMin == +INFINITY:
            # No glyph had any contours: zero out the font bounding box.
            headTable.xMin = 0
            headTable.yMin = 0
            headTable.xMax = 0
            headTable.yMax = 0
        else:
            headTable.xMin = xMin
            headTable.yMin = yMin
            headTable.xMax = xMax
            headTable.yMax = yMax
        self.maxPoints = maxPoints
        self.maxContours = maxContours
        self.maxCompositePoints = maxCompositePoints
        self.maxCompositeContours = maxCompositeContours
        self.maxComponentElements = maxComponentElements
        self.maxComponentDepth = maxComponentDepth
        if allXMinIsLsb:
            headTable.flags = headTable.flags | 0x2
        else:
            headTable.flags = headTable.flags & ~0x2

    def testrepr(self):
        # Debug helper: dump all instance attributes, sorted by name.
        items = sorted(self.__dict__.items())
        print(". . . . . . . . .")
        for combo in items:
            print("  %s: %s" % combo)
        print(". . . . . . . . .")

    def toXML(self, writer, ttFont):
        """Dump all fields; the field list depends on the table version."""
        if self.tableVersion != 0x00005000:
            writer.comment("Most of this table will be recalculated by the compiler")
            writer.newline()
        formatstring, names, fixes = sstruct.getformat(maxpFormat_0_5)
        if self.tableVersion != 0x00005000:
            formatstring, names_1_0, fixes = sstruct.getformat(maxpFormat_1_0_add)
            names = {**names, **names_1_0}
        for name in names:
            value = getattr(self, name)
            if name == "tableVersion":
                value = hex(value)
            writer.simpletag(name, value=value)
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        setattr(self, name, safeEval(attrs["value"]))
|
||||
@@ -0,0 +1,112 @@
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.textTools import bytesjoin, strjoin, readHex
|
||||
from fontTools.ttLib import TTLibError
|
||||
from . import DefaultTable
|
||||
|
||||
# Apple's documentation of 'meta':
|
||||
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6meta.html
|
||||
|
||||
META_HEADER_FORMAT = """
|
||||
> # big endian
|
||||
version: L
|
||||
flags: L
|
||||
dataOffset: L
|
||||
numDataMaps: L
|
||||
"""
|
||||
|
||||
|
||||
DATA_MAP_FORMAT = """
|
||||
> # big endian
|
||||
tag: 4s
|
||||
dataOffset: L
|
||||
dataLength: L
|
||||
"""
|
||||
|
||||
|
||||
class table__m_e_t_a(DefaultTable.DefaultTable):
    """Metadata table

    The ``meta`` table contains various metadata values for the font. Each
    category of metadata in the table is identified by a four-character tag.

    See also https://learn.microsoft.com/en-us/typography/opentype/spec/meta
    """

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        # Maps four-character tag -> payload; 'dlng'/'slng' payloads are
        # held as str, all other tags as raw bytes.
        self.data = {}

    def decompile(self, data, ttFont):
        """Parse the binary 'meta' table; only version 1 is supported."""
        headerSize = sstruct.calcsize(META_HEADER_FORMAT)
        header = sstruct.unpack(META_HEADER_FORMAT, data[0:headerSize])
        if header["version"] != 1:
            raise TTLibError("unsupported 'meta' version %d" % header["version"])
        dataMapSize = sstruct.calcsize(DATA_MAP_FORMAT)
        for i in range(header["numDataMaps"]):
            dataMapOffset = headerSize + i * dataMapSize
            dataMap = sstruct.unpack(
                DATA_MAP_FORMAT, data[dataMapOffset : dataMapOffset + dataMapSize]
            )
            tag = dataMap["tag"]
            offset = dataMap["dataOffset"]
            self.data[tag] = data[offset : offset + dataMap["dataLength"]]
            if tag in ["dlng", "slng"]:
                # Language-tag entries are defined to be UTF-8 text.
                self.data[tag] = self.data[tag].decode("utf-8")

    def compile(self, ttFont):
        """Serialize header, data maps (sorted by tag), then payload blocks."""
        keys = sorted(self.data.keys())
        headerSize = sstruct.calcsize(META_HEADER_FORMAT)
        # First payload starts right after the header and all data maps.
        dataOffset = headerSize + len(keys) * sstruct.calcsize(DATA_MAP_FORMAT)
        header = sstruct.pack(
            META_HEADER_FORMAT,
            {
                "version": 1,
                "flags": 0,
                "dataOffset": dataOffset,
                "numDataMaps": len(keys),
            },
        )
        dataMaps = []
        dataBlocks = []
        for tag in keys:
            if tag in ["dlng", "slng"]:
                data = self.data[tag].encode("utf-8")
            else:
                data = self.data[tag]
            dataMaps.append(
                sstruct.pack(
                    DATA_MAP_FORMAT,
                    {"tag": tag, "dataOffset": dataOffset, "dataLength": len(data)},
                )
            )
            dataBlocks.append(data)
            dataOffset += len(data)
        return bytesjoin([header] + dataMaps + dataBlocks)

    def toXML(self, writer, ttFont):
        """Dump entries: a <text> element for 'dlng'/'slng', <hexdata> otherwise."""
        for tag in sorted(self.data.keys()):
            if tag in ["dlng", "slng"]:
                writer.begintag("text", tag=tag)
                writer.newline()
                writer.write(self.data[tag])
                writer.newline()
                writer.endtag("text")
                writer.newline()
            else:
                writer.begintag("hexdata", tag=tag)
                writer.newline()
                data = self.data[tag]
                if min(data) >= 0x20 and max(data) <= 0x7E:
                    # Purely printable-ASCII payload: also show it readably.
                    writer.comment("ascii: " + data.decode("ascii"))
                    writer.newline()
                writer.dumphex(data)
                writer.endtag("hexdata")
                writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name == "hexdata":
            self.data[attrs["tag"]] = readHex(content)
        elif name == "text" and attrs["tag"] in ["dlng", "slng"]:
            self.data[attrs["tag"]] = strjoin(content).strip()
        else:
            raise TTLibError("can't handle '%s' element" % name)
|
||||
@@ -0,0 +1,14 @@
|
||||
from .otBase import BaseTTXConverter
|
||||
|
||||
|
||||
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6mort.html
|
||||
class table__m_o_r_t(BaseTTXConverter):
    """Glyph metamorphosis table.

    The AAT ``mort`` table contains glyph transformations used for script
    shaping and for various other optional smart features.

    Note: ``mort`` has been deprecated in favor of the newer ``morx`` table.

    See also https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6mort.html
    """
|
||||
@@ -0,0 +1,15 @@
|
||||
from .otBase import BaseTTXConverter
|
||||
|
||||
|
||||
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6morx.html
|
||||
class table__m_o_r_x(BaseTTXConverter):
    """Extended glyph metamorphosis table.

    The AAT ``morx`` table contains glyph transformations used for script
    shaping and for various other optional smart features, akin to ``GSUB``
    and ``GPOS`` features in OpenType Layout.

    Note: ``morx`` is a replacement for the now deprecated ``mort`` table.

    See also https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6morx.html
    """
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,14 @@
|
||||
from .otBase import BaseTTXConverter
|
||||
|
||||
|
||||
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6opbd.html
|
||||
class table__o_p_b_d(BaseTTXConverter):
    """Optical Bounds table.

    The AAT ``opbd`` table contains optical boundary points for glyphs,
    which applications can use for the visual alignment of lines of text.

    See also https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6opbd.html
    """
|
||||
@@ -0,0 +1,319 @@
|
||||
from fontTools import ttLib
|
||||
from fontTools.ttLib.standardGlyphOrder import standardGlyphOrder
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.textTools import bytechr, byteord, tobytes, tostr, safeEval, readHex
|
||||
from . import DefaultTable
|
||||
import sys
|
||||
import struct
|
||||
import array
|
||||
import logging
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
# sstruct format of the fixed 'post' table header (all versions).
postFormat = """
	>
	formatType: 16.16F
	italicAngle: 16.16F # italic angle in degrees
	underlinePosition: h
	underlineThickness: h
	isFixedPitch: L
	minMemType42: L # minimum memory if TrueType font is downloaded
	maxMemType42: L # maximum memory if TrueType font is downloaded
	minMemType1: L # minimum memory if Type1 font is downloaded
	maxMemType1: L # maximum memory if Type1 font is downloaded
"""

# Byte size of the fixed header above.
postFormatSize = sstruct.calcsize(postFormat)
|
||||
|
||||
|
||||
class table__p_o_s_t(DefaultTable.DefaultTable):
    """PostScript table

    The ``post`` table contains information needed to use the font on
    PostScript printers, including the PostScript names of glyphs and
    data that was stored in the ``FontInfo`` dictionary for Type 1 fonts.

    See also https://learn.microsoft.com/en-us/typography/opentype/spec/post
    """

    def decompile(self, data, ttFont):
        """Parse the fixed header, then dispatch on formatType (1.0, 2.0,
        3.0 or 4.0) to decode the glyph-name payload."""
        sstruct.unpack(postFormat, data[:postFormatSize], self)
        data = data[postFormatSize:]
        if self.formatType == 1.0:
            self.decode_format_1_0(data, ttFont)
        elif self.formatType == 2.0:
            self.decode_format_2_0(data, ttFont)
        elif self.formatType == 3.0:
            self.decode_format_3_0(data, ttFont)
        elif self.formatType == 4.0:
            self.decode_format_4_0(data, ttFont)
        else:
            # supported format
            raise ttLib.TTLibError(
                "'post' table format %f not supported" % self.formatType
            )

    def compile(self, ttFont):
        """Serialize the fixed header plus the format-specific payload.
        Formats 1.0 and 3.0 carry no payload."""
        data = sstruct.pack(postFormat, self)
        if self.formatType == 1.0:
            pass  # we're done
        elif self.formatType == 2.0:
            data = data + self.encode_format_2_0(ttFont)
        elif self.formatType == 3.0:
            pass  # we're done
        elif self.formatType == 4.0:
            data = data + self.encode_format_4_0(ttFont)
        else:
            # supported format
            raise ttLib.TTLibError(
                "'post' table format %f not supported" % self.formatType
            )
        return data

    def getGlyphOrder(self):
        """This function will get called by a ttLib.TTFont instance.
        Do not call this function yourself, use TTFont().getGlyphOrder()
        or its relatives instead!
        """
        if not hasattr(self, "glyphOrder"):
            raise ttLib.TTLibError("illegal use of getGlyphOrder()")
        glyphOrder = self.glyphOrder
        # Hand ownership of the order to the TTFont; it must only be
        # obtained through this method once.
        del self.glyphOrder
        return glyphOrder

    def decode_format_1_0(self, data, ttFont):
        # Format 1.0: glyphs are exactly the standard Macintosh order.
        self.glyphOrder = standardGlyphOrder[: ttFont["maxp"].numGlyphs]

    def decode_format_2_0(self, data, ttFont):
        """Decode format 2.0: per-glyph indices into the standard order
        (0-257) or into an embedded Pascal-string name pool (258+)."""
        (numGlyphs,) = struct.unpack(">H", data[:2])
        numGlyphs = int(numGlyphs)
        if numGlyphs > ttFont["maxp"].numGlyphs:
            # Assume the numGlyphs field is bogus, so sync with maxp.
            # I've seen this in one font, and if the assumption is
            # wrong elsewhere, well, so be it: it's hard enough to
            # work around _one_ non-conforming post format...
            numGlyphs = ttFont["maxp"].numGlyphs
        data = data[2:]
        indices = array.array("H")
        indices.frombytes(data[: 2 * numGlyphs])
        if sys.byteorder != "big":
            # Table data is big-endian; swap on little-endian hosts.
            indices.byteswap()
        data = data[2 * numGlyphs :]
        maxIndex = max(indices)
        # Indices 0-257 are standard names, so the pool holds maxIndex-257.
        self.extraNames = extraNames = unpackPStrings(data, maxIndex - 257)
        self.glyphOrder = glyphOrder = [""] * int(ttFont["maxp"].numGlyphs)
        for glyphID in range(numGlyphs):
            index = indices[glyphID]
            if index > 257:
                try:
                    name = extraNames[index - 258]
                except IndexError:
                    name = ""
            else:
                # fetch names from standard list
                name = standardGlyphOrder[index]
            glyphOrder[glyphID] = name
        self.build_psNameMapping(ttFont)

    def build_psNameMapping(self, ttFont):
        """Make every glyph name unique, recording original-PS-name for any
        glyph that had to be renamed (empty or duplicated names)."""
        mapping = {}  # unique name -> original PS name, only where they differ
        allNames = {}  # name -> next numeric suffix to try for a clash
        glyphOrderNames = set(self.glyphOrder)
        for i in range(ttFont["maxp"].numGlyphs):
            glyphName = psName = self.glyphOrder[i]
            if glyphName == "":
                # Synthesize a name for glyphs that have none.
                glyphName = "glyph%.5d" % i

            if glyphName in allNames:
                # make up a new glyphName that's unique
                n = allNames[glyphName]
                # check if the glyph name exists in the glyph order
                while f"{glyphName}.{n}" in glyphOrderNames:
                    n += 1
                allNames[glyphName] = n + 1
                glyphName = f"{glyphName}.{n}"

            allNames[glyphName] = 1
            if glyphName != psName:
                self.glyphOrder[i] = glyphName
                mapping[glyphName] = psName

        self.mapping = mapping

    def decode_format_3_0(self, data, ttFont):
        # Setting self.glyphOrder to None will cause the TTFont object
        # try and construct glyph names from a Unicode cmap table.
        self.glyphOrder = None

    def decode_format_4_0(self, data, ttFont):
        """Decode (Apple-only) format 4.0: one Unicode code point per glyph,
        mapped to AGL names or 'uniXXXX'; 0xFFFF means unnamed."""
        from fontTools import agl

        numGlyphs = ttFont["maxp"].numGlyphs
        indices = array.array("H")
        indices.frombytes(data)
        if sys.byteorder != "big":
            indices.byteswap()
        # In some older fonts, the size of the post table doesn't match
        # the number of glyphs. Sometimes it's bigger, sometimes smaller.
        self.glyphOrder = glyphOrder = [""] * int(numGlyphs)
        for i in range(min(len(indices), numGlyphs)):
            if indices[i] == 0xFFFF:
                self.glyphOrder[i] = ""
            elif indices[i] in agl.UV2AGL:
                self.glyphOrder[i] = agl.UV2AGL[indices[i]]
            else:
                self.glyphOrder[i] = "uni%04X" % indices[i]
        self.build_psNameMapping(ttFont)

    def encode_format_2_0(self, ttFont):
        """Encode format 2.0: numGlyphs, per-glyph name indices, and the
        extra-name string pool (names not in the standard order)."""
        numGlyphs = ttFont["maxp"].numGlyphs
        glyphOrder = ttFont.getGlyphOrder()
        assert len(glyphOrder) == numGlyphs
        indices = array.array("H")
        extraDict = {}
        # Drop any stale extraNames entries that are really standard names.
        extraNames = self.extraNames = [
            n for n in self.extraNames if n not in standardGlyphOrder
        ]
        for i, name in enumerate(extraNames):
            extraDict[name] = i
        for glyphName in glyphOrder:
            # Emit the original PS name where the glyph was renamed.
            if glyphName in self.mapping:
                psName = self.mapping[glyphName]
            else:
                psName = glyphName
            if psName in extraDict:
                index = 258 + extraDict[psName]
            elif psName in standardGlyphOrder:
                index = standardGlyphOrder.index(psName)
            else:
                index = 258 + len(extraNames)
                extraDict[psName] = len(extraNames)
                extraNames.append(psName)
            indices.append(index)
        if sys.byteorder != "big":
            indices.byteswap()
        return (
            struct.pack(">H", numGlyphs) + indices.tobytes() + packPStrings(extraNames)
        )

    def encode_format_4_0(self, ttFont):
        """Encode format 4.0: one Unicode code point per glyph, derived from
        AGL names or 'uniXXXX' names; 0xFFFF for anything unmappable."""
        from fontTools import agl

        numGlyphs = ttFont["maxp"].numGlyphs
        glyphOrder = ttFont.getGlyphOrder()
        assert len(glyphOrder) == numGlyphs
        indices = array.array("H")
        for glyphID in glyphOrder:
            # Strip any '#'-suffix disambiguator before the lookup.
            glyphID = glyphID.split("#")[0]
            if glyphID in agl.AGL2UV:
                indices.append(agl.AGL2UV[glyphID])
            elif len(glyphID) == 7 and glyphID[:3] == "uni":
                indices.append(int(glyphID[3:], 16))
            else:
                indices.append(0xFFFF)
        if sys.byteorder != "big":
            indices.byteswap()
        return indices.tobytes()

    def toXML(self, writer, ttFont):
        """Dump header fields, plus psNames / extraNames / hexdata sections
        for whichever of those attributes are present."""
        formatstring, names, fixes = sstruct.getformat(postFormat)
        for name in names:
            value = getattr(self, name)
            writer.simpletag(name, value=value)
            writer.newline()
        if hasattr(self, "mapping"):
            writer.begintag("psNames")
            writer.newline()
            writer.comment(
                "This file uses unique glyph names based on the information\n"
                "found in the 'post' table. Since these names might not be unique,\n"
                "we have to invent artificial names in case of clashes. In order to\n"
                "be able to retain the original information, we need a name to\n"
                "ps name mapping for those cases where they differ. That's what\n"
                "you see below.\n"
            )
            writer.newline()
            items = sorted(self.mapping.items())
            for name, psName in items:
                writer.simpletag("psName", name=name, psName=psName)
                writer.newline()
            writer.endtag("psNames")
            writer.newline()
        if hasattr(self, "extraNames"):
            writer.begintag("extraNames")
            writer.newline()
            writer.comment(
                "following are the name that are not taken from the standard Mac glyph order"
            )
            writer.newline()
            for name in self.extraNames:
                writer.simpletag("psName", name=name)
                writer.newline()
            writer.endtag("extraNames")
            writer.newline()
        if hasattr(self, "data"):
            writer.begintag("hexdata")
            writer.newline()
            writer.dumphex(self.data)
            writer.endtag("hexdata")
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name not in ("psNames", "extraNames", "hexdata"):
            setattr(self, name, safeEval(attrs["value"]))
        elif name == "psNames":
            self.mapping = {}
            for element in content:
                if not isinstance(element, tuple):
                    continue
                name, attrs, content = element
                if name == "psName":
                    self.mapping[attrs["name"]] = attrs["psName"]
        elif name == "extraNames":
            self.extraNames = []
            for element in content:
                if not isinstance(element, tuple):
                    continue
                name, attrs, content = element
                if name == "psName":
                    self.extraNames.append(attrs["name"])
        else:
            self.data = readHex(content)
|
||||
|
||||
|
||||
def unpackPStrings(data, n):
    """Extract n Pascal strings from data; where data runs short, "" is used."""
    strings = []
    pos = 0
    total = len(data)

    for _ in range(n):
        # Length byte, or 0 if we are already past the end of the data.
        length = 0 if pos >= total else byteord(data[pos])
        pos += 1

        if pos + length - 1 >= total:
            # The string body would overrun the buffer: substitute "".
            name = ""
        else:
            name = tostr(data[pos : pos + length], encoding="latin1")
        strings.append(name)
        pos += length

    if pos < total:
        log.warning("%d extra bytes in post.stringData array", total - pos)
    elif total < pos:
        log.warning("not enough data in post.stringData array")

    return strings
|
||||
|
||||
|
||||
def packPStrings(strings):
    """Serialize strings as concatenated Pascal strings (length byte + latin-1 body)."""
    return b"".join(
        bytechr(len(s)) + tobytes(s, encoding="latin1") for s in strings
    )
|
||||
@@ -0,0 +1,16 @@
|
||||
from fontTools import ttLib
|
||||
|
||||
superclass = ttLib.getTableClass("fpgm")
|
||||
|
||||
|
||||
class table__p_r_e_p(superclass):
    """Control Value Program table

    The ``prep`` table contains TrueType instructions that can make font-wide
    alterations to the Control Value Table. It may potentially be executed
    before any glyph is processed.

    See also https://learn.microsoft.com/en-us/typography/opentype/spec/prep
    """

    pass
|
||||
@@ -0,0 +1,12 @@
|
||||
from .otBase import BaseTTXConverter
|
||||
|
||||
|
||||
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6prop.html
|
||||
class table__p_r_o_p(BaseTTXConverter):
    """Glyph properties table.

    The AAT ``prop`` table can store a variety of per-glyph properties,
    such as Unicode directionality or whether glyphs are non-spacing marks.

    See also https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6prop.html
    """
|
||||
@@ -0,0 +1,129 @@
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.textTools import safeEval, num2binary, binary2num
|
||||
from . import DefaultTable
|
||||
from .sbixStrike import Strike
|
||||
|
||||
|
||||
# sstruct format of the fixed 'sbix' table header.
sbixHeaderFormat = """
	>
	version: H # Version number (set to 1)
	flags: H # The only two bits used in the flags field are bits 0
		# and 1. For historical reasons, bit 0 must always be 1.
		# Bit 1 is a sbixDrawOutlines flag and is interpreted as
		# follows:
		# 0: Draw only 'sbix' bitmaps
		# 1: Draw both 'sbix' bitmaps and outlines, in that
		# order
	numStrikes: L # Number of bitmap strikes to follow
"""
sbixHeaderFormatSize = sstruct.calcsize(sbixHeaderFormat)


# sstruct format of one entry in the strike-offset array that follows
# the header.
sbixStrikeOffsetFormat = """
	>
	strikeOffset: L # Offset from begining of table to data for the
		# individual strike
"""
sbixStrikeOffsetFormatSize = sstruct.calcsize(sbixStrikeOffsetFormat)
|
||||
|
||||
|
||||
class table__s_b_i_x(DefaultTable.DefaultTable):
    """Standard Bitmap Graphics table

    The ``sbix`` table stores bitmap image data in standard graphics formats
    like JPEG, PNG, or TIFF. The glyphs for which the ``sbix`` table provides
    data are indexed by Glyph ID. For each such glyph, the ``sbix`` table can
    hold different data for different sizes, called "strikes."

    See also https://learn.microsoft.com/en-us/typography/opentype/spec/sbix
    """

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        self.version = 1
        self.flags = 1  # bit 0 must always be set, per the spec comment above
        self.numStrikes = 0
        self.strikes = {}  # maps ppem -> Strike
        self.strikeOffsets = []

    def decompile(self, data, ttFont):
        """Parse the header, the strike-offset array, then each strike."""
        # read table header
        sstruct.unpack(sbixHeaderFormat, data[:sbixHeaderFormatSize], self)
        # collect offsets to individual strikes in self.strikeOffsets
        for i in range(self.numStrikes):
            current_offset = sbixHeaderFormatSize + i * sbixStrikeOffsetFormatSize
            offset_entry = sbixStrikeOffset()
            sstruct.unpack(
                sbixStrikeOffsetFormat,
                data[current_offset : current_offset + sbixStrikeOffsetFormatSize],
                offset_entry,
            )
            self.strikeOffsets.append(offset_entry.strikeOffset)

        # decompile Strikes
        # Walk the offsets back-to-front, trimming 'data' after each strike so
        # every strike sees exactly its own byte range.
        for i in range(self.numStrikes - 1, -1, -1):
            current_strike = Strike(rawdata=data[self.strikeOffsets[i] :])
            data = data[: self.strikeOffsets[i]]
            current_strike.decompile(ttFont)
            # print "	Strike length: %xh" % len(bitmapSetData)
            # print "Number of Glyph entries:", len(current_strike.glyphs)
            if current_strike.ppem in self.strikes:
                from fontTools import ttLib

                raise ttLib.TTLibError("Pixel 'ppem' must be unique for each Strike")
            self.strikes[current_strike.ppem] = current_strike

        # after the glyph data records have been extracted, we don't need the offsets anymore
        del self.strikeOffsets
        del self.numStrikes

    def compile(self, ttFont):
        """Serialize header + strike-offset array + strike data, with strikes
        ordered by ppem."""
        sbixData = b""
        self.numStrikes = len(self.strikes)
        sbixHeader = sstruct.pack(sbixHeaderFormat, self)

        # calculate offset to start of first strike
        setOffset = sbixHeaderFormatSize + sbixStrikeOffsetFormatSize * self.numStrikes

        for si in sorted(self.strikes.keys()):
            current_strike = self.strikes[si]
            current_strike.compile(ttFont)
            # append offset to this strike to table header
            current_strike.strikeOffset = setOffset
            sbixHeader += sstruct.pack(sbixStrikeOffsetFormat, current_strike)
            setOffset += len(current_strike.data)
            sbixData += current_strike.data

        return sbixHeader + sbixData

    def toXML(self, xmlWriter, ttFont):
        xmlWriter.simpletag("version", value=self.version)
        xmlWriter.newline()
        xmlWriter.simpletag("flags", value=num2binary(self.flags, 16))
        xmlWriter.newline()
        for i in sorted(self.strikes.keys()):
            self.strikes[i].toXML(xmlWriter, ttFont)

    def fromXML(self, name, attrs, content, ttFont):
        """Rebuild version/flags/strikes from TTX; unknown elements raise."""
        if name == "version":
            setattr(self, name, safeEval(attrs["value"]))
        elif name == "flags":
            # flags are serialized as a binary string, e.g. "00000000 00000001"
            setattr(self, name, binary2num(attrs["value"]))
        elif name == "strike":
            current_strike = Strike()
            for element in content:
                if isinstance(element, tuple):
                    name, attrs, content = element
                    current_strike.fromXML(name, attrs, content, ttFont)
            self.strikes[current_strike.ppem] = current_strike
        else:
            from fontTools import ttLib

            raise ttLib.TTLibError("can't handle '%s' element" % name)
|
||||
|
||||
|
||||
# Helper classes
|
||||
|
||||
|
||||
class sbixStrikeOffset(object):
    """Mutable record used as the unpack target for one strike-offset entry."""
|
||||
@@ -0,0 +1,332 @@
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.fixedTools import (
|
||||
fixedToFloat as fi2fl,
|
||||
floatToFixed as fl2fi,
|
||||
floatToFixedToStr as fl2str,
|
||||
strToFixedToFloat as str2fl,
|
||||
)
|
||||
from fontTools.misc.textTools import bytesjoin, safeEval
|
||||
from fontTools.ttLib import TTLibError
|
||||
from . import DefaultTable
|
||||
import struct
|
||||
from collections.abc import MutableMapping
|
||||
|
||||
|
||||
# Apple's documentation of 'trak':
|
||||
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6trak.html
|
||||
|
||||
# sstruct format of the fixed 'trak' table header.
TRAK_HEADER_FORMAT = """
	> # big endian
	version: 16.16F
	format: H
	horizOffset: H
	vertOffset: H
	reserved: H
"""

TRAK_HEADER_FORMAT_SIZE = sstruct.calcsize(TRAK_HEADER_FORMAT)


# sstruct format of one TrackData sub-table header.
TRACK_DATA_FORMAT = """
	> # big endian
	nTracks: H
	nSizes: H
	sizeTableOffset: L
"""

TRACK_DATA_FORMAT_SIZE = sstruct.calcsize(TRACK_DATA_FORMAT)


# sstruct format of one entry in a TrackData's track table.
TRACK_TABLE_ENTRY_FORMAT = """
	> # big endian
	track: 16.16F
	nameIndex: H
	offset: H
"""

TRACK_TABLE_ENTRY_FORMAT_SIZE = sstruct.calcsize(TRACK_TABLE_ENTRY_FORMAT)


# size values are actually '16.16F' fixed-point values, but here I do the
# fixedToFloat conversion manually instead of relying on sstruct
SIZE_VALUE_FORMAT = ">l"
SIZE_VALUE_FORMAT_SIZE = struct.calcsize(SIZE_VALUE_FORMAT)

# per-Size values are in 'FUnits', i.e. 16-bit signed integers
PER_SIZE_VALUE_FORMAT = ">h"
PER_SIZE_VALUE_FORMAT_SIZE = struct.calcsize(PER_SIZE_VALUE_FORMAT)
|
||||
|
||||
|
||||
class table__t_r_a_k(DefaultTable.DefaultTable):
    """The AAT ``trak`` table can store per-size adjustments to each glyph's
    sidebearings to make when tracking is enabled, which applications can
    use to provide more visually balanced line spacing.

    See also https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6trak.html
    """

    # Track entries reference 'name' table records by index.
    dependencies = ["name"]

    def compile(self, ttFont):
        """Serialize the header followed by the horizontal and vertical
        TrackData sub-tables (each longword-aligned; absent ones get offset 0)."""
        dataList = []
        offset = TRAK_HEADER_FORMAT_SIZE
        for direction in ("horiz", "vert"):
            trackData = getattr(self, direction + "Data", TrackData())
            offsetName = direction + "Offset"
            # set offset to 0 if None or empty
            if not trackData:
                setattr(self, offsetName, 0)
                continue
            # TrackData table format must be longword aligned
            alignedOffset = (offset + 3) & ~3
            padding, offset = b"\x00" * (alignedOffset - offset), alignedOffset
            setattr(self, offsetName, offset)

            data = trackData.compile(offset)
            offset += len(data)
            dataList.append(padding + data)

        self.reserved = 0
        tableData = bytesjoin([sstruct.pack(TRAK_HEADER_FORMAT, self)] + dataList)
        return tableData

    def decompile(self, data, ttFont):
        """Parse the header; decode each TrackData whose offset is non-zero."""
        sstruct.unpack(TRAK_HEADER_FORMAT, data[:TRAK_HEADER_FORMAT_SIZE], self)
        for direction in ("horiz", "vert"):
            trackData = TrackData()
            offset = getattr(self, direction + "Offset")
            if offset != 0:
                trackData.decompile(data, offset)
            setattr(self, direction + "Data", trackData)

    def toXML(self, writer, ttFont):
        writer.simpletag("version", value=self.version)
        writer.newline()
        writer.simpletag("format", value=self.format)
        writer.newline()
        for direction in ("horiz", "vert"):
            dataName = direction + "Data"
            writer.begintag(dataName)
            writer.newline()
            trackData = getattr(self, dataName, TrackData())
            trackData.toXML(writer, ttFont)
            writer.endtag(dataName)
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name == "version":
            self.version = safeEval(attrs["value"])
        elif name == "format":
            self.format = safeEval(attrs["value"])
        elif name in ("horizData", "vertData"):
            trackData = TrackData()
            setattr(self, name, trackData)
            for element in content:
                if not isinstance(element, tuple):
                    continue
                # Rebind content under a fresh name so the outer arguments
                # stay intact while recursing into child elements.
                name, attrs, content_ = element
                trackData.fromXML(name, attrs, content_, ttFont)
|
||||
|
||||
|
||||
class TrackData(MutableMapping):
    """Mapping of track values (floats) to :class:`TrackTableEntry` objects.

    One instance holds the tracking data for a single direction
    ('horiz' or 'vert') of the 'trak' table.
    """

    def __init__(self, initialdata={}):
        # NOTE: the mutable default is safe here because it is only
        # copied into a fresh dict, never mutated.
        self._map = dict(initialdata)

    def compile(self, offset):
        """Serialize this TrackData.

        *offset* is the position of this subtable measured from the start
        of the 'trak' table; the offsets stored inside the subtable are
        relative to the same origin.
        """
        nTracks = len(self)
        sizes = self.sizes()
        nSizes = len(sizes)

        # offset to the start of the size subtable
        offset += TRACK_DATA_FORMAT_SIZE + TRACK_TABLE_ENTRY_FORMAT_SIZE * nTracks
        trackDataHeader = sstruct.pack(
            TRACK_DATA_FORMAT,
            {"nTracks": nTracks, "nSizes": nSizes, "sizeTableOffset": offset},
        )

        entryDataList = []
        perSizeDataList = []
        # offset to per-size tracking values
        offset += SIZE_VALUE_FORMAT_SIZE * nSizes
        # sort track table entries by track value
        for track, entry in sorted(self.items()):
            assert entry.nameIndex is not None
            entry.track = track
            entry.offset = offset
            entryDataList += [sstruct.pack(TRACK_TABLE_ENTRY_FORMAT, entry)]
            # sort per-size values by size
            for size, value in sorted(entry.items()):
                perSizeDataList += [struct.pack(PER_SIZE_VALUE_FORMAT, value)]
            offset += PER_SIZE_VALUE_FORMAT_SIZE * nSizes
        # sort size values
        sizeDataList = [
            struct.pack(SIZE_VALUE_FORMAT, fl2fi(sv, 16)) for sv in sorted(sizes)
        ]

        data = bytesjoin(
            [trackDataHeader] + entryDataList + sizeDataList + perSizeDataList
        )
        return data

    def decompile(self, data, offset):
        """Parse one TrackData subtable from *data* starting at *offset*."""
        # initial offset is from the start of trak table to the current TrackData
        trackDataHeader = data[offset : offset + TRACK_DATA_FORMAT_SIZE]
        if len(trackDataHeader) != TRACK_DATA_FORMAT_SIZE:
            raise TTLibError("not enough data to decompile TrackData header")
        sstruct.unpack(TRACK_DATA_FORMAT, trackDataHeader, self)
        offset += TRACK_DATA_FORMAT_SIZE

        nSizes = self.nSizes
        sizeTableOffset = self.sizeTableOffset
        sizeTable = []
        for i in range(nSizes):
            sizeValueData = data[
                sizeTableOffset : sizeTableOffset + SIZE_VALUE_FORMAT_SIZE
            ]
            if len(sizeValueData) < SIZE_VALUE_FORMAT_SIZE:
                raise TTLibError("not enough data to decompile TrackData size subtable")
            (sizeValue,) = struct.unpack(SIZE_VALUE_FORMAT, sizeValueData)
            # Sizes are stored as 16.16 fixed-point values.
            sizeTable.append(fi2fl(sizeValue, 16))
            sizeTableOffset += SIZE_VALUE_FORMAT_SIZE

        for i in range(self.nTracks):
            entry = TrackTableEntry()
            entryData = data[offset : offset + TRACK_TABLE_ENTRY_FORMAT_SIZE]
            if len(entryData) < TRACK_TABLE_ENTRY_FORMAT_SIZE:
                raise TTLibError("not enough data to decompile TrackTableEntry record")
            sstruct.unpack(TRACK_TABLE_ENTRY_FORMAT, entryData, entry)
            # Each entry stores its own offset to the per-size value run.
            perSizeOffset = entry.offset
            for j in range(nSizes):
                size = sizeTable[j]
                perSizeValueData = data[
                    perSizeOffset : perSizeOffset + PER_SIZE_VALUE_FORMAT_SIZE
                ]
                if len(perSizeValueData) < PER_SIZE_VALUE_FORMAT_SIZE:
                    raise TTLibError(
                        "not enough data to decompile per-size track values"
                    )
                (perSizeValue,) = struct.unpack(PER_SIZE_VALUE_FORMAT, perSizeValueData)
                entry[size] = perSizeValue
                perSizeOffset += PER_SIZE_VALUE_FORMAT_SIZE
            self[entry.track] = entry
            offset += TRACK_TABLE_ENTRY_FORMAT_SIZE

    def toXML(self, writer, ttFont):
        """Write all entries, sorted by track value."""
        nTracks = len(self)
        nSizes = len(self.sizes())
        writer.comment("nTracks=%d, nSizes=%d" % (nTracks, nSizes))
        writer.newline()
        for track, entry in sorted(self.items()):
            assert entry.nameIndex is not None
            entry.track = track
            entry.toXML(writer, ttFont)

    def fromXML(self, name, attrs, content, ttFont):
        """Add one <trackEntry> element; other elements are ignored."""
        if name != "trackEntry":
            return
        entry = TrackTableEntry()
        entry.fromXML(name, attrs, content, ttFont)
        self[entry.track] = entry

    def sizes(self):
        """Return the set of sizes shared by all entries.

        Raises TTLibError if entries disagree, since the binary format
        stores a single size list for the whole TrackData.
        """
        if not self:
            return frozenset()
        tracks = list(self.tracks())
        sizes = self[tracks.pop(0)].sizes()
        for track in tracks:
            entrySizes = self[track].sizes()
            if sizes != entrySizes:
                raise TTLibError(
                    "'trak' table entries must specify the same sizes: "
                    "%s != %s" % (sorted(sizes), sorted(entrySizes))
                )
        return frozenset(sizes)

    # --- MutableMapping protocol, backed by self._map ---

    def __getitem__(self, track):
        return self._map[track]

    def __delitem__(self, track):
        del self._map[track]

    def __setitem__(self, track, entry):
        self._map[track] = entry

    def __len__(self):
        return len(self._map)

    def __iter__(self):
        return iter(self._map)

    def keys(self):
        return self._map.keys()

    # Alias: track values are the mapping keys.
    tracks = keys

    def __repr__(self):
        return "TrackData({})".format(self._map if self else "")
|
||||
|
||||
|
||||
class TrackTableEntry(MutableMapping):
    """Per-track entry: maps point sizes (floats) to tracking values.

    ``nameIndex`` points into the 'name' table at the entry's display
    name; ``track`` is assigned by the owning :class:`TrackData`.
    """

    def __init__(self, values={}, nameIndex=None):
        # NOTE: the mutable default is safe here because it is only
        # copied into a fresh dict, never mutated.
        self.nameIndex = nameIndex
        self._map = dict(values)

    def toXML(self, writer, ttFont):
        """Write this entry as a <trackEntry> element with per-size values."""
        name = ttFont["name"].getDebugName(self.nameIndex)
        writer.begintag(
            "trackEntry",
            (("value", fl2str(self.track, 16)), ("nameIndex", self.nameIndex)),
        )
        writer.newline()
        if name:
            # Emit the resolved name as an XML comment for readability.
            writer.comment(name)
            writer.newline()
        for size, perSizeValue in sorted(self.items()):
            writer.simpletag("track", size=fl2str(size, 16), value=perSizeValue)
            writer.newline()
        writer.endtag("trackEntry")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Populate this entry from a <trackEntry> element."""
        self.track = str2fl(attrs["value"], 16)
        self.nameIndex = safeEval(attrs["nameIndex"])
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attrs, _ = element
            # Only <track> children carry data; skip anything else.
            if name != "track":
                continue
            size = str2fl(attrs["size"], 16)
            self[size] = safeEval(attrs["value"])

    # --- MutableMapping protocol, backed by self._map ---

    def __getitem__(self, size):
        return self._map[size]

    def __delitem__(self, size):
        del self._map[size]

    def __setitem__(self, size, value):
        self._map[size] = value

    def __len__(self):
        return len(self._map)

    def __iter__(self):
        return iter(self._map)

    def keys(self):
        return self._map.keys()

    # Alias: point sizes are the mapping keys.
    sizes = keys

    def __repr__(self):
        return "TrackTableEntry({}, nameIndex={})".format(self._map, self.nameIndex)

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.nameIndex == other.nameIndex and dict(self) == dict(other)

    def __ne__(self, other):
        result = self.__eq__(other)
        return result if result is NotImplemented else not result
|
||||
@@ -0,0 +1,139 @@
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.textTools import safeEval
|
||||
from fontTools.misc.fixedTools import (
|
||||
ensureVersionIsLong as fi2ve,
|
||||
versionToFixed as ve2fi,
|
||||
)
|
||||
from . import DefaultTable
|
||||
import math
|
||||
|
||||
|
||||
vheaFormat = """
|
||||
> # big endian
|
||||
tableVersion: L
|
||||
ascent: h
|
||||
descent: h
|
||||
lineGap: h
|
||||
advanceHeightMax: H
|
||||
minTopSideBearing: h
|
||||
minBottomSideBearing: h
|
||||
yMaxExtent: h
|
||||
caretSlopeRise: h
|
||||
caretSlopeRun: h
|
||||
caretOffset: h
|
||||
reserved1: h
|
||||
reserved2: h
|
||||
reserved3: h
|
||||
reserved4: h
|
||||
metricDataFormat: h
|
||||
numberOfVMetrics: H
|
||||
"""
|
||||
|
||||
|
||||
class table__v_h_e_a(DefaultTable.DefaultTable):
    """Vertical Header table

    The ``vhea`` table contains information needed during vertical
    text layout.

    .. note::
       This converter class is kept in sync with the :class:`._h_h_e_a.table__h_h_e_a`
       table constructor.

    See also https://learn.microsoft.com/en-us/typography/opentype/spec/vhea
    """

    # Note: Keep in sync with table__h_h_e_a

    dependencies = ["vmtx", "glyf", "CFF ", "CFF2"]

    def decompile(self, data, ttFont):
        """Unpack the binary vhea record into attributes on self."""
        sstruct.unpack(vheaFormat, data, self)

    def compile(self, ttFont):
        """Pack the table, optionally recalculating extents first."""
        # Only recalc when an outline table is already loaded: recalc
        # reads glyph bounds, and loading a table here would be an
        # unwanted side effect of compiling.
        if ttFont.recalcBBoxes and (
            ttFont.isLoaded("glyf")
            or ttFont.isLoaded("CFF ")
            or ttFont.isLoaded("CFF2")
        ):
            self.recalc(ttFont)
        self.tableVersion = fi2ve(self.tableVersion)
        return sstruct.pack(vheaFormat, self)

    def recalc(self, ttFont):
        """Recompute advanceHeightMax, the min side bearings and yMaxExtent
        from 'vmtx' plus the glyph outlines ('glyf' or CFF/CFF2)."""
        if "vmtx" not in ttFont:
            return

        vmtxTable = ttFont["vmtx"]
        self.advanceHeightMax = max(adv for adv, _ in vmtxTable.metrics.values())

        # Map glyph name -> outline height (yMax - yMin); glyphs without
        # outlines are skipped and do not affect the minima/maxima below.
        boundsHeightDict = {}
        if "glyf" in ttFont:
            glyfTable = ttFont["glyf"]
            for name in ttFont.getGlyphOrder():
                g = glyfTable[name]
                if g.numberOfContours == 0:
                    continue
                if g.numberOfContours < 0 and not hasattr(g, "yMax"):
                    # Composite glyph without extents set.
                    # Calculate those.
                    g.recalcBounds(glyfTable)
                boundsHeightDict[name] = g.yMax - g.yMin
        elif "CFF " in ttFont or "CFF2" in ttFont:
            if "CFF " in ttFont:
                topDict = ttFont["CFF "].cff.topDictIndex[0]
            else:
                topDict = ttFont["CFF2"].cff.topDictIndex[0]
            charStrings = topDict.CharStrings
            for name in ttFont.getGlyphOrder():
                cs = charStrings[name]
                bounds = cs.calcBounds(charStrings)
                if bounds is not None:
                    # Round outward so the integer height covers the outline.
                    boundsHeightDict[name] = int(
                        math.ceil(bounds[3]) - math.floor(bounds[1])
                    )

        if boundsHeightDict:
            minTopSideBearing = float("inf")
            minBottomSideBearing = float("inf")
            yMaxExtent = -float("inf")
            for name, boundsHeight in boundsHeightDict.items():
                advanceHeight, tsb = vmtxTable[name]
                # Bottom side bearing is whatever remains of the advance
                # after the top side bearing and the outline height.
                bsb = advanceHeight - tsb - boundsHeight
                extent = tsb + boundsHeight
                minTopSideBearing = min(minTopSideBearing, tsb)
                minBottomSideBearing = min(minBottomSideBearing, bsb)
                yMaxExtent = max(yMaxExtent, extent)
            self.minTopSideBearing = minTopSideBearing
            self.minBottomSideBearing = minBottomSideBearing
            self.yMaxExtent = yMaxExtent

        else:  # No glyph has outlines.
            self.minTopSideBearing = 0
            self.minBottomSideBearing = 0
            self.yMaxExtent = 0

    def toXML(self, writer, ttFont):
        """Write each header field as a simple TTX tag, in format order."""
        formatstring, names, fixes = sstruct.getformat(vheaFormat)
        for name in names:
            value = getattr(self, name)
            if name == "tableVersion":
                # Emit the version as a hex Version16Dot16 value.
                value = fi2ve(value)
                value = "0x%08x" % value
            writer.simpletag(name, value=value)
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Set one header field from its TTX element."""
        if name == "tableVersion":
            setattr(self, name, ve2fi(attrs["value"]))
            return
        setattr(self, name, safeEval(attrs["value"]))

    # reserved0 is caretOffset for legacy reasons
    @property
    def reserved0(self):
        return self.caretOffset

    @reserved0.setter
    def reserved0(self, value):
        self.caretOffset = value
|
||||
@@ -0,0 +1,19 @@
|
||||
from fontTools import ttLib
|
||||
|
||||
superclass = ttLib.getTableClass("hmtx")
|
||||
|
||||
|
||||
class table__v_m_t_x(superclass):
    """Vertical Metrics table

    The ``vmtx`` table contains per-glyph metrics for the glyphs in a
    ``glyf``, ``CFF ``, or ``CFF2`` table, as needed for vertical text
    layout.

    See also https://learn.microsoft.com/en-us/typography/opentype/spec/vmtx
    """

    # Configuration for the shared 'hmtx' implementation: which header
    # table holds the metric count, and the attribute names used in TTX.
    headerTag = "vhea"
    advanceName = "height"
    sideBearingName = "tsb"
    numberOfMetricsName = "numberOfVMetrics"
|
||||
@@ -0,0 +1,20 @@
|
||||
from fontTools.misc.textTools import strjoin, tobytes, tostr
|
||||
from . import DefaultTable
|
||||
|
||||
|
||||
class asciiTable(DefaultTable.DefaultTable):
    """Base class for tables whose payload is plain ASCII text."""

    def toXML(self, writer, ttFont):
        # removing null bytes. XXX needed??
        text = strjoin(tostr(self.data).split("\0"))
        writer.begintag("source")
        writer.newline()
        writer.write_noindent(text)
        writer.newline()
        writer.endtag("source")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        # Drop the first and last lines: they are only the newlines that
        # surround the text inside the <source> element.
        lines = strjoin(content).split("\n")
        self.data = tobytes("\n".join(lines[1:-1]))
|
||||
@@ -0,0 +1,92 @@
|
||||
import struct, warnings
|
||||
|
||||
try:
|
||||
import lz4
|
||||
except ImportError:
|
||||
lz4 = None
|
||||
else:
|
||||
import lz4.block
|
||||
|
||||
# old scheme for VERSION < 0.9 otherwise use lz4.block
|
||||
|
||||
|
||||
def decompress(data):
    """Return (table data, compression scheme) for a graphite table blob.

    The second ulong of the table packs the scheme into the top 5 bits
    and the uncompressed size into the low 27 bits.  Scheme 0 means the
    data is stored uncompressed; scheme 1 is lz4 block compression.
    """
    (compression,) = struct.unpack(">L", data[4:8])
    scheme, size = compression >> 27, compression & 0x07FFFFFF
    if scheme == 1 and lz4:
        # lz4.block expects a little-endian uncompressed-size prefix.
        expanded = lz4.block.decompress(struct.pack("<L", size) + data[8:])
        if len(expanded) == size:
            data = expanded
        else:
            warnings.warn("Table decompression failed.")
    elif scheme != 0:
        warnings.warn("Table is compressed with an unsupported compression scheme")
    return (data, scheme)
|
||||
|
||||
|
||||
def compress(scheme, data):
    """Compress *data* with the given graphite scheme (0 = none, 1 = lz4).

    The returned blob keeps the first 4 bytes of *data* and rewrites the
    second ulong to carry the scheme and the uncompressed length.
    """
    hdr = data[:4] + struct.pack(">L", (scheme << 27) + (len(data) & 0x07FFFFFF))
    if scheme == 0:
        return data
    if scheme == 1 and lz4:
        payload = lz4.block.compress(
            data, mode="high_compression", compression=16, store_size=False
        )
        return hdr + payload
    warnings.warn("Table failed to compress by unsupported compression scheme")
    return data
|
||||
|
||||
|
||||
def _entries(attrs, sameval):
|
||||
ak = 0
|
||||
vals = []
|
||||
lastv = 0
|
||||
for k, v in attrs:
|
||||
if len(vals) and (k != ak + 1 or (sameval and v != lastv)):
|
||||
yield (ak - len(vals) + 1, len(vals), vals)
|
||||
vals = []
|
||||
ak = k
|
||||
vals.append(v)
|
||||
lastv = v
|
||||
yield (ak - len(vals) + 1, len(vals), vals)
|
||||
|
||||
|
||||
def entries(attributes, sameval=False):
    """Sort *attributes* by numeric key and group them into runs."""
    ordered = sorted(attributes.items(), key=lambda item: int(item[0]))
    return _entries(ordered, sameval)
|
||||
|
||||
|
||||
def bininfo(num, size=1):
    """Pack the (count, searchRange, entrySelector, rangeShift) header
    used by binary-searchable OpenType subtables, big-endian."""
    if num == 0:
        return struct.pack(">4H", 0, 0, 0, 0)
    # entrySelector = floor(log2(num)); searchRange is the largest
    # power-of-two run of entries, scaled by the entry size.
    select = num.bit_length() - 1
    srange = (1 << select) * size
    shift = num * size - srange
    return struct.pack(">4H", num, srange, select, shift)
|
||||
|
||||
|
||||
def num2tag(n):
    """Render *n* as a decimal string, or as its 4-byte tag when large.

    Values below 0x200000 cannot be valid 4-character tags (the first
    byte would be unprintable), so they are shown as plain numbers.
    """
    if n < 0x200000:
        return str(n)
    packed = struct.pack(">L", n)
    return packed.replace(b"\000", b"").decode()
|
||||
|
||||
|
||||
def tag2num(n):
    """Parse *n* as a decimal number, else pack it as a 4-char tag value.

    Non-numeric strings are space-padded (or truncated) to exactly four
    characters before being packed big-endian.
    """
    try:
        return int(n)
    except ValueError:
        # Pad with up to four spaces so short tags always yield 4 bytes;
        # a single-space pad would make struct.unpack fail on tags
        # shorter than 3 characters.
        n = (n + "    ")[:4]
        return struct.unpack(">L", n.encode("ascii"))[0]
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,163 @@
|
||||
"""Methods for traversing trees of otData-driven OpenType tables."""
|
||||
|
||||
from collections import deque
|
||||
from typing import Callable, Deque, Iterable, List, Optional, Tuple
|
||||
from .otBase import BaseTable
|
||||
|
||||
|
||||
__all__ = [
|
||||
"bfs_base_table",
|
||||
"dfs_base_table",
|
||||
"SubTablePath",
|
||||
]
|
||||
|
||||
|
||||
class SubTablePath(Tuple[BaseTable.SubTableEntry, ...]):
    """Tuple of SubTableEntry items from a root table down to one subtable."""

    def __str__(self) -> str:
        # Render as dotted attribute access with optional indices,
        # e.g. "GSUB.LookupList.Lookup[0]".
        path_parts = []
        for entry in self:
            path_part = entry.name
            if entry.index is not None:
                path_part += f"[{entry.index}]"
            path_parts.append(path_part)
        return ".".join(path_parts)
|
||||
|
||||
|
||||
# Given f(current frontier, new entries) add new entries to frontier
|
||||
AddToFrontierFn = Callable[[Deque[SubTablePath], List[SubTablePath]], None]
|
||||
|
||||
|
||||
def dfs_base_table(
    root: BaseTable,
    root_accessor: Optional[str] = None,
    skip_root: bool = False,
    predicate: Optional[Callable[[SubTablePath], bool]] = None,
    iter_subtables_fn: Optional[
        Callable[[BaseTable], Iterable[BaseTable.SubTableEntry]]
    ] = None,
) -> Iterable[SubTablePath]:
    """Depth-first search tree of BaseTables.

    Tables are visited in pre-order: a table is yielded before any of its
    subtables, and subtables are traversed in the order they are iterated.

    Args:
        root (BaseTable): the root of the tree.
        root_accessor (Optional[str]): attribute name for the root table, if any (mostly
            useful for debugging).
        skip_root (Optional[bool]): if True, the root itself is not visited, only its
            children.
        predicate (Optional[Callable[[SubTablePath], bool]]): function to filter out
            paths. If True, the path is yielded and its subtables are added to the
            queue. If False, the path is skipped and its subtables are not traversed.
        iter_subtables_fn (Optional[Callable[[BaseTable], Iterable[BaseTable.SubTableEntry]]]):
            function to iterate over subtables of a table. If None, the default
            BaseTable.iterSubTables() is used.

    Yields:
        SubTablePath: tuples of BaseTable.SubTableEntry(name, table, index) namedtuples
        for each of the nodes in the tree. The last entry in a path is the current
        subtable, whereas preceding ones refer to its parent tables all the way up to
        the root.
    """
    # extendleft(reversed(...)) pushes the children to the front of the
    # deque while preserving their relative order, which yields DFS.
    yield from _traverse_ot_data(
        root,
        root_accessor,
        skip_root,
        predicate,
        lambda frontier, new: frontier.extendleft(reversed(new)),
        iter_subtables_fn,
    )
|
||||
|
||||
|
||||
def bfs_base_table(
    root: BaseTable,
    root_accessor: Optional[str] = None,
    skip_root: bool = False,
    predicate: Optional[Callable[[SubTablePath], bool]] = None,
    iter_subtables_fn: Optional[
        Callable[[BaseTable], Iterable[BaseTable.SubTableEntry]]
    ] = None,
) -> Iterable[SubTablePath]:
    """Breadth-first search tree of BaseTables.

    Args:
        root (BaseTable): the root of the tree.
        root_accessor (Optional[str]): attribute name for the root table, if any (mostly
            useful for debugging).
        skip_root (Optional[bool]): if True, the root itself is not visited, only its
            children.
        predicate (Optional[Callable[[SubTablePath], bool]]): function to filter out
            paths. If True, the path is yielded and its subtables are added to the
            queue. If False, the path is skipped and its subtables are not traversed.
        iter_subtables_fn (Optional[Callable[[BaseTable], Iterable[BaseTable.SubTableEntry]]]):
            function to iterate over subtables of a table. If None, the default
            BaseTable.iterSubTables() is used.

    Yields:
        SubTablePath: tuples of BaseTable.SubTableEntry(name, table, index) namedtuples
        for each of the nodes in the tree. The last entry in a path is the current
        subtable, whereas preceding ones refer to its parent tables all the way up to
        the root.
    """
    # Appending to the right of the deque yields level-by-level (BFS) order.
    yield from _traverse_ot_data(
        root,
        root_accessor,
        skip_root,
        predicate,
        lambda frontier, new: frontier.extend(new),
        iter_subtables_fn,
    )
|
||||
|
||||
|
||||
def _traverse_ot_data(
    root: BaseTable,
    root_accessor: Optional[str],
    skip_root: bool,
    predicate: Optional[Callable[[SubTablePath], bool]],
    add_to_frontier_fn: AddToFrontierFn,
    iter_subtables_fn: Optional[
        Callable[[BaseTable], Iterable[BaseTable.SubTableEntry]]
    ] = None,
) -> Iterable[SubTablePath]:
    """Shared worker behind dfs_base_table/bfs_base_table.

    ``add_to_frontier_fn`` decides the traversal order: appending new
    paths to the right of the deque gives BFS, prepending them (in
    order) gives DFS.
    """
    # no visited because general otData cannot cycle (forward-offset only)
    if root_accessor is None:
        root_accessor = type(root).__name__

    if predicate is None:

        def predicate(path):
            # Default: accept every path.
            return True

    if iter_subtables_fn is None:

        def iter_subtables_fn(table):
            return table.iterSubTables()

    frontier: Deque[SubTablePath] = deque()

    root_entry = BaseTable.SubTableEntry(root_accessor, root)
    if not skip_root:
        frontier.append((root_entry,))
    else:
        # Seed the frontier with the root's children instead of the root.
        add_to_frontier_fn(
            frontier,
            [
                (root_entry, subtable_entry)
                for subtable_entry in iter_subtables_fn(root)
            ],
        )

    while frontier:
        # path is (value, attr_name) tuples. attr_name is attr of parent to get value
        path = frontier.popleft()
        current = path[-1].value

        # A rejected path prunes its whole subtree: its subtables are
        # never enqueued.
        if not predicate(path):
            continue

        yield SubTablePath(path)

        new_entries = [
            path + (subtable_entry,) for subtable_entry in iter_subtables_fn(current)
        ]

        add_to_frontier_fn(frontier, new_entries)
|
||||
@@ -0,0 +1,149 @@
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.textTools import readHex, safeEval
|
||||
import struct
|
||||
|
||||
|
||||
sbixGlyphHeaderFormat = """
|
||||
>
|
||||
originOffsetX: h # The x-value of the point in the glyph relative to its
|
||||
# lower-left corner which corresponds to the origin of
|
||||
# the glyph on the screen, that is the point on the
|
||||
# baseline at the left edge of the glyph.
|
||||
originOffsetY: h # The y-value of the point in the glyph relative to its
|
||||
# lower-left corner which corresponds to the origin of
|
||||
# the glyph on the screen, that is the point on the
|
||||
# baseline at the left edge of the glyph.
|
||||
graphicType: 4s # e.g. "png "
|
||||
"""
|
||||
|
||||
sbixGlyphHeaderFormatSize = sstruct.calcsize(sbixGlyphHeaderFormat)
|
||||
|
||||
|
||||
class Glyph(object):
    """A single glyph data record in an 'sbix' strike.

    Holds either actual image data (e.g. PNG) or, for the special
    'dupe'/'flip' graphic types, a reference to another glyph.
    """

    def __init__(
        self,
        glyphName=None,
        referenceGlyphName=None,
        originOffsetX=0,
        originOffsetY=0,
        graphicType=None,
        imageData=None,
        rawdata=None,
        gid=0,
    ):
        self.gid = gid
        self.glyphName = glyphName
        self.referenceGlyphName = referenceGlyphName
        self.originOffsetX = originOffsetX
        self.originOffsetY = originOffsetY
        self.rawdata = rawdata
        self.graphicType = graphicType
        self.imageData = imageData

        # fix self.graphicType if it is null terminated or too short
        if self.graphicType is not None:
            if self.graphicType[-1] == "\0":
                self.graphicType = self.graphicType[:-1]
            if len(self.graphicType) > 4:
                from fontTools import ttLib

                raise ttLib.TTLibError(
                    "Glyph.graphicType must not be longer than 4 characters."
                )
            elif len(self.graphicType) < 4:
                # Pad with spaces to exactly 4 characters (the header packs
                # it as "4s"); at most 3 pad characters are ever needed.
                # A single-space pad source could never fill types shorter
                # than 3 characters.
                self.graphicType += "   "[: (4 - len(self.graphicType))]

    def is_reference_type(self):
        """Returns True if this glyph is a reference to another glyph's image data."""
        return self.graphicType == "dupe" or self.graphicType == "flip"

    def decompile(self, ttFont):
        """Parse self.rawdata into metrics plus image data or a reference."""
        self.glyphName = ttFont.getGlyphName(self.gid)
        if self.rawdata is None:
            from fontTools import ttLib

            raise ttLib.TTLibError("No table data to decompile")
        if len(self.rawdata) > 0:
            if len(self.rawdata) < sbixGlyphHeaderFormatSize:
                from fontTools import ttLib

                raise ttLib.TTLibError("Glyph header too short.")

            sstruct.unpack(
                sbixGlyphHeaderFormat, self.rawdata[:sbixGlyphHeaderFormatSize], self
            )

            if self.is_reference_type():
                # this glyph is a reference to another glyph's image data
                (gid,) = struct.unpack(">H", self.rawdata[sbixGlyphHeaderFormatSize:])
                self.referenceGlyphName = ttFont.getGlyphName(gid)
            else:
                self.imageData = self.rawdata[sbixGlyphHeaderFormatSize:]
                self.referenceGlyphName = None
        # clean up
        del self.rawdata
        del self.gid

    def compile(self, ttFont):
        """Build self.rawdata (header + payload) for this glyph."""
        if self.glyphName is None:
            from fontTools import ttLib

            raise ttLib.TTLibError("Can't compile Glyph without glyph name")
        # TODO: if ttFont has no maxp, cmap etc., ignore glyph names and compile by index?
        # (needed if you just want to compile the sbix table on its own)
        # NOTE(review): gid is stored packed (bytes) here, while decompile
        # treats it as an int — confirm no caller relies on the packed form.
        self.gid = struct.pack(">H", ttFont.getGlyphID(self.glyphName))
        if self.graphicType is None:
            # Empty glyph record: no header, no payload.
            rawdata = b""
        else:
            rawdata = sstruct.pack(sbixGlyphHeaderFormat, self)
            if self.is_reference_type():
                rawdata += struct.pack(">H", ttFont.getGlyphID(self.referenceGlyphName))
            else:
                assert self.imageData is not None
                rawdata += self.imageData
        self.rawdata = rawdata

    def toXML(self, xmlWriter, ttFont):
        """Write this glyph record to TTX, as reference or hex image data."""
        if self.graphicType is None:
            # TODO: ignore empty glyphs?
            # a glyph data entry is required for each glyph,
            # but empty ones can be calculated at compile time
            xmlWriter.simpletag("glyph", name=self.glyphName)
            xmlWriter.newline()
            return
        xmlWriter.begintag(
            "glyph",
            graphicType=self.graphicType,
            name=self.glyphName,
            originOffsetX=self.originOffsetX,
            originOffsetY=self.originOffsetY,
        )
        xmlWriter.newline()
        if self.is_reference_type():
            # this glyph is a reference to another glyph id.
            xmlWriter.simpletag("ref", glyphname=self.referenceGlyphName)
        else:
            xmlWriter.begintag("hexdata")
            xmlWriter.newline()
            xmlWriter.dumphex(self.imageData)
            xmlWriter.endtag("hexdata")
            xmlWriter.newline()
        xmlWriter.endtag("glyph")
        xmlWriter.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Read a <ref> or <hexdata> child element of a <glyph>."""
        if name == "ref":
            # this glyph i.e. a reference to another glyph's image data.
            # in this case imageData contains the glyph id of the reference glyph
            # get glyph id from glyphname
            glyphname = safeEval("'''" + attrs["glyphname"] + "'''")
            self.imageData = struct.pack(">H", ttFont.getGlyphID(glyphname))
            self.referenceGlyphName = glyphname
        elif name == "hexdata":
            self.imageData = readHex(content)
        else:
            from fontTools import ttLib

            raise ttLib.TTLibError("can't handle '%s' element" % name)
|
||||
@@ -0,0 +1,177 @@
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.textTools import safeEval
|
||||
from .sbixGlyph import Glyph
|
||||
import struct
|
||||
|
||||
sbixStrikeHeaderFormat = """
|
||||
>
|
||||
ppem: H # The PPEM for which this strike was designed (e.g., 9,
|
||||
# 12, 24)
|
||||
resolution: H # The screen resolution (in dpi) for which this strike
|
||||
# was designed (e.g., 72)
|
||||
"""
|
||||
|
||||
sbixGlyphDataOffsetFormat = """
|
||||
>
|
||||
glyphDataOffset: L # Offset from the beginning of the strike data record
|
||||
# to data for the individual glyph
|
||||
"""
|
||||
|
||||
sbixStrikeHeaderFormatSize = sstruct.calcsize(sbixStrikeHeaderFormat)
|
||||
sbixGlyphDataOffsetFormatSize = sstruct.calcsize(sbixGlyphDataOffsetFormat)
|
||||
|
||||
|
||||
class Strike(object):
|
||||
def __init__(self, rawdata=None, ppem=0, resolution=72):
|
||||
self.data = rawdata
|
||||
self.ppem = ppem
|
||||
self.resolution = resolution
|
||||
self.glyphs = {}
|
||||
|
||||
def decompile(self, ttFont):
|
||||
if self.data is None:
|
||||
from fontTools import ttLib
|
||||
|
||||
raise ttLib.TTLibError
|
||||
if len(self.data) < sbixStrikeHeaderFormatSize:
|
||||
from fontTools import ttLib
|
||||
|
||||
raise (
|
||||
ttLib.TTLibError,
|
||||
"Strike header too short: Expected %x, got %x.",
|
||||
) % (sbixStrikeHeaderFormatSize, len(self.data))
|
||||
|
||||
# read Strike header from raw data
|
||||
sstruct.unpack(
|
||||
sbixStrikeHeaderFormat, self.data[:sbixStrikeHeaderFormatSize], self
|
||||
)
|
||||
|
||||
# calculate number of glyphs
|
||||
(firstGlyphDataOffset,) = struct.unpack(
|
||||
">L",
|
||||
self.data[
|
||||
sbixStrikeHeaderFormatSize : sbixStrikeHeaderFormatSize
|
||||
+ sbixGlyphDataOffsetFormatSize
|
||||
],
|
||||
)
|
||||
self.numGlyphs = (
|
||||
firstGlyphDataOffset - sbixStrikeHeaderFormatSize
|
||||
) // sbixGlyphDataOffsetFormatSize - 1
|
||||
# ^ -1 because there's one more offset than glyphs
|
||||
|
||||
# build offset list for single glyph data offsets
|
||||
self.glyphDataOffsets = []
|
||||
for i in range(
|
||||
self.numGlyphs + 1
|
||||
): # + 1 because there's one more offset than glyphs
|
||||
start = i * sbixGlyphDataOffsetFormatSize + sbixStrikeHeaderFormatSize
|
||||
(current_offset,) = struct.unpack(
|
||||
">L", self.data[start : start + sbixGlyphDataOffsetFormatSize]
|
||||
)
|
||||
self.glyphDataOffsets.append(current_offset)
|
||||
|
||||
# iterate through offset list and slice raw data into glyph data records
|
||||
for i in range(self.numGlyphs):
|
||||
current_glyph = Glyph(
|
||||
rawdata=self.data[
|
||||
self.glyphDataOffsets[i] : self.glyphDataOffsets[i + 1]
|
||||
],
|
||||
gid=i,
|
||||
)
|
||||
current_glyph.decompile(ttFont)
|
||||
self.glyphs[current_glyph.glyphName] = current_glyph
|
||||
del self.glyphDataOffsets
|
||||
del self.numGlyphs
|
||||
del self.data
|
||||
|
||||
def compile(self, ttFont):
|
||||
self.glyphDataOffsets = b""
|
||||
self.bitmapData = b""
|
||||
|
||||
glyphOrder = ttFont.getGlyphOrder()
|
||||
|
||||
# first glyph starts right after the header
|
||||
currentGlyphDataOffset = (
|
||||
sbixStrikeHeaderFormatSize
|
||||
+ sbixGlyphDataOffsetFormatSize * (len(glyphOrder) + 1)
|
||||
)
|
||||
for glyphName in glyphOrder:
|
||||
if glyphName in self.glyphs:
|
||||
# we have glyph data for this glyph
|
||||
current_glyph = self.glyphs[glyphName]
|
||||
else:
|
||||
# must add empty glyph data record for this glyph
|
||||
current_glyph = Glyph(glyphName=glyphName)
|
||||
current_glyph.compile(ttFont)
|
||||
current_glyph.glyphDataOffset = currentGlyphDataOffset
|
||||
self.bitmapData += current_glyph.rawdata
|
||||
currentGlyphDataOffset += len(current_glyph.rawdata)
|
||||
self.glyphDataOffsets += sstruct.pack(
|
||||
sbixGlyphDataOffsetFormat, current_glyph
|
||||
)
|
||||
|
||||
# add last "offset", really the end address of the last glyph data record
|
||||
dummy = Glyph()
|
||||
dummy.glyphDataOffset = currentGlyphDataOffset
|
||||
self.glyphDataOffsets += sstruct.pack(sbixGlyphDataOffsetFormat, dummy)
|
||||
|
||||
# pack header
|
||||
self.data = sstruct.pack(sbixStrikeHeaderFormat, self)
|
||||
# add offsets and image data after header
|
||||
self.data += self.glyphDataOffsets + self.bitmapData
|
||||
|
||||
def toXML(self, xmlWriter, ttFont):
|
||||
xmlWriter.begintag("strike")
|
||||
xmlWriter.newline()
|
||||
xmlWriter.simpletag("ppem", value=self.ppem)
|
||||
xmlWriter.newline()
|
||||
xmlWriter.simpletag("resolution", value=self.resolution)
|
||||
xmlWriter.newline()
|
||||
glyphOrder = ttFont.getGlyphOrder()
|
||||
for glyphName in glyphOrder:
|
||||
if glyphName in self.glyphs:
|
||||
self.glyphs[glyphName].toXML(xmlWriter, ttFont)
|
||||
# TODO: what if there are more glyph data records than (glyf table) glyphs?
|
||||
xmlWriter.endtag("strike")
|
||||
xmlWriter.newline()
|
||||
|
||||
def fromXML(self, name, attrs, content, ttFont):
    # Rebuild strike attributes and glyph records from their TTX elements.
    if name in ["ppem", "resolution"]:
        setattr(self, name, safeEval(attrs["value"]))
    elif name == "glyph":
        # NOTE(review): wrapping attribute text in ''' for safeEval is
        # fragile — a value containing a triple quote would break the
        # expression; consider using the attribute string directly.
        if "graphicType" in attrs:
            myFormat = safeEval("'''" + attrs["graphicType"] + "'''")
        else:
            myFormat = None
        # accept both the current "glyphname" and the legacy "name" attribute
        if "glyphname" in attrs:
            myGlyphName = safeEval("'''" + attrs["glyphname"] + "'''")
        elif "name" in attrs:
            myGlyphName = safeEval("'''" + attrs["name"] + "'''")
        else:
            from fontTools import ttLib

            raise ttLib.TTLibError("Glyph must have a glyph name.")
        # origin offsets default to 0 when absent
        if "originOffsetX" in attrs:
            myOffsetX = safeEval(attrs["originOffsetX"])
        else:
            myOffsetX = 0
        if "originOffsetY" in attrs:
            myOffsetY = safeEval(attrs["originOffsetY"])
        else:
            myOffsetY = 0
        current_glyph = Glyph(
            glyphName=myGlyphName,
            graphicType=myFormat,
            originOffsetX=myOffsetX,
            originOffsetY=myOffsetY,
        )
        for element in content:
            if isinstance(element, tuple):
                name, attrs, content = element
                current_glyph.fromXML(name, attrs, content, ttFont)
                current_glyph.compile(ttFont)
        self.glyphs[current_glyph.glyphName] = current_glyph
    else:
        from fontTools import ttLib

        raise ttLib.TTLibError("can't handle '%s' element" % name)
|
||||
@@ -0,0 +1,91 @@
|
||||
This folder is a subpackage of ttLib. Each module here is a
|
||||
specialized TT/OT table converter: they can convert raw data
|
||||
to Python objects and vice versa. Usually you don't need to
|
||||
use the modules directly: they are imported and used
|
||||
automatically when needed by ttLib.
|
||||
|
||||
If you are writing your own table converter the following is
|
||||
important.
|
||||
|
||||
The modules here have pretty strange names: this is due to the
|
||||
fact that we need to map TT table tags (which are case sensitive)
|
||||
to filenames (which on Mac and Win aren't case sensitive) as well
|
||||
as to Python identifiers. The latter means it can only contain
|
||||
[A-Za-z0-9_] and cannot start with a number.
|
||||
|
||||
ttLib provides functions to expand a tag into the format used here:
|
||||
|
||||
>>> from fontTools import ttLib
|
||||
>>> ttLib.tagToIdentifier("FOO ")
|
||||
'F_O_O_'
|
||||
>>> ttLib.tagToIdentifier("cvt ")
|
||||
'_c_v_t'
|
||||
>>> ttLib.tagToIdentifier("OS/2")
|
||||
'O_S_2f_2'
|
||||
>>> ttLib.tagToIdentifier("glyf")
|
||||
'_g_l_y_f'
|
||||
>>>
|
||||
|
||||
And vice versa:
|
||||
|
||||
>>> ttLib.identifierToTag("F_O_O_")
|
||||
'FOO '
|
||||
>>> ttLib.identifierToTag("_c_v_t")
|
||||
'cvt '
|
||||
>>> ttLib.identifierToTag("O_S_2f_2")
|
||||
'OS/2'
|
||||
>>> ttLib.identifierToTag("_g_l_y_f")
|
||||
'glyf'
|
||||
>>>
|
||||
|
||||
Eg. the 'glyf' table converter lives in a Python file called:
|
||||
|
||||
_g_l_y_f.py
|
||||
|
||||
The converter itself is a class, named "table_" + expandedtag. Eg:
|
||||
|
||||
class table__g_l_y_f:
|
||||
etc.
|
||||
|
||||
Note that if you _do_ need to use such modules or classes manually,
|
||||
there are two convenient API functions that let you find them by tag:
|
||||
|
||||
>>> ttLib.getTableModule('glyf')
|
||||
<module 'ttLib.tables._g_l_y_f'>
|
||||
>>> ttLib.getTableClass('glyf')
|
||||
<class ttLib.tables._g_l_y_f.table__g_l_y_f at 645f400>
|
||||
>>>
|
||||
|
||||
You must subclass from DefaultTable.DefaultTable. It provides some default
|
||||
behavior, as well as a constructor method (__init__) that you don't need to
|
||||
override.
|
||||
|
||||
Your converter should minimally provide two methods:
|
||||
|
||||
class table_F_O_O_(DefaultTable.DefaultTable): # converter for table 'FOO '
|
||||
|
||||
def decompile(self, data, ttFont):
|
||||
# 'data' is the raw table data. Unpack it into a
|
||||
# Python data structure.
|
||||
# 'ttFont' is a ttLib.TTFont instance, enabling you to
|
||||
# refer to other tables. Do ***not*** keep a reference to
|
||||
# it: it will cause a circular reference (ttFont saves
|
||||
# a reference to us), and that means we'll be leaking
|
||||
# memory. If you need to use it in other methods, just
|
||||
# pass it around as a method argument.
|
||||
|
||||
def compile(self, ttFont):
|
||||
# Return the raw data, as converted from the Python
|
||||
# data structure.
|
||||
# Again, 'ttFont' is there so you can access other tables.
|
||||
# Same warning applies.
|
||||
|
||||
If you want to support TTX import/export as well, you need to provide two
|
||||
additional methods:
|
||||
|
||||
def toXML(self, writer, ttFont):
|
||||
# XXX
|
||||
|
||||
def fromXML(self, name, attrs, content, ttFont):
|
||||
# XXX
|
||||
|
||||
@@ -0,0 +1,594 @@
|
||||
"""ttLib.tables.ttProgram.py -- Assembler/disassembler for TrueType bytecode programs."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from fontTools.misc.textTools import num2binary, binary2num, readHex, strjoin
|
||||
import array
|
||||
from io import StringIO
|
||||
from typing import List
|
||||
import re
|
||||
import logging
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
# fmt: off
|
||||
|
||||
# first, the list of instructions that eat bytes or words from the instruction stream
|
||||
|
||||
streamInstructions = [
|
||||
#
|
||||
# opcode mnemonic argBits descriptive name pops pushes eats from instruction stream pushes
|
||||
#
|
||||
(0x40, 'NPUSHB', 0, 'PushNBytes', 0, -1), # n, b1, b2,...bn b1,b2...bn
|
||||
(0x41, 'NPUSHW', 0, 'PushNWords', 0, -1), # n, w1, w2,...w w1,w2...wn
|
||||
(0xb0, 'PUSHB', 3, 'PushBytes', 0, -1), # b0, b1,..bn b0, b1, ...,bn
|
||||
(0xb8, 'PUSHW', 3, 'PushWords', 0, -1), # w0,w1,..wn w0 ,w1, ...wn
|
||||
]
|
||||
|
||||
|
||||
# next, the list of "normal" instructions
|
||||
|
||||
instructions = [
|
||||
#
|
||||
# opcode mnemonic argBits descriptive name pops pushes eats from instruction stream pushes
|
||||
#
|
||||
(0x7f, 'AA', 0, 'AdjustAngle', 1, 0), # p -
|
||||
(0x64, 'ABS', 0, 'Absolute', 1, 1), # n |n|
|
||||
(0x60, 'ADD', 0, 'Add', 2, 1), # n2, n1 (n1 + n2)
|
||||
(0x27, 'ALIGNPTS', 0, 'AlignPts', 2, 0), # p2, p1 -
|
||||
(0x3c, 'ALIGNRP', 0, 'AlignRelativePt', -1, 0), # p1, p2, ... , ploopvalue -
|
||||
(0x5a, 'AND', 0, 'LogicalAnd', 2, 1), # e2, e1 b
|
||||
(0x2b, 'CALL', 0, 'CallFunction', 1, 0), # f -
|
||||
(0x67, 'CEILING', 0, 'Ceiling', 1, 1), # n ceil(n)
|
||||
(0x25, 'CINDEX', 0, 'CopyXToTopStack', 1, 1), # k ek
|
||||
(0x22, 'CLEAR', 0, 'ClearStack', -1, 0), # all items on the stack -
|
||||
(0x4f, 'DEBUG', 0, 'DebugCall', 1, 0), # n -
|
||||
(0x73, 'DELTAC1', 0, 'DeltaExceptionC1', -1, 0), # argn, cn, argn-1,cn-1, , arg1, c1 -
|
||||
(0x74, 'DELTAC2', 0, 'DeltaExceptionC2', -1, 0), # argn, cn, argn-1,cn-1, , arg1, c1 -
|
||||
(0x75, 'DELTAC3', 0, 'DeltaExceptionC3', -1, 0), # argn, cn, argn-1,cn-1, , arg1, c1 -
|
||||
(0x5d, 'DELTAP1', 0, 'DeltaExceptionP1', -1, 0), # argn, pn, argn-1, pn-1, , arg1, p1 -
|
||||
(0x71, 'DELTAP2', 0, 'DeltaExceptionP2', -1, 0), # argn, pn, argn-1, pn-1, , arg1, p1 -
|
||||
(0x72, 'DELTAP3', 0, 'DeltaExceptionP3', -1, 0), # argn, pn, argn-1, pn-1, , arg1, p1 -
|
||||
(0x24, 'DEPTH', 0, 'GetDepthStack', 0, 1), # - n
|
||||
(0x62, 'DIV', 0, 'Divide', 2, 1), # n2, n1 (n1 * 64)/ n2
|
||||
(0x20, 'DUP', 0, 'DuplicateTopStack', 1, 2), # e e, e
|
||||
(0x59, 'EIF', 0, 'EndIf', 0, 0), # - -
|
||||
(0x1b, 'ELSE', 0, 'Else', 0, 0), # - -
|
||||
(0x2d, 'ENDF', 0, 'EndFunctionDefinition', 0, 0), # - -
|
||||
(0x54, 'EQ', 0, 'Equal', 2, 1), # e2, e1 b
|
||||
(0x57, 'EVEN', 0, 'Even', 1, 1), # e b
|
||||
(0x2c, 'FDEF', 0, 'FunctionDefinition', 1, 0), # f -
|
||||
(0x4e, 'FLIPOFF', 0, 'SetAutoFlipOff', 0, 0), # - -
|
||||
(0x4d, 'FLIPON', 0, 'SetAutoFlipOn', 0, 0), # - -
|
||||
(0x80, 'FLIPPT', 0, 'FlipPoint', -1, 0), # p1, p2, ..., ploopvalue -
|
||||
(0x82, 'FLIPRGOFF', 0, 'FlipRangeOff', 2, 0), # h, l -
|
||||
(0x81, 'FLIPRGON', 0, 'FlipRangeOn', 2, 0), # h, l -
|
||||
(0x66, 'FLOOR', 0, 'Floor', 1, 1), # n floor(n)
|
||||
(0x46, 'GC', 1, 'GetCoordOnPVector', 1, 1), # p c
|
||||
(0x88, 'GETINFO', 0, 'GetInfo', 1, 1), # selector result
|
||||
(0x91, 'GETVARIATION', 0, 'GetVariation', 0, -1), # - a1,..,an
|
||||
(0x0d, 'GFV', 0, 'GetFVector', 0, 2), # - px, py
|
||||
(0x0c, 'GPV', 0, 'GetPVector', 0, 2), # - px, py
|
||||
(0x52, 'GT', 0, 'GreaterThan', 2, 1), # e2, e1 b
|
||||
(0x53, 'GTEQ', 0, 'GreaterThanOrEqual', 2, 1), # e2, e1 b
|
||||
(0x89, 'IDEF', 0, 'InstructionDefinition', 1, 0), # f -
|
||||
(0x58, 'IF', 0, 'If', 1, 0), # e -
|
||||
(0x8e, 'INSTCTRL', 0, 'SetInstrExecControl', 2, 0), # s, v -
|
||||
(0x39, 'IP', 0, 'InterpolatePts', -1, 0), # p1, p2, ... , ploopvalue -
|
||||
(0x0f, 'ISECT', 0, 'MovePtToIntersect', 5, 0), # a1, a0, b1, b0, p -
|
||||
(0x30, 'IUP', 1, 'InterpolateUntPts', 0, 0), # - -
|
||||
(0x1c, 'JMPR', 0, 'Jump', 1, 0), # offset -
|
||||
(0x79, 'JROF', 0, 'JumpRelativeOnFalse', 2, 0), # e, offset -
|
||||
(0x78, 'JROT', 0, 'JumpRelativeOnTrue', 2, 0), # e, offset -
|
||||
(0x2a, 'LOOPCALL', 0, 'LoopAndCallFunction', 2, 0), # f, count -
|
||||
(0x50, 'LT', 0, 'LessThan', 2, 1), # e2, e1 b
|
||||
(0x51, 'LTEQ', 0, 'LessThenOrEqual', 2, 1), # e2, e1 b
|
||||
(0x8b, 'MAX', 0, 'Maximum', 2, 1), # e2, e1 max(e1, e2)
|
||||
(0x49, 'MD', 1, 'MeasureDistance', 2, 1), # p2,p1 d
|
||||
(0x2e, 'MDAP', 1, 'MoveDirectAbsPt', 1, 0), # p -
|
||||
(0xc0, 'MDRP', 5, 'MoveDirectRelPt', 1, 0), # p -
|
||||
(0x3e, 'MIAP', 1, 'MoveIndirectAbsPt', 2, 0), # n, p -
|
||||
(0x8c, 'MIN', 0, 'Minimum', 2, 1), # e2, e1 min(e1, e2)
|
||||
(0x26, 'MINDEX', 0, 'MoveXToTopStack', 1, 1), # k ek
|
||||
(0xe0, 'MIRP', 5, 'MoveIndirectRelPt', 2, 0), # n, p -
|
||||
(0x4b, 'MPPEM', 0, 'MeasurePixelPerEm', 0, 1), # - ppem
|
||||
(0x4c, 'MPS', 0, 'MeasurePointSize', 0, 1), # - pointSize
|
||||
(0x3a, 'MSIRP', 1, 'MoveStackIndirRelPt', 2, 0), # d, p -
|
||||
(0x63, 'MUL', 0, 'Multiply', 2, 1), # n2, n1 (n1 * n2)/64
|
||||
(0x65, 'NEG', 0, 'Negate', 1, 1), # n -n
|
||||
(0x55, 'NEQ', 0, 'NotEqual', 2, 1), # e2, e1 b
|
||||
(0x5c, 'NOT', 0, 'LogicalNot', 1, 1), # e ( not e )
|
||||
(0x6c, 'NROUND', 2, 'NoRound', 1, 1), # n1 n2
|
||||
(0x56, 'ODD', 0, 'Odd', 1, 1), # e b
|
||||
(0x5b, 'OR', 0, 'LogicalOr', 2, 1), # e2, e1 b
|
||||
(0x21, 'POP', 0, 'PopTopStack', 1, 0), # e -
|
||||
(0x45, 'RCVT', 0, 'ReadCVT', 1, 1), # location value
|
||||
(0x7d, 'RDTG', 0, 'RoundDownToGrid', 0, 0), # - -
|
||||
(0x7a, 'ROFF', 0, 'RoundOff', 0, 0), # - -
|
||||
(0x8a, 'ROLL', 0, 'RollTopThreeStack', 3, 3), # a,b,c b,a,c
|
||||
(0x68, 'ROUND', 2, 'Round', 1, 1), # n1 n2
|
||||
(0x43, 'RS', 0, 'ReadStore', 1, 1), # n v
|
||||
(0x3d, 'RTDG', 0, 'RoundToDoubleGrid', 0, 0), # - -
|
||||
(0x18, 'RTG', 0, 'RoundToGrid', 0, 0), # - -
|
||||
(0x19, 'RTHG', 0, 'RoundToHalfGrid', 0, 0), # - -
|
||||
(0x7c, 'RUTG', 0, 'RoundUpToGrid', 0, 0), # - -
|
||||
(0x77, 'S45ROUND', 0, 'SuperRound45Degrees', 1, 0), # n -
|
||||
(0x7e, 'SANGW', 0, 'SetAngleWeight', 1, 0), # weight -
|
||||
(0x85, 'SCANCTRL', 0, 'ScanConversionControl', 1, 0), # n -
|
||||
(0x8d, 'SCANTYPE', 0, 'ScanType', 1, 0), # n -
|
||||
(0x48, 'SCFS', 0, 'SetCoordFromStackFP', 2, 0), # c, p -
|
||||
(0x1d, 'SCVTCI', 0, 'SetCVTCutIn', 1, 0), # n -
|
||||
(0x5e, 'SDB', 0, 'SetDeltaBaseInGState', 1, 0), # n -
|
||||
(0x86, 'SDPVTL', 1, 'SetDualPVectorToLine', 2, 0), # p2, p1 -
|
||||
(0x5f, 'SDS', 0, 'SetDeltaShiftInGState', 1, 0), # n -
|
||||
(0x0b, 'SFVFS', 0, 'SetFVectorFromStack', 2, 0), # y, x -
|
||||
(0x04, 'SFVTCA', 1, 'SetFVectorToAxis', 0, 0), # - -
|
||||
(0x08, 'SFVTL', 1, 'SetFVectorToLine', 2, 0), # p2, p1 -
|
||||
(0x0e, 'SFVTPV', 0, 'SetFVectorToPVector', 0, 0), # - -
|
||||
(0x34, 'SHC', 1, 'ShiftContourByLastPt', 1, 0), # c -
|
||||
(0x32, 'SHP', 1, 'ShiftPointByLastPoint', -1, 0), # p1, p2, ..., ploopvalue -
|
||||
(0x38, 'SHPIX', 0, 'ShiftZoneByPixel', -1, 0), # d, p1, p2, ..., ploopvalue -
|
||||
(0x36, 'SHZ', 1, 'ShiftZoneByLastPoint', 1, 0), # e -
|
||||
(0x17, 'SLOOP', 0, 'SetLoopVariable', 1, 0), # n -
|
||||
(0x1a, 'SMD', 0, 'SetMinimumDistance', 1, 0), # distance -
|
||||
(0x0a, 'SPVFS', 0, 'SetPVectorFromStack', 2, 0), # y, x -
|
||||
(0x02, 'SPVTCA', 1, 'SetPVectorToAxis', 0, 0), # - -
|
||||
(0x06, 'SPVTL', 1, 'SetPVectorToLine', 2, 0), # p2, p1 -
|
||||
(0x76, 'SROUND', 0, 'SuperRound', 1, 0), # n -
|
||||
(0x10, 'SRP0', 0, 'SetRefPoint0', 1, 0), # p -
|
||||
(0x11, 'SRP1', 0, 'SetRefPoint1', 1, 0), # p -
|
||||
(0x12, 'SRP2', 0, 'SetRefPoint2', 1, 0), # p -
|
||||
(0x1f, 'SSW', 0, 'SetSingleWidth', 1, 0), # n -
|
||||
(0x1e, 'SSWCI', 0, 'SetSingleWidthCutIn', 1, 0), # n -
|
||||
(0x61, 'SUB', 0, 'Subtract', 2, 1), # n2, n1 (n1 - n2)
|
||||
(0x00, 'SVTCA', 1, 'SetFPVectorToAxis', 0, 0), # - -
|
||||
(0x23, 'SWAP', 0, 'SwapTopStack', 2, 2), # e2, e1 e1, e2
|
||||
(0x13, 'SZP0', 0, 'SetZonePointer0', 1, 0), # n -
|
||||
(0x14, 'SZP1', 0, 'SetZonePointer1', 1, 0), # n -
|
||||
(0x15, 'SZP2', 0, 'SetZonePointer2', 1, 0), # n -
|
||||
(0x16, 'SZPS', 0, 'SetZonePointerS', 1, 0), # n -
|
||||
(0x29, 'UTP', 0, 'UnTouchPt', 1, 0), # p -
|
||||
(0x70, 'WCVTF', 0, 'WriteCVTInFUnits', 2, 0), # n, l -
|
||||
(0x44, 'WCVTP', 0, 'WriteCVTInPixels', 2, 0), # v, l -
|
||||
(0x42, 'WS', 0, 'WriteStore', 2, 0), # v, l -
|
||||
]
|
||||
|
||||
# fmt: on
|
||||
|
||||
|
||||
def bitRepr(value, bits):
    """Return *value* as a binary string of exactly *bits* digits, MSB first."""
    digits = []
    for _ in range(bits):
        digits.append("01"[value & 0x1])
        value >>= 1
    return "".join(reversed(digits))
|
||||
|
||||
|
||||
_mnemonicPat = re.compile(r"[A-Z][A-Z0-9]*$")
|
||||
|
||||
|
||||
def _makeDict(instructionList):
    # Build two lookup tables from an instruction table:
    #   opcodeDict:   opcode   -> (mnemonic, argBits, argoffset, name)
    #   mnemonicDict: mnemonic -> (base opcode, argBits, name)
    # An instruction with embedded argument bits occupies a contiguous run
    # of 1 << argBits opcodes, all mapping back to the same mnemonic.
    opcodeDict = {}
    mnemonicDict = {}
    for op, mnemonic, argBits, name, pops, pushes in instructionList:
        assert _mnemonicPat.match(mnemonic)
        mnemonicDict[mnemonic] = op, argBits, name
        if argBits:
            argoffset = op
            for i in range(1 << argBits):
                opcodeDict[op + i] = mnemonic, argBits, argoffset, name
        else:
            opcodeDict[op] = mnemonic, 0, 0, name
    return opcodeDict, mnemonicDict
|
||||
|
||||
|
||||
streamOpcodeDict, streamMnemonicDict = _makeDict(streamInstructions)
|
||||
opcodeDict, mnemonicDict = _makeDict(instructions)
|
||||
|
||||
|
||||
class tt_instructions_error(Exception):
    """Raised when a TrueType program cannot be assembled or parsed.

    The offending message/token is kept in the ``error`` attribute.
    """

    def __init__(self, error):
        # Forward to Exception so .args is populated and the exception
        # pickles/reprs normally; keep .error for existing callers.
        super().__init__(error)
        self.error = error

    def __str__(self):
        return "TT instructions error: %s" % repr(self.error)
|
||||
|
||||
|
||||
_comment = r"/\*.*?\*/"
|
||||
_instruction = r"([A-Z][A-Z0-9]*)\s*\[(.*?)\]"
|
||||
_number = r"-?[0-9]+"
|
||||
_token = "(%s)|(%s)|(%s)" % (_instruction, _number, _comment)
|
||||
|
||||
_tokenRE = re.compile(_token)
|
||||
_whiteRE = re.compile(r"\s*")
|
||||
|
||||
_pushCountPat = re.compile(r"[A-Z][A-Z0-9]*\s*\[.*?\]\s*/\* ([0-9]+).*?\*/")
|
||||
|
||||
_indentRE = re.compile(r"^FDEF|IF|ELSE\[ \]\t.+")
|
||||
_unindentRE = re.compile(r"^ELSE|ENDF|EIF\[ \]\t.+")
|
||||
|
||||
|
||||
def _skipWhite(data, pos):
    """Return the index of the first non-whitespace character in *data*
    at or after *pos*."""
    match = _whiteRE.match(data, pos)
    end = match.end()
    assert end >= pos
    return end
|
||||
|
||||
|
||||
class Program(object):
    """A TrueType bytecode program, convertible to/from assembly source."""

    def __init__(self) -> None:
        # State lives lazily in two optional attributes, "bytecode" and
        # "assembly"; at most one of them is authoritative at a time.
        pass

    def fromBytecode(self, bytecode: bytes) -> None:
        # Load raw bytecode; drop any stale disassembly so the two
        # representations cannot disagree.
        self.bytecode = array.array("B", bytecode)
        if hasattr(self, "assembly"):
            del self.assembly
|
||||
|
||||
def fromAssembly(self, assembly: List[str] | str) -> None:
    """Load assembly source, given either as a single string or as a list
    of lines; any cached bytecode is discarded."""
    if isinstance(assembly, str):
        self.assembly = assembly.splitlines()
    elif isinstance(assembly, list):
        self.assembly = assembly
    else:
        raise TypeError(f"expected str or List[str], got {type(assembly).__name__}")
    if hasattr(self, "bytecode"):
        del self.bytecode
|
||||
|
||||
def getBytecode(self) -> bytes:
    """Return the program as raw bytecode, assembling on demand."""
    try:
        raw = self.bytecode
    except AttributeError:
        self._assemble()
        raw = self.bytecode
    return raw.tobytes()
|
||||
|
||||
def getAssembly(self, preserve=True) -> List[str]:
    """Return the program as a list of assembly lines, disassembling on
    demand; *preserve* keeps the original push-instruction forms."""
    try:
        return self.assembly
    except AttributeError:
        self._disassemble(preserve=preserve)
        return self.assembly
|
||||
|
||||
def toXML(self, writer, ttFont) -> None:
    # Emit either an <assembly> element (disassembled, pretty-indented) or
    # a <bytecode> hex dump, depending on ttFont.disassembleInstructions
    # (defaulting to disassembly when the flag is absent).
    if (
        not hasattr(ttFont, "disassembleInstructions")
        or ttFont.disassembleInstructions
    ):
        try:
            assembly = self.getAssembly()
        except:
            # Disassembly failed: log the traceback and fall back to a
            # raw hex dump so the font still round-trips through TTX.
            import traceback

            tmp = StringIO()
            traceback.print_exc(file=tmp)
            msg = "An exception occurred during the decompilation of glyph program:\n\n"
            msg += tmp.getvalue()
            log.error(msg)
            writer.begintag("bytecode")
            writer.newline()
            writer.comment(msg.strip())
            writer.newline()
            writer.dumphex(self.getBytecode())
            writer.endtag("bytecode")
            writer.newline()
        else:
            if not assembly:
                return
            writer.begintag("assembly")
            writer.newline()
            i = 0
            indent = 0  # current block-nesting depth (FDEF/IF/ELSE)
            nInstr = len(assembly)
            while i < nInstr:
                instr = assembly[i]
                # dedent before writing ELSE/ENDF/EIF lines
                if _unindentRE.match(instr):
                    indent -= 1
                writer.write(writer.indentwhite * indent)
                writer.write(instr)
                writer.newline()
                m = _pushCountPat.match(instr)
                i = i + 1
                if m:
                    # A push instruction: the next nValues assembly items
                    # are its arguments; wrap them 25 per line.
                    nValues = int(m.group(1))
                    line: List[str] = []
                    j = 0
                    for j in range(nValues):
                        if j and not (j % 25):
                            writer.write(writer.indentwhite * indent)
                            writer.write(" ".join(line))
                            writer.newline()
                            line = []
                        line.append(assembly[i + j])
                    writer.write(writer.indentwhite * indent)
                    writer.write(" ".join(line))
                    writer.newline()
                    i = i + j + 1
                # indent after FDEF/IF/ELSE lines
                if _indentRE.match(instr):
                    indent += 1
            writer.endtag("assembly")
            writer.newline()
    else:
        bytecode = self.getBytecode()
        if not bytecode:
            return
        writer.begintag("bytecode")
        writer.newline()
        writer.dumphex(bytecode)
        writer.endtag("bytecode")
        writer.newline()
|
||||
|
||||
def fromXML(self, name, attrs, content, ttFont) -> None:
    """Rebuild the program from its TTX form (<assembly> or <bytecode>)."""
    if name != "assembly":
        assert name == "bytecode"
        self.fromBytecode(readHex(content))
        return
    self.fromAssembly(strjoin(content))
    # Round-trip through the assembler so only bytecode is retained.
    self._assemble()
    del self.assembly
|
||||
|
||||
def _assemble(self) -> None:
    """Translate ``self.assembly`` (source lines) into ``self.bytecode``.

    The generic "PUSH" mnemonic is compiled to the most compact mix of
    PUSHB/PUSHW/NPUSHB/NPUSHW; explicit push mnemonics are emitted as
    written.  Raises tt_instructions_error on any syntax error.
    """
    assembly = " ".join(getattr(self, "assembly", []))
    bytecode: List[int] = []
    push = bytecode.append
    lenAssembly = len(assembly)
    pos = _skipWhite(assembly, 0)
    while pos < lenAssembly:
        m = _tokenRE.match(assembly, pos)
        if m is None:
            raise tt_instructions_error(
                "Syntax error in TT program (%s)" % assembly[pos - 5 : pos + 15]
            )
        dummy, mnemonic, arg, number, comment = m.groups()
        pos = m.regs[0][1]
        if comment:
            pos = _skipWhite(assembly, pos)
            continue

        arg = arg.strip()
        if mnemonic.startswith("INSTR"):
            # Unknown instruction: opcode was preserved in the mnemonic.
            op = int(mnemonic[5:])
            push(op)
        elif mnemonic not in ("PUSH", "NPUSHB", "NPUSHW", "PUSHB", "PUSHW"):
            op, argBits, name = mnemonicDict[mnemonic]
            if len(arg) != argBits:
                raise tt_instructions_error(
                    "Incorrect number of argument bits (%s[%s])" % (mnemonic, arg)
                )
            if arg:
                # embedded argument bits are added to the base opcode
                arg = binary2num(arg)
                push(op + arg)
            else:
                push(op)
        else:
            # Collect the numeric arguments following a push instruction.
            args = []
            pos = _skipWhite(assembly, pos)
            while pos < lenAssembly:
                m = _tokenRE.match(assembly, pos)
                if m is None:
                    raise tt_instructions_error(
                        "Syntax error in TT program (%s)" % assembly[pos : pos + 15]
                    )
                dummy, _mnemonic, arg, number, comment = m.groups()
                if number is None and comment is None:
                    break
                pos = m.regs[0][1]
                pos = _skipWhite(assembly, pos)
                if comment is not None:
                    continue
                args.append(int(number))
            nArgs = len(args)
            if mnemonic == "PUSH":
                # Automatically choose the most compact representation
                nWords = 0
                while nArgs:
                    # run of values that need 16-bit words
                    while (
                        nWords < nArgs
                        and nWords < 255
                        and not (0 <= args[nWords] <= 255)
                    ):
                        nWords += 1
                    # following run of values that fit in a byte
                    nBytes = 0
                    while (
                        nWords + nBytes < nArgs
                        and nBytes < 255
                        and 0 <= args[nWords + nBytes] <= 255
                    ):
                        nBytes += 1
                    if (
                        nBytes < 2
                        and nWords + nBytes < 255
                        and nWords + nBytes != nArgs
                    ):
                        # A 0/1-byte run between word runs: cheaper to
                        # widen it into the word run than to start PUSHB.
                        nWords += nBytes
                        continue

                    # Write words
                    if nWords:
                        if nWords <= 8:
                            op, argBits, name = streamMnemonicDict["PUSHW"]
                            op = op + nWords - 1
                            push(op)
                        else:
                            op, argBits, name = streamMnemonicDict["NPUSHW"]
                            push(op)
                            push(nWords)
                        for value in args[:nWords]:
                            assert -32768 <= value < 32768, (
                                "PUSH value out of range %d" % value
                            )
                            push((value >> 8) & 0xFF)
                            push(value & 0xFF)

                    # Write bytes
                    # BUGFIX: this emitter must be guarded by "if nBytes:".
                    # Unguarded ("if nBytes: pass"), a zero-length byte run
                    # emitted PUSHB with op + 0 - 1 == 0xAF — a bogus opcode
                    # that corrupts the program whenever a chunk ends on a
                    # word run.
                    if nBytes:
                        if nBytes <= 8:
                            op, argBits, name = streamMnemonicDict["PUSHB"]
                            op = op + nBytes - 1
                            push(op)
                        else:
                            op, argBits, name = streamMnemonicDict["NPUSHB"]
                            push(op)
                            push(nBytes)
                        for value in args[nWords : nWords + nBytes]:
                            push(value)

                    nTotal = nWords + nBytes
                    args = args[nTotal:]
                    nArgs -= nTotal
                    nWords = 0
            else:
                # Write exactly what we've been asked to
                words = mnemonic[-1] == "W"
                op, argBits, name = streamMnemonicDict[mnemonic]
                if mnemonic[0] != "N":
                    # PUSHB/PUSHW: count is encoded in the opcode (max 8)
                    assert nArgs <= 8, nArgs
                    op = op + nArgs - 1
                    push(op)
                else:
                    # NPUSHB/NPUSHW: count byte follows the opcode
                    assert nArgs < 256
                    push(op)
                    push(nArgs)
                if words:
                    for value in args:
                        assert -32768 <= value < 32768, (
                            "PUSHW value out of range %d" % value
                        )
                        push((value >> 8) & 0xFF)
                        push(value & 0xFF)
                else:
                    for value in args:
                        assert 0 <= value < 256, (
                            "PUSHB value out of range %d" % value
                        )
                        push(value)

        pos = _skipWhite(assembly, pos)

    if bytecode:
        assert max(bytecode) < 256 and min(bytecode) >= 0
    self.bytecode = array.array("B", bytecode)
|
||||
|
||||
def _disassemble(self, preserve=False) -> None:
    # Decode self.bytecode into assembly lines.  With preserve=False,
    # consecutive push instructions are merged into one generic "PUSH";
    # with preserve=True each original push form is kept.
    assembly = []
    i = 0  # byte index into the bytecode
    bytecode = getattr(self, "bytecode", [])
    numBytecode = len(bytecode)
    while i < numBytecode:
        op = bytecode[i]
        try:
            mnemonic, argBits, argoffset, name = opcodeDict[op]
        except KeyError:
            if op in streamOpcodeDict:
                values = []

                # Merge consecutive PUSH operations
                while bytecode[i] in streamOpcodeDict:
                    op = bytecode[i]
                    mnemonic, argBits, argoffset, name = streamOpcodeDict[op]
                    words = mnemonic[-1] == "W"
                    if argBits:
                        # PUSHB/PUSHW: count is encoded in the opcode
                        nValues = op - argoffset + 1
                    else:
                        # NPUSHB/NPUSHW: count byte follows the opcode
                        i = i + 1
                        nValues = bytecode[i]
                    i = i + 1
                    assert nValues > 0
                    if not words:
                        for j in range(nValues):
                            value = bytecode[i]
                            values.append(repr(value))
                            i = i + 1
                    else:
                        for j in range(nValues):
                            # cast to signed int16
                            value = (bytecode[i] << 8) | bytecode[i + 1]
                            if value >= 0x8000:
                                value = value - 0x10000
                            values.append(repr(value))
                            i = i + 2
                    if preserve:
                        break

                if not preserve:
                    mnemonic = "PUSH"
                nValues = len(values)
                # the value count in the comment is parsed back by
                # _pushCountPat when re-assembling / writing XML
                if nValues == 1:
                    assembly.append("%s[ ] /* 1 value pushed */" % mnemonic)
                else:
                    assembly.append(
                        "%s[ ] /* %s values pushed */" % (mnemonic, nValues)
                    )
                assembly.extend(values)
            else:
                # opcode unknown to the tables: keep it round-trippable
                assembly.append("INSTR%d[ ]" % op)
                i = i + 1
        else:
            if argBits:
                assembly.append(
                    mnemonic
                    + "[%s] /* %s */" % (num2binary(op - argoffset, argBits), name)
                )
            else:
                assembly.append(mnemonic + "[ ] /* %s */" % name)
            i = i + 1
    self.assembly = assembly
|
||||
|
||||
def __bool__(self) -> bool:
    """A Program is truthy when it holds a non-empty assembly or bytecode.

    >>> p = Program()
    >>> bool(p)
    False
    >>> bc = array.array("B", [0])
    >>> p.fromBytecode(bc)
    >>> bool(p)
    True
    >>> p.bytecode.pop()
    0
    >>> bool(p)
    False

    >>> p = Program()
    >>> asm = ['SVTCA[0]']
    >>> p.fromAssembly(asm)
    >>> bool(p)
    True
    >>> p.assembly.pop()
    'SVTCA[0]'
    >>> bool(p)
    False
    """
    return (hasattr(self, "assembly") and len(self.assembly) > 0) or (
        hasattr(self, "bytecode") and len(self.bytecode) > 0
    )

# Python 2 alias, kept for backward compatibility.
__nonzero__ = __bool__
|
||||
|
||||
def __eq__(self, other) -> bool:
    """Programs compare equal when their stored state matches exactly."""
    if type(other) is not type(self):
        return NotImplemented
    return other.__dict__ == self.__dict__
|
||||
|
||||
def __ne__(self, other) -> bool:
    """Negation of __eq__, propagating NotImplemented unchanged."""
    eq = self.__eq__(other)
    if eq is NotImplemented:
        return eq
    return not eq
|
||||
|
||||
|
||||
def _test():
|
||||
"""
|
||||
>>> _test()
|
||||
True
|
||||
"""
|
||||
|
||||
bc = b"""@;:9876543210/.-,+*)(\'&%$#"! \037\036\035\034\033\032\031\030\027\026\025\024\023\022\021\020\017\016\015\014\013\012\011\010\007\006\005\004\003\002\001\000,\001\260\030CXEj\260\031C`\260F#D#\020 \260FN\360M/\260\000\022\033!#\0213Y-,\001\260\030CX\260\005+\260\000\023K\260\024PX\261\000@8Y\260\006+\033!#\0213Y-,\001\260\030CXN\260\003%\020\362!\260\000\022M\033 E\260\004%\260\004%#Jad\260(RX!#\020\326\033\260\003%\020\362!\260\000\022YY-,\260\032CX!!\033\260\002%\260\002%I\260\003%\260\003%Ja d\260\020PX!!!\033\260\003%\260\003%I\260\000PX\260\000PX\270\377\3428!\033\260\0208!Y\033\260\000RX\260\0368!\033\270\377\3608!YYYY-,\001\260\030CX\260\005+\260\000\023K\260\024PX\271\000\000\377\3008Y\260\006+\033!#\0213Y-,N\001\212\020\261F\031CD\260\000\024\261\000F\342\260\000\025\271\000\000\377\3608\000\260\000<\260(+\260\002%\020\260\000<-,\001\030\260\000/\260\001\024\362\260\001\023\260\001\025M\260\000\022-,\001\260\030CX\260\005+\260\000\023\271\000\000\377\3408\260\006+\033!#\0213Y-,\001\260\030CXEdj#Edi\260\031Cd``\260F#D#\020 \260F\360/\260\000\022\033!! 
\212 \212RX\0213\033!!YY-,\001\261\013\012C#Ce\012-,\000\261\012\013C#C\013-,\000\260F#p\261\001F>\001\260F#p\261\002FE:\261\002\000\010\015-,\260\022+\260\002%E\260\002%Ej\260@\213`\260\002%#D!!!-,\260\023+\260\002%E\260\002%Ej\270\377\300\214`\260\002%#D!!!-,\260\000\260\022+!!!-,\260\000\260\023+!!!-,\001\260\006C\260\007Ce\012-, i\260@a\260\000\213 \261,\300\212\214\270\020\000b`+\014d#da\\X\260\003aY-,\261\000\003%EhT\260\034KPZX\260\003%E\260\003%E`h \260\004%#D\260\004%#D\033\260\003% Eh \212#D\260\003%Eh`\260\003%#DY-,\260\003% Eh \212#D\260\003%Edhe`\260\004%\260\001`#D-,\260\011CX\207!\300\033\260\022CX\207E\260\021+\260G#D\260Gz\344\033\003\212E\030i \260G#D\212\212\207 \260\240QX\260\021+\260G#D\260Gz\344\033!\260Gz\344YYY\030-, \212E#Eh`D-,EjB-,\001\030/-,\001\260\030CX\260\004%\260\004%Id#Edi\260@\213a \260\200bj\260\002%\260\002%a\214\260\031C`\260F#D!\212\020\260F\366!\033!!!!Y-,\001\260\030CX\260\002%E\260\002%Ed`j\260\003%Eja \260\004%Ej \212\213e\260\004%#D\214\260\003%#D!!\033 EjD EjDY-,\001 E\260\000U\260\030CZXEh#Ei\260@\213a \260\200bj \212#a \260\003%\213e\260\004%#D\214\260\003%#D!!\033!!\260\031+Y-,\001\212\212Ed#EdadB-,\260\004%\260\004%\260\031+\260\030CX\260\004%\260\004%\260\003%\260\033+\001\260\002%C\260@T\260\002%C\260\000TZX\260\003% E\260@aDY\260\002%C\260\000T\260\002%C\260@TZX\260\004% E\260@`DYY!!!!-,\001KRXC\260\002%E#aD\033!!Y-,\001KRXC\260\002%E#`D\033!!Y-,KRXED\033!!Y-,\001 \260\003%#I\260@`\260 c \260\000RX#\260\002%8#\260\002%e8\000\212c8\033!!!!!Y\001-,KPXED\033!!Y-,\001\260\005%\020# \212\365\000\260\001`#\355\354-,\001\260\005%\020# \212\365\000\260\001a#\355\354-,\001\260\006%\020\365\000\355\354-,F#F`\212\212F# F\212`\212a\270\377\200b# \020#\212\261KK\212pE` \260\000PX\260\001a\270\377\272\213\033\260F\214Y\260\020`h\001:-, E\260\003%FRX\260\002%F ha\260\003%\260\003%?#!8\033!\021Y-, E\260\003%FPX\260\002%F ha\260\003%\260\003%?#!8\033!\021Y-,\000\260\007C\260\006C\013-,\212\020\354-,\260\014CX!\033 
F\260\000RX\270\377\3608\033\260\0208YY-, \260\000UX\270\020\000c\260\003%Ed\260\003%Eda\260\000SX\260\002\033\260@a\260\003Y%EiSXED\033!!Y\033!\260\002%E\260\002%Ead\260(QXED\033!!YY-,!!\014d#d\213\270@\000b-,!\260\200QX\014d#d\213\270 \000b\033\262\000@/+Y\260\002`-,!\260\300QX\014d#d\213\270\025Ub\033\262\000\200/+Y\260\002`-,\014d#d\213\270@\000b`#!-,KSX\260\004%\260\004%Id#Edi\260@\213a \260\200bj\260\002%\260\002%a\214\260F#D!\212\020\260F\366!\033!\212\021#\022 9/Y-,\260\002%\260\002%Id\260\300TX\270\377\3708\260\0108\033!!Y-,\260\023CX\003\033\002Y-,\260\023CX\002\033\003Y-,\260\012+#\020 <\260\027+-,\260\002%\270\377\3608\260(+\212\020# \320#\260\020+\260\005CX\300\033<Y \020\021\260\000\022\001-,KS#KQZX8\033!!Y-,\001\260\002%\020\320#\311\001\260\001\023\260\000\024\020\260\001<\260\001\026-,\001\260\000\023\260\001\260\003%I\260\003\0278\260\001\023-,KS#KQZX E\212`D\033!!Y-, 9/-"""
|
||||
|
||||
p = Program()
|
||||
p.fromBytecode(bc)
|
||||
asm = p.getAssembly(preserve=True)
|
||||
p.fromAssembly(asm)
|
||||
print(bc == p.getBytecode())
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import sys
|
||||
import doctest
|
||||
|
||||
sys.exit(doctest.testmod().failed)
|
||||
Reference in New Issue
Block a user