refactor: excel parse
This commit is contained in:
@@ -0,0 +1,161 @@
|
||||
# Copyright (c) 2015-2024, Manfred Moitzi
|
||||
# License: MIT License
|
||||
from __future__ import annotations
|
||||
from typing import Iterable, TypeVar, Iterator, Any
|
||||
from uuid import uuid4
|
||||
import functools
|
||||
import html
|
||||
from .juliandate import juliandate, calendardate
|
||||
from .binarydata import hex_strings_to_bytes, bytes_to_hexstr
|
||||
|
||||
# html.escape() preconfigured with quote=True: also escapes the quote
# characters '"' and "'" in addition to '&', '<' and '>'.
escape = functools.partial(html.escape, quote=True)
|
||||
|
||||
|
||||
def float2transparency(value: float) -> int:
    """
    Returns DXF transparency value as integer in the range from ``0`` to ``255``, where
    ``0`` is 100% transparent and ``255`` is opaque.

    Args:
        value: transparency value as float in the range from ``0`` to ``1``, where ``0``
            is opaque and ``1`` is 100% transparency.

    """
    # Invert the float scale (0=opaque -> 255) and tag the result with the
    # DXF transparency marker bit 0x02000000.
    opacity = 1.0 - float(value)
    return 0x02000000 | int(opacity * 255)
|
||||
|
||||
|
||||
def transparency2float(value: int) -> float:
    """
    Returns transparency value as float from ``0`` to ``1``, ``0`` for no transparency
    (opaque) and ``1`` for 100% transparency.

    Args:
        value: DXF integer transparency value, ``0`` for 100% transparency and ``255``
            for opaque

    """
    # Only the low byte carries the transparency channel:
    # 255 -> 0.0 (opaque), 0 -> 1.0 (fully transparent)
    channel = int(value) & 0xFF
    return 1.0 - channel / 255.0
|
||||
|
||||
|
||||
def set_flag_state(flags: int, flag: int, state: bool = True) -> int:
    """Set/clear binary `flag` in data `flags`.

    Args:
        flags: data value
        flag: flag to set/clear
        state: ``True`` for setting, ``False`` for clearing

    """
    return (flags | flag) if state else (flags & ~flag)
|
||||
|
||||
|
||||
def guid() -> str:
    """Returns a general unique ID, based on :func:`uuid.uuid4`.

    This function creates a GUID for the header variables $VERSIONGUID and
    $FINGERPRINTGUID, which matches the AutoCAD pattern
    ``{XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}``.

    """
    # AutoCAD stores GUIDs uppercase and wrapped in curly braces.
    return f"{{{str(uuid4()).upper()}}}"
|
||||
|
||||
|
||||
# Generic item type used by take2() and pairwise().
T = TypeVar("T")
|
||||
|
||||
|
||||
def take2(iterable: Iterable[T]) -> Iterator[tuple[T, T]]:
    """Iterate `iterable` as non-overlapping pairs; a trailing odd item
    is dropped.

    :code:`take2('ABCDEFGH') -> AB CD EF GH`

    """
    # zip() consuming the same iterator twice yields non-overlapping pairs
    # and silently drops a trailing odd item, exactly like the manual
    # store/yield loop.
    it = iter(iterable)
    return zip(it, it)
|
||||
|
||||
|
||||
def pairwise(iterable: Iterable[T], close=False) -> Iterator[tuple[T, T]]:
    """Iterate `iterable` as consecutive overlapping pairs.
    This is similar to ``itertools.pairwise()`` in Python 3.10 but with `close` option.

    :code:`pairwise('ABCDEFG') -> AB BC CD DE EF FG`
    :code:`pairwise('ABCDEFG', True) -> AB BC CD DE EF FG GA`

    Args:
        iterable: source items
        close: if ``True`` also yield the closing pair of last and first
            item, unless the last item *is* the first item

    """
    # Fix: the docstring examples referenced `polygon_segments` (stale
    # copy-paste); the function is named `pairwise`. Logic is unchanged.
    sentinel = object()
    first: Any = sentinel
    store: Any = sentinel
    item: Any = sentinel
    for item in iterable:
        if store is sentinel:
            # remember the first item for the optional closing pair
            store = item
            first = item
        else:
            yield store, item
            store = item
    # Closing pair (last, first): only if at least one item was seen and the
    # last item is not the very same object as the first (identity check
    # avoids a degenerate (x, x) pair for single-item input).
    if close and first is not sentinel and item is not first:
        yield item, first
|
||||
|
||||
|
||||
def suppress_zeros(s: str, leading: bool = False, trailing: bool = True):
    """Suppress trailing and/or leading ``0`` of string `s`.

    Args:
        s: data string
        leading: suppress leading ``0``
        trailing: suppress trailing ``0``

    """
    # nothing to do?
    if not (leading or trailing):
        return s

    # any representation of zero collapses to "0"
    if float(s) == 0.0:
        return "0"

    # detach the sign so stripping only touches digits
    sign = ""
    if s.startswith(("-", "+")):
        sign, s = s[0], s[1:]

    if leading:
        s = s.lstrip("0")
    if trailing and "." in s:
        s = s.rstrip("0")

    # drop a dangling decimal separator
    if s.endswith((".", ",")):
        s = s[:-1]

    return sign + s
|
||||
|
||||
|
||||
def normalize_text_angle(angle: float, fix_upside_down=True) -> float:
    """
    Normalizes text `angle` to the range from 0 to 360 degrees and fixes upside down text angles.

    Args:
        angle: text angle in degrees
        fix_upside_down: rotate upside down text angle about 180 degree

    """
    angle %= 360.0  # normalize angle (0 .. 360)
    if fix_upside_down and 90 < angle <= 270:
        # flip text orientation and normalize the result again
        angle = (angle - 180) % 360.0
    return angle
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,576 @@
|
||||
# Copyright (c) 2021-2022, Manfred Moitzi
|
||||
# License: MIT License
|
||||
# Debugging tools to analyze DXF entities.
|
||||
from __future__ import annotations
|
||||
from typing import Iterable, Sequence, Optional
|
||||
import textwrap
|
||||
|
||||
import ezdxf
|
||||
from ezdxf import colors
|
||||
from ezdxf.math import Vec2
|
||||
from ezdxf.lldxf import const
|
||||
from ezdxf.enums import TextEntityAlignment
|
||||
from ezdxf.tools.debug import print_bitmask
|
||||
from ezdxf.render.mleader import MLeaderStyleOverride, OVERRIDE_FLAG
|
||||
from ezdxf.entities import (
|
||||
EdgePath,
|
||||
PolylinePath,
|
||||
LineEdge,
|
||||
ArcEdge,
|
||||
EllipseEdge,
|
||||
SplineEdge,
|
||||
mleader,
|
||||
)
|
||||
from ezdxf.entities.polygon import DXFPolygon
|
||||
|
||||
# Block names for the boundary path start/end marker geometry:
EDGE_START_MARKER = "EDGE_START_MARKER"
EDGE_END_MARKER = "EDGE_END_MARKER"
# Layer names for the hatch copy and for the markers of each boundary
# element type; each marker layer gets its own color (see init_layers):
HATCH_LAYER = "HATCH"
POLYLINE_LAYER = "POLYLINE_MARKER"
LINE_LAYER = "LINE_MARKER"
ARC_LAYER = "ARC_MARKER"
ELLIPSE_LAYER = "ELLIPSE_MARKER"
SPLINE_LAYER = "SPLINE_MARKER"
|
||||
|
||||
|
||||
class HatchAnalyzer:
    """Debugging tool: builds a new DXF document containing a copy of a
    HATCH entity plus labeled start/end markers for each of its boundary
    paths, so the boundary geometry can be inspected visually.
    """

    def __init__(
        self,
        *,
        marker_size: float = 1.0,
        angle: float = 45,
    ):
        # marker_size: size of the marker blocks in drawing units
        # angle: rotation applied to each inserted marker block reference
        self.marker_size = marker_size
        self.angle = angle
        self.doc = ezdxf.new()
        self.msp = self.doc.modelspace()
        self.init_layers()
        self.init_markers()

    def init_layers(self):
        # One marker layer per boundary element type, each with its own color.
        self.doc.layers.add(POLYLINE_LAYER, color=colors.YELLOW)
        self.doc.layers.add(LINE_LAYER, color=colors.RED)
        self.doc.layers.add(ARC_LAYER, color=colors.GREEN)
        self.doc.layers.add(ELLIPSE_LAYER, color=colors.MAGENTA)
        self.doc.layers.add(SPLINE_LAYER, color=colors.CYAN)
        self.doc.layers.add(HATCH_LAYER)

    def init_markers(self):
        # Build the two marker blocks; each carries a NAME attribute
        # definition filled in by add_marker() via add_auto_attribs().
        blk = self.doc.blocks.new(EDGE_START_MARKER)
        attribs = {"layer": "0"}  # from INSERT
        radius = self.marker_size / 2.0
        height = radius

        # start marker: 0-- name
        blk.add_circle(
            center=(0, 0),
            radius=radius,
            dxfattribs=attribs,
        )
        text_start = radius * 4
        blk.add_line(
            start=(radius, 0),
            end=(text_start - radius / 2.0, 0),
            dxfattribs=attribs,
        )
        text = blk.add_attdef(
            tag="NAME",
            dxfattribs=attribs,
        )
        text.dxf.height = height
        text.set_placement(
            (text_start, 0), align=TextEntityAlignment.MIDDLE_LEFT
        )

        # end marker: name --X
        blk = self.doc.blocks.new(EDGE_END_MARKER)
        attribs = {"layer": "0"}  # from INSERT
        blk.add_line(
            start=(-radius, -radius),
            end=(radius, radius),
            dxfattribs=attribs,
        )
        blk.add_line(
            start=(-radius, radius),
            end=(radius, -radius),
            dxfattribs=attribs,
        )
        text_start = -radius * 4
        blk.add_line(
            start=(-radius, 0),
            end=(text_start + radius / 2.0, 0),
            dxfattribs=attribs,
        )
        text = blk.add_attdef(
            tag="NAME",
            dxfattribs=attribs,
        )
        text.dxf.height = height
        text.set_placement(
            (text_start, 0), align=TextEntityAlignment.MIDDLE_RIGHT
        )

    def export(self, name: str) -> None:
        """Save the analyzer document as DXF file `name`."""
        self.doc.saveas(name)

    def add_hatch(self, hatch: DXFPolygon) -> None:
        """Add a copy of `hatch` to the analyzer document."""
        # discard the extrusion vector to keep the content in the xy-plane
        hatch.dxf.discard("extrusion")
        hatch.dxf.layer = HATCH_LAYER
        self.msp.add_foreign_entity(hatch)

    def add_boundary_markers(self, hatch: DXFPolygon) -> None:
        """Add labeled start/end markers for every boundary path of `hatch`.

        Raises:
            TypeError: for unknown boundary path types

        """
        hatch.dxf.discard("extrusion")
        path_num: int = 0  # 1-based path number used in the marker labels

        for p in hatch.paths:
            path_num += 1
            if isinstance(p, PolylinePath):
                self.add_polyline_markers(p, path_num)
            elif isinstance(p, EdgePath):
                self.add_edge_markers(p, path_num)
            else:
                raise TypeError(f"unknown boundary path type: {type(p)}")

    def add_start_marker(self, location: Vec2, name: str, layer: str) -> None:
        self.add_marker(EDGE_START_MARKER, location, name, layer)

    def add_end_marker(self, location: Vec2, name: str, layer: str) -> None:
        self.add_marker(EDGE_END_MARKER, location, name, layer)

    def add_marker(
        self, blk_name: str, location: Vec2, name: str, layer: str
    ) -> None:
        # Insert a marker block reference and fill its NAME attribute.
        blkref = self.msp.add_blockref(
            name=blk_name,
            insert=location,
            dxfattribs={
                "layer": layer,
                "rotation": self.angle,
            },
        )
        blkref.add_auto_attribs({"NAME": name})

    def add_polyline_markers(self, p: PolylinePath, num: int) -> None:
        # NOTE(review): both markers are placed at vertices[0] — polyline
        # boundary paths are implicitly closed, so start and end coincide,
        # but confirm vertices[-1] wasn't intended for the end marker.
        self.add_start_marker(
            Vec2(p.vertices[0]), f"Poly-S({num})", POLYLINE_LAYER
        )
        self.add_end_marker(
            Vec2(p.vertices[0]), f"Poly-E({num})", POLYLINE_LAYER
        )

    def add_edge_markers(self, p: EdgePath, num: int) -> None:
        """Add start/end markers for every edge of edge path `p`; marker
        labels have the form "<Type>-S(path.edge)" / "<Type>-E(path.edge)".

        Raises:
            TypeError: for unknown edge types

        """
        edge_num: int = 0
        for edge in p.edges:
            edge_num += 1
            name = f"({num}.{edge_num})"
            if isinstance(
                edge,
                LineEdge,
            ):
                self.add_line_edge_markers(edge, name)
            elif isinstance(edge, ArcEdge):
                self.add_arc_edge_markers(edge, name)
            elif isinstance(edge, EllipseEdge):
                self.add_ellipse_edge_markers(edge, name)
            elif isinstance(edge, SplineEdge):
                self.add_spline_edge_markers(edge, name)
            else:
                raise TypeError(f"unknown edge type: {type(edge)}")

    def add_line_edge_markers(self, line: LineEdge, name: str) -> None:
        self.add_start_marker(line.start, "Line-S" + name, LINE_LAYER)
        self.add_end_marker(line.end, "Line-E" + name, LINE_LAYER)

    def add_arc_edge_markers(self, arc: ArcEdge, name: str) -> None:
        self.add_start_marker(arc.start_point, "Arc-S" + name, ARC_LAYER)
        self.add_end_marker(arc.end_point, "Arc-E" + name, ARC_LAYER)

    def add_ellipse_edge_markers(self, ellipse: EllipseEdge, name: str) -> None:
        self.add_start_marker(
            ellipse.start_point, "Ellipse-S" + name, ELLIPSE_LAYER
        )
        self.add_end_marker(
            ellipse.end_point, "Ellipse-E" + name, ELLIPSE_LAYER
        )

    def add_spline_edge_markers(self, spline: SplineEdge, name: str) -> None:
        # Splines without control points get no markers.
        if len(spline.control_points):
            # Assuming a clamped B-spline, because this is the only practical
            # usable B-spline for edges.
            self.add_start_marker(
                spline.start_point, "SplineS" + name, SPLINE_LAYER
            )
            self.add_end_marker(
                spline.end_point, "SplineE" + name, SPLINE_LAYER
            )

    @staticmethod
    def report(hatch: DXFPolygon) -> list[str]:
        """Return a textual report of `hatch` as list of strings."""
        return hatch_report(hatch)

    @staticmethod
    def print_report(hatch: DXFPolygon) -> None:
        """Print the textual report of `hatch`."""
        print("\n".join(hatch_report(hatch)))
|
||||
|
||||
|
||||
def hatch_report(hatch: DXFPolygon) -> list[str]:
    """Return a textual summary of the HATCH attributes and all boundary
    paths of `hatch` as list of strings.
    """
    dxf = hatch.dxf
    lines = [
        f"{str(hatch)}",
        f" solid fill: {bool(dxf.solid_fill)}",
        f" pattern type: {const.HATCH_PATTERN_TYPE[dxf.pattern_type]}",
        f" pattern name: {dxf.pattern_name}",
        f" associative: {bool(dxf.associative)}",
        f" island detection: {const.ISLAND_DETECTION[dxf.hatch_style]}",
        f" has pattern data: {hatch.pattern is not None}",
        f" has gradient data: {hatch.gradient is not None}",
        f" seed value count: {len(hatch.seeds)}",
        f" boundary path count: {len(hatch.paths)}",
    ]
    # append a per-path report, numbered starting at 1
    for num, path in enumerate(hatch.paths, start=1):
        if isinstance(path, PolylinePath):
            lines.extend(polyline_path_report(path, num))
        elif isinstance(path, EdgePath):
            lines.extend(edge_path_report(path, num))
    return lines
|
||||
|
||||
|
||||
def polyline_path_report(p: PolylinePath, num: int) -> list[str]:
    """Return report lines for polyline boundary path `p` with 1-based path
    number `num`.
    """
    flags = const.boundary_path_flag_names(p.path_type_flags)
    return [
        f"{num}. Polyline Path, vertex count: {len(p.vertices)}",
        f" path type: {', '.join(flags)}",
    ]
|
||||
|
||||
|
||||
def edge_path_report(p: EdgePath, num: int) -> list[str]:
    """Return report lines for edge boundary path `p` with 1-based path
    number `num`, including whether consecutive edges are connected and the
    edges form a closed loop.
    """
    path_type = ", ".join(const.boundary_path_flag_names(p.path_type_flags))
    edges = p.edges
    # empty edge lists are reported as neither closed nor connected
    closed = bool(edges) and edges[0].start_point.isclose(edges[-1].end_point)
    connected = bool(edges) and all(
        e1.end_point.isclose(e2.start_point)
        for e1, e2 in zip(edges, edges[1:])
    )
    return [
        f"{num}. Edge Path, edge count: {len(p.edges)}",
        f" path type: {path_type}",
        f" continuously connected edges: {connected}",
        f" closed edge loop: {closed}",
    ]
|
||||
|
||||
|
||||
# Layer name for inserted point markers and block name of the point marker
# geometry used by MultileaderAnalyzer:
MULTILEADER = "MULTILEADER_MARKER"
POINT_MARKER = "POINT_MARKER"
|
||||
|
||||
|
||||
class MultileaderAnalyzer:
    """Multileader can not be added as foreign entity to a new document.
    Annotations have to be added to the source document.

    """

    def __init__(
        self,
        multileader: mleader.MultiLeader,
        *,
        marker_size: float = 1.0,
        report_width: int = 79,
    ):
        # marker_size: size of the point marker block in drawing units
        # report_width: maximum line width of the textual reports
        self.marker_size = marker_size
        self.report_width = report_width
        self.multileader = multileader
        assert self.multileader.doc is not None, "valid DXF document required"
        self.doc = self.multileader.doc
        self.msp = self.doc.modelspace()
        self.init_layers()
        self.init_markers()

    def init_layers(self):
        # Create the marker layer only once per document.
        if self.doc.layers.has_entry(MULTILEADER):
            return
        self.doc.layers.add(MULTILEADER, color=colors.RED)

    def init_markers(self):
        # Create the point marker block (circle + cross hair) only once.
        if POINT_MARKER not in self.doc.blocks:
            blk = self.doc.blocks.new(POINT_MARKER)
            attribs = {"layer": "0"}  # from INSERT
            size = self.marker_size
            size_2 = size / 2.0
            radius = size / 4.0
            blk.add_circle(
                center=(0, 0),
                radius=radius,
                dxfattribs=attribs,
            )
            blk.add_line((-size_2, 0), (size_2, 0), dxfattribs=attribs)
            blk.add_line((0, -size_2), (0, size_2), dxfattribs=attribs)

    @property
    def context(self) -> mleader.MLeaderContext:
        """The CONTEXT object of the MULTILEADER entity."""
        return self.multileader.context

    @property
    def mleaderstyle(self) -> Optional[mleader.MLeaderStyle]:
        """The associated MLEADERSTYLE or ``None`` if not found."""
        handle = self.multileader.dxf.get("style_handle")
        return self.doc.entitydb.get(handle)  # type: ignore

    def divider_line(self, symbol="-") -> str:
        """Return a divider line of report width made of `symbol`."""
        return symbol * self.report_width

    def shorten_lines(self, lines: Iterable[str]) -> list[str]:
        """Shorten all `lines` to the report width."""
        return [
            textwrap.shorten(line, width=self.report_width) for line in lines
        ]

    def report(self) -> list[str]:
        """Return the full report as list of strings: DXF attributes,
        CONTEXT-, MTEXT-, BLOCK-, leader- and arrow head attributes; optional
        sections are included only if the corresponding data exists.
        """
        report = [
            str(self.multileader),
            self.divider_line(),
            "Existing DXF attributes:",
            self.divider_line(),
        ]
        report.extend(self.multileader_attributes())
        report.extend(self.context_attributes())
        if self.context.mtext is not None:
            report.extend(self.mtext_attributes())
        if self.context.block is not None:
            report.extend(self.block_attributes())
        if self.multileader.block_attribs:
            report.extend(self.block_reference_attribs())
        if self.multileader.context.leaders:
            report.extend(self.leader_attributes())
        if self.multileader.arrow_heads:
            report.extend(self.arrow_heads())
        return self.shorten_lines(report)

    def print_report(self) -> None:
        """Print the full report framed by '=' divider lines."""
        width = self.report_width
        print()
        print("=" * width)
        print("\n".join(self.report()))
        print("=" * width)

    def print_overridden_properties(self):
        print("\n".join(self.overridden_attributes()))

    def overridden_attributes(self) -> list[str]:
        """Return report lines for all MLEADERSTYLE attributes that are
        overridden by the MULTILEADER entity.
        """
        multileader = self.multileader
        style = self.mleaderstyle
        if style is not None:
            report = [
                self.divider_line(),
                f"Override attributes of {str(style)}: '{style.dxf.name}'",
                self.divider_line(),
            ]

            override = MLeaderStyleOverride(style, multileader)
            for name in OVERRIDE_FLAG.keys():
                if override.is_overridden(name):
                    report.append(f"{name}: {override.get(name)}")
            if override.use_mtext_default_content:
                report.append("use_mtext_default_content: 1")
        else:
            handle = self.multileader.dxf.get("style_handle")
            report = [
                self.divider_line(),
                f"MLEADERSTYLE(#{handle}) not found",
            ]
        return report

    def multileader_attributes(self) -> list[str]:
        """Return "name: value" lines for all existing DXF attributes of the
        MULTILEADER entity, sorted by name.
        """
        attribs = self.multileader.dxf.all_existing_dxf_attribs()
        keys = sorted(attribs.keys())
        return [f"{key}: {attribs[key]}" for key in keys]

    def print_override_state(self):
        """Print the raw property_override_flags as decimal, hex and
        bitmask.
        """
        flags = self.multileader.dxf.property_override_flags
        print(f"\nproperty_override_flags:")
        print(f"dec: {flags}")
        print(f"hex: {hex(flags)}")
        print_bitmask(flags)

    def print_context_attributes(self):
        print("\n".join(self.context_attributes()))

    def context_attributes(self) -> list[str]:
        """Return report lines for the CONTEXT object attributes, excluding
        the nested content objects (mtext/block) and the leaders.
        """
        context = self.context
        if context is None:
            return []
        report = [
            self.divider_line(),
            "CONTEXT object attributes:",
            self.divider_line(),
            f"has MTEXT content: {yes_or_no(context.mtext)}",
            f"has BLOCK content: {yes_or_no(context.block)}",
            self.divider_line(),
        ]
        keys = [
            key
            for key in context.__dict__.keys()
            if key not in ("mtext", "block", "leaders")
        ]
        attributes = [f"{name}: {getattr(context, name)}" for name in keys]
        attributes.sort()
        report.extend(attributes)
        return self.shorten_lines(report)

    def print_mtext_attributes(self):
        print("\n".join(self.mtext_attributes()))

    def mtext_attributes(self) -> list[str]:
        """Return report lines for the MTEXT content attributes."""
        report = [
            self.divider_line(),
            "MTEXT content attributes:",
            self.divider_line(),
        ]
        mtext = self.context.mtext
        if mtext is not None:
            report.extend(_content_attributes(mtext))
        return self.shorten_lines(report)

    def print_block_attributes(self):
        print("\n".join(self.block_attributes()))

    def block_attributes(self) -> list[str]:
        """Return report lines for the BLOCK content attributes."""
        report = [
            self.divider_line(),
            "BLOCK content attributes:",
            self.divider_line(),
        ]
        block = self.context.block
        if block is not None:
            report.extend(_content_attributes(block))
        return self.shorten_lines(report)

    def print_leader_attributes(self):
        print("\n".join(self.leader_attributes()))

    def leader_attributes(self) -> list[str]:
        """Return report lines for all LEADER objects of the CONTEXT
        object.
        """
        report = []
        leaders = self.context.leaders
        if leaders is not None:
            for index, leader in enumerate(leaders):
                report.extend(self._leader_attributes(index, leader))
        return self.shorten_lines(report)

    def _leader_attributes(
        self, index: int, leader: mleader.LeaderData
    ) -> list[str]:
        # Report all leader attributes; "lines" and "breaks" are excluded
        # from the generic dump and get special formatting below.
        report = [
            self.divider_line(),
            f"{index+1}. LEADER attributes:",
            self.divider_line(),
        ]
        report.extend(_content_attributes(leader, exclude=("lines", "breaks")))
        s = ", ".join(map(str, leader.breaks))
        report.append(f"breaks: [{s}]")
        if leader.lines:
            report.extend(self._leader_lines(leader.lines))
        return report

    def _leader_lines(self, lines) -> list[str]:
        # Report the attributes of every leader line; the "vertices" and
        # "breaks" sequences are rendered as "[item, item, ...]" lists.
        report = []
        for num, line in enumerate(lines):
            report.extend(
                [
                    self.divider_line(),
                    f"{num + 1}. LEADER LINE attributes:",
                    self.divider_line(),
                ]
            )
            for name, value in line.__dict__.items():
                if name in ("vertices", "breaks"):
                    vstr = ""
                    if value is not None:
                        vstr = ", ".join(map(str, value))
                    vstr = f"[{vstr}]"
                else:
                    vstr = str(value)
                report.append(f"{name}: {vstr}")
        return report

    def print_block_attribs(self):
        print("\n".join(self.block_reference_attribs()))

    def block_reference_attribs(self) -> list[str]:
        """Return report lines for the block attribute entries stored in the
        MULTILEADER entity.
        """
        report = [
            self.divider_line(),
            f"BLOCK reference attributes:",
            self.divider_line(),
        ]
        for index, attr in enumerate(self.multileader.block_attribs):
            report.extend(
                [
                    f"{index+1}. Attributes",
                    self.divider_line(),
                ]
            )
            report.append(f"handle: {attr.handle}")
            report.append(f"index: {attr.index}")
            report.append(f"width: {attr.width}")
            report.append(f"text: {attr.text}")
        return report

    def arrow_heads(self) -> list[str]:
        """Return report lines for the arrow head entries of the MULTILEADER
        entity.
        """
        report = [
            self.divider_line(),
            f"ARROW HEAD attributes:",
            self.divider_line(),
        ]
        for index, attr in enumerate(self.multileader.arrow_heads):
            report.extend(
                [
                    f"{index+1}. Arrow Head",
                    self.divider_line(),
                ]
            )
            report.append(f"handle: {attr.handle}")
            report.append(f"index: {attr.index}")
        return report

    def print_mleaderstyle(self):
        print("\n".join(self.mleaderstyle_attributes()))

    def mleaderstyle_attributes(self) -> list[str]:
        """Return "name: value" lines for all existing DXF attributes of the
        associated MLEADERSTYLE, sorted by name, or a "not found" message.
        """
        report = []
        style = self.mleaderstyle
        if style is not None:
            report.extend(
                [
                    self.divider_line("="),
                    str(style),
                    self.divider_line("="),
                ]
            )
            attribs = style.dxf.all_existing_dxf_attribs()
            keys = sorted(attribs.keys())
            report.extend([f"{name}: {attribs[name]}" for name in keys])
        else:
            handle = self.multileader.dxf.get("style_handle")
            report.append(f"MLEADERSTYLE(#{handle}) not found")
        return self.shorten_lines(report)
|
||||
|
||||
|
||||
def _content_attributes(
|
||||
entity, exclude: Optional[Sequence[str]] = None
|
||||
) -> list[str]:
|
||||
exclude = exclude or []
|
||||
if entity is not None:
|
||||
return [
|
||||
f"{name}: {value}"
|
||||
for name, value in entity.__dict__.items()
|
||||
if name not in exclude
|
||||
]
|
||||
return []
|
||||
|
||||
|
||||
def yes_or_no(data) -> str:
    """Return "yes" for truthy `data`, otherwise "no"."""
    if data:
        return "yes"
    return "no"
|
||||
@@ -0,0 +1,606 @@
|
||||
# Copyright (c) 2014-2022, Manfred Moitzi
|
||||
# License: MIT License
|
||||
from __future__ import annotations
|
||||
from typing import Iterable, Any, Sequence, Union, overload, Optional
|
||||
from array import array
|
||||
import struct
|
||||
from binascii import unhexlify, hexlify
|
||||
from codecs import decode
|
||||
|
||||
# Accepted binary buffer types for ByteStream/BitStream input.
Bytes = Union[bytes, bytearray, memoryview]
|
||||
|
||||
|
||||
def hex_strings_to_bytes(data: Iterable[str]) -> bytes:
    """Returns multiple hex strings `data` as bytes."""
    # decode every hex string and concatenate the chunks
    return b"".join(unhexlify(hexstr) for hexstr in data)
|
||||
|
||||
|
||||
def bytes_to_hexstr(data: bytes) -> str:
    """Returns `data` bytes as plain hex string."""
    return data.hex().upper()
|
||||
|
||||
|
||||
# Terminator of padded unicode strings: two zero bytes.
NULL_NULL = b"\x00\x00"
|
||||
|
||||
|
||||
class EndOfBufferError(EOFError):
    """Raised when a read operation runs past the end of the buffer."""

    pass
|
||||
|
||||
|
||||
class ByteStream:
    """Process little endian binary data organized as bytes, data is padded to
    4 byte boundaries by default.
    """

    # Created for Proxy Entity Graphic decoding
    def __init__(self, buffer: Bytes, align: int = 4):
        self.buffer = memoryview(buffer)  # zero-copy view of the input
        self.index: int = 0  # read cursor in bytes
        self._align: int = align  # padding boundary in bytes

    @property
    def has_data(self) -> bool:
        # True while the read cursor has not reached the end of the buffer.
        return self.index < len(self.buffer)

    def align(self, index: int) -> int:
        """Return `index` rounded up to the next alignment boundary."""
        remainder = index % self._align
        if remainder:
            index += self._align - remainder
        return index

    def read_struct(self, fmt: str) -> Any:
        """Read data defined by a struct format string. Insert little endian
        format character '<' as first character, if machine has native big
        endian byte order.
        """
        if not self.has_data:
            raise EndOfBufferError("Unexpected end of buffer.")

        values = struct.unpack_from(fmt, self.buffer, offset=self.index)
        # advance past the record and pad up to the alignment boundary
        self.index = self.align(self.index + struct.calcsize(fmt))
        return values

    def read_float(self) -> float:
        (value,) = self.read_struct("<d")
        return value

    def read_long(self) -> int:
        (value,) = self.read_struct("<L")
        return value

    def read_signed_long(self) -> int:
        (value,) = self.read_struct("<l")
        return value

    def read_vertex(self) -> Sequence[float]:
        return self.read_struct("<3d")

    def read_padded_string(self, encoding: str = "utf_8") -> str:
        """PS: Padded String. This is a string, terminated with a zero byte.
        The file's text encoding (code page) is used to encode/decode the
        bytes into a string.
        """
        data = self.buffer
        start = self.index
        for stop in range(start, len(data)):
            if data[stop] == 0:
                # skip the terminator and pad to the alignment boundary
                self.index = self.align(stop + 1)
                # noinspection PyTypeChecker
                return decode(data[start:stop], encoding=encoding)
        raise EndOfBufferError(
            "Unexpected end of buffer, did not detect terminating zero byte."
        )

    def read_padded_unicode_string(self) -> str:
        """PUS: Padded Unicode String. The bytes are encoded using Unicode
        encoding. The bytes consist of byte pairs and the string is terminated
        by 2 zero bytes.
        """
        data = self.buffer
        start = self.index
        for stop in range(start, len(data), 2):
            if data[stop : stop + 2] == NULL_NULL:
                # skip the 2-byte terminator and pad to the boundary
                self.index = self.align(stop + 2)
                # noinspection PyTypeChecker
                return decode(
                    data[start:stop], encoding="utf_16_le"
                )
        raise EndOfBufferError(
            "Unexpected end of buffer, did not detect terminating zero bytes."
        )
|
||||
|
||||
|
||||
class BitStream:
|
||||
"""Process little endian binary data organized as bit stream."""
|
||||
|
||||
# Created for Proxy Entity Graphic decoding and DWG bit stream decoding
|
||||
def __init__(
|
||||
self,
|
||||
buffer: Bytes,
|
||||
dxfversion: str = "AC1015",
|
||||
encoding: str = "cp1252",
|
||||
):
|
||||
self.buffer = memoryview(buffer)
|
||||
self.bit_index: int = 0
|
||||
self.dxfversion = dxfversion
|
||||
self.encoding = encoding
|
||||
|
||||
@property
|
||||
def has_data(self) -> bool:
|
||||
return self.bit_index >> 3 < len(self.buffer)
|
||||
|
||||
def align(self, count: int) -> None:
|
||||
"""Align to byte border."""
|
||||
byte_index = (self.bit_index >> 3) + bool(self.bit_index & 7)
|
||||
modulo = byte_index % count
|
||||
if modulo:
|
||||
byte_index += count - modulo
|
||||
self.bit_index = byte_index << 3
|
||||
|
||||
def skip(self, count: int) -> None:
|
||||
"""Skip `count` bits."""
|
||||
self.bit_index += count
|
||||
|
||||
def read_bit(self) -> int:
|
||||
"""Read one bit from buffer."""
|
||||
index = self.bit_index
|
||||
self.bit_index += 1
|
||||
try:
|
||||
return 1 if self.buffer[index >> 3] & (0x80 >> (index & 7)) else 0
|
||||
except IndexError:
|
||||
raise EndOfBufferError("Unexpected end of buffer.")
|
||||
|
||||
def read_bits(self, count) -> int:
|
||||
"""Read `count` bits from buffer."""
|
||||
index = self.bit_index
|
||||
buffer = self.buffer
|
||||
# index of next bit after reading `count` bits
|
||||
next_bit_index = index + count
|
||||
|
||||
if (next_bit_index - 1) >> 3 > len(buffer):
|
||||
# not enough data to read all bits
|
||||
raise EndOfBufferError("Unexpected end of buffer.")
|
||||
self.bit_index = next_bit_index
|
||||
|
||||
test_bit = 0x80 >> (index & 7)
|
||||
test_byte_index = index >> 3
|
||||
value = 0
|
||||
test_byte = buffer[test_byte_index]
|
||||
while count > 0:
|
||||
value <<= 1
|
||||
if test_byte & test_bit:
|
||||
value |= 1
|
||||
count -= 1
|
||||
test_bit >>= 1
|
||||
if not test_bit and count:
|
||||
test_bit = 0x80
|
||||
test_byte_index += 1
|
||||
test_byte = buffer[test_byte_index]
|
||||
return value
|
||||
|
||||
def read_unsigned_byte(self) -> int:
|
||||
"""Read an unsigned byte (8 bit) from buffer."""
|
||||
return self.read_bits(8)
|
||||
|
||||
def read_signed_byte(self) -> int:
|
||||
"""Read a signed byte (8 bit) from buffer."""
|
||||
value = self.read_bits(8)
|
||||
if value & 0x80:
|
||||
# 2er complement
|
||||
return -((~value & 0xFF) + 1)
|
||||
else:
|
||||
return value
|
||||
|
||||
def read_aligned_bytes(self, count: int) -> Sequence[int]:
|
||||
buffer = self.buffer
|
||||
start_index = self.bit_index >> 3
|
||||
end_index = start_index + count
|
||||
if end_index <= len(buffer):
|
||||
self.bit_index += count << 3
|
||||
return buffer[start_index:end_index]
|
||||
else:
|
||||
raise EndOfBufferError("Unexpected end of buffer.")
|
||||
|
||||
def read_unsigned_short(self) -> int:
|
||||
"""Read an unsigned short (16 bit) from buffer."""
|
||||
if self.bit_index & 7:
|
||||
s1 = self.read_bits(8)
|
||||
s2 = self.read_bits(8)
|
||||
else: # aligned data
|
||||
s1, s2 = self.read_aligned_bytes(2)
|
||||
return (s2 << 8) + s1
|
||||
|
||||
def read_signed_short(self) -> int:
|
||||
"""Read a signed short (16 bit) from buffer."""
|
||||
value = self.read_unsigned_short()
|
||||
if value & 0x8000:
|
||||
# 2er complement
|
||||
return -((~value & 0xFFFF) + 1)
|
||||
else:
|
||||
return value
|
||||
|
||||
def read_unsigned_long(self) -> int:
|
||||
"""Read an unsigned long (32 bit) from buffer."""
|
||||
if self.bit_index & 7:
|
||||
read_bits = self.read_bits
|
||||
l1 = read_bits(8)
|
||||
l2 = read_bits(8)
|
||||
l3 = read_bits(8)
|
||||
l4 = read_bits(8)
|
||||
else: # aligned data
|
||||
l1, l2, l3, l4 = self.read_aligned_bytes(4)
|
||||
return (l4 << 24) + (l3 << 16) + (l2 << 8) + l1
|
||||
|
||||
def read_signed_long(self) -> int:
|
||||
"""Read a signed long (32 bit) from buffer."""
|
||||
value = self.read_unsigned_long()
|
||||
if value & 0x80000000:
|
||||
# 2er complement
|
||||
return -((~value & 0xFFFFFFFF) + 1)
|
||||
else:
|
||||
return value
|
||||
|
||||
def read_float(self) -> float:
|
||||
if self.bit_index & 7:
|
||||
read_bits = self.read_bits
|
||||
data = bytes(read_bits(8) for _ in range(8))
|
||||
else: # aligned data
|
||||
data = bytes(self.read_aligned_bytes(8))
|
||||
return struct.unpack("<d", data)[0]
|
||||
|
||||
def read_3_bits(self) -> int:
|
||||
bit = self.read_bit()
|
||||
if bit: # 1
|
||||
bit = self.read_bit()
|
||||
if bit: # 11
|
||||
bit = self.read_bit()
|
||||
if bit:
|
||||
return 7 # 111
|
||||
else:
|
||||
return 6 # 110
|
||||
return 2 # 10
|
||||
else:
|
||||
return 0 # 0
|
||||
|
||||
@overload
def read_bit_short(self) -> int:
    ...

@overload
def read_bit_short(self, count: int) -> Sequence[int]:
    ...

def read_bit_short(self, count: int = 1) -> Union[int, Sequence[int]]:
    """Read one bit-short (BS) or a tuple of `count` bit-shorts.

    A BS is a 2-bit mode code followed by: 00 = raw signed short,
    01 = unsigned byte, 10 = the constant 0, 11 = the constant 256.
    """
    def _read():
        bits = self.read_bits(2)
        if bits == 0:
            return self.read_signed_short()
        elif bits == 1:
            return self.read_unsigned_byte()
        elif bits == 2:
            return 0
        else:
            return 256

    if count == 1:
        return _read()
    else:
        return tuple(_read() for _ in range(count))
|
||||
|
||||
@overload
def read_bit_long(self) -> int:
    ...

@overload
def read_bit_long(self, count: int) -> Sequence[int]:
    ...

def read_bit_long(self, count: int = 1) -> Union[int, Sequence[int]]:
    """Read one bit-long (BL) or a tuple of `count` bit-longs.

    A BL is a 2-bit mode code followed by: 00 = raw signed long,
    01 = unsigned byte, 10 = the constant 0; mode 11 is unused.
    """
    def _read():
        bits = self.read_bits(2)
        if bits == 0:
            return self.read_signed_long()
        elif bits == 1:
            return self.read_unsigned_byte()
        elif bits == 2:
            return 0
        else:  # not used!
            return 256  # ???
    if count == 1:
        return _read()
    else:
        return tuple(_read() for _ in range(count))
|
||||
|
||||
# LibreDWG: https://github.com/LibreDWG/libredwg/blob/master/src/bits.c
|
||||
# Read 1 bitlonglong (compacted uint64_t) for REQUIREDVERSIONS, preview_size.
|
||||
# ODA doc bug. ODA say 1-3 bits until the first 0 bit. See 3BLL.
|
||||
# The first 3 bits indicate the length l (see paragraph 2.1). Then
|
||||
# l bytes follow, which represent the number (the least significant
|
||||
# byte is first).
|
||||
def read_bit_long_long(self) -> int:
    """Read a bit-long-long (BLL): a 3-bit byte count followed by that many
    bytes, least significant byte first (see comment above / LibreDWG bits.c).
    """
    value = 0
    shifting = 0
    length = self.read_bits(3)  # or read_3_bits() ?
    while length > 0:
        # accumulate bytes little-endian
        value += self.read_unsigned_byte() << shifting
        length -= 1
        shifting += 8
    return value
|
||||
|
||||
@overload
def read_raw_double(self) -> float: ...

@overload
def read_raw_double(self, count: int) -> Sequence[float]: ...

def read_raw_double(self, count: int = 1) -> Union[float, Sequence[float]]:
    """Read one raw double (RD) or a tuple of `count` raw doubles."""
    if count == 1:
        return self.read_float()
    return tuple(self.read_float() for _ in range(count))
|
||||
|
||||
@overload
def read_bit_double(self) -> float:
    ...

@overload
def read_bit_double(self, count: int) -> Sequence[float]:
    ...

def read_bit_double(self, count: int = 1) -> Union[float, Sequence[float]]:
    """Read one bit-double (BD) or a tuple of `count` bit-doubles.

    A BD is a 2-bit mode code followed by: 00 = raw double, 01 = the
    constant 1.0, 10 = the constant 0.0; mode 11 is unused.
    """
    def _read():
        bits = self.read_bits(2)
        if bits == 0:
            return self.read_float()
        elif bits == 1:
            return 1.0
        elif bits == 2:
            return 0.0
        else:  # not used!
            return 0.0

    if count == 1:
        return _read()
    else:
        return tuple(_read() for _ in range(count))
|
||||
|
||||
@overload
def read_bit_double_default(self) -> float:
    ...

@overload
def read_bit_double_default(self, count: int) -> Sequence[float]:
    ...

@overload
def read_bit_double_default(
    self, count: int, default: float
) -> Sequence[float]:
    ...

def read_bit_double_default(
    self, count: int = 1, default: float = 0.0
) -> Union[float, Sequence[float]]:
    """Read one or `count` double-with-default (DD) values.

    A DD is a 2-bit mode code: 00 = the default value unchanged, 01 = the
    first 4 bytes of the default are replaced from the stream, 10 = 6 bytes
    patch the default (in the stream order 4, 5, 0, 1, 2, 3), 11 = a full
    raw double follows.
    """
    # the default value as little-endian byte image to patch
    data = struct.pack("<d", default)

    def _read():
        bits = self.read_bits(2)
        if bits == 0:
            # no data follows, value equals the default
            return default
        elif bits == 1:
            # low 4 bytes from the stream, high 4 bytes from the default
            _data = (
                bytes(self.read_unsigned_byte() for _ in range(4))
                + data[4:]
            )
            return struct.unpack("<d", _data)[0]
        elif bits == 2:
            # 6 bytes from the stream patch the default; note the
            # non-sequential byte order 4, 5, 0, 1, 2, 3
            _data = bytearray(data)
            _data[4] = self.read_unsigned_byte()
            _data[5] = self.read_unsigned_byte()
            _data[0] = self.read_unsigned_byte()
            _data[1] = self.read_unsigned_byte()
            _data[2] = self.read_unsigned_byte()
            _data[3] = self.read_unsigned_byte()
            return struct.unpack("<d", _data)[0]
        else:
            # full raw double follows
            return self.read_float()

    if count == 1:
        return _read()
    else:
        return tuple(_read() for _ in range(count))
|
||||
|
||||
def read_signed_modular_chars(self) -> int:
    """Modular characters are a method of storing compressed integer
    values. They consist of a stream of bytes, terminating when the high
    bit (8) of the byte is 0 else another byte follows. Negative numbers
    are indicated by bit 7 set in the last byte.

    """
    shifting = 0
    value = 0
    while True:
        char = self.read_unsigned_byte()
        if char & 0x80:
            # bit 8 set = another char follows
            # each continuation byte contributes 7 payload bits (LSB first)
            value |= (char & 0x7F) << shifting
            shifting += 7
        else:
            # bit 8 clear = end of modular char
            # bit 7 set = negative number
            # the final byte carries only 6 payload bits, bit 7 is the sign
            value |= (char & 0x3F) << shifting
            return -value if char & 0x40 else value
|
||||
|
||||
def read_unsigned_modular_chars(self) -> int:
    """Read an unsigned modular-char value: a byte stream where each byte
    contributes 7 payload bits (least significant first) and the high bit
    (8) flags that another byte follows.
    """
    result = 0
    shift = 0
    char = self.read_unsigned_byte()
    while char & 0x80:  # continuation flag set
        result |= (char & 0x7F) << shift
        shift += 7
        char = self.read_unsigned_byte()
    # fold in the terminating byte
    return result | ((char & 0x7F) << shift)
|
||||
|
||||
def read_modular_shorts(self) -> int:
    """Modular shorts are a method of storing compressed unsigned integer
    values. Only 1 or 2 shorts in practical usage (1GB), if the high
    bit (16) of the first short is set another short follows.

    """
    short = self.read_unsigned_short()
    if short & 0x8000:
        # first short supplies the low 15 bits, the second short the rest
        return (self.read_unsigned_short() << 15) | (short & 0x7FFF)
    else:
        return short
|
||||
|
||||
def read_bit_extrusion(self) -> Sequence[float]:
    """Read a bit-extrusion (BE): a leading 1-bit means the default
    extrusion (0, 0, 1), otherwise three bit-doubles follow.
    """
    if not self.read_bit():
        return self.read_bit_double(3)
    return 0.0, 0.0, 1.0
|
||||
|
||||
def read_bit_thickness(self, dxfversion="AC1015") -> float:
    """Read a bit-thickness (BT): for DXF version AC1015 (R2000) and newer
    a leading 1-bit means thickness 0.0, otherwise (and for all older
    versions) a bit-double follows.
    """
    if dxfversion >= "AC1015":
        if self.read_bit():
            return 0.0
    return self.read_bit_double()
|
||||
|
||||
def read_cm_color(self) -> int:
    """Read a CMC color stored as a single bit-short color value."""
    return self.read_bit_short()
|
||||
|
||||
def read_text(self) -> str:
    """Read a length prefixed (bit-short) 8-bit string, decoded with
    :attr:`self.encoding`.
    """
    length = self.read_bit_short()
    data = bytes(self.read_unsigned_byte() for _ in range(length))
    return data.decode(encoding=self.encoding)
|
||||
|
||||
def read_text_unicode(self) -> str:
    """Read a length prefixed (bit-short) UTF-16 string; the length prefix
    counts 16-bit characters, not bytes.
    """
    # Unicode text is read from the "string stream" within the object data,
    # see the main Object description section for details.
    length = self.read_bit_short()
    data = bytes(self.read_unsigned_byte() for _ in range(length * 2))
    # NOTE(review): "utf16" without a BOM decodes little-endian in CPython -
    # confirm that DWG strings are always stored little-endian here.
    return data.decode(encoding="utf16")
|
||||
|
||||
def read_text_variable(self) -> str:
    """Read a text string: 8-bit text before R2004 (AC1018), UTF-16 from
    R2004 on.
    """
    unicode_text = self.dxfversion >= "AC1018"  # R2004+
    return self.read_text_unicode() if unicode_text else self.read_text()
|
||||
|
||||
def read_cm_color_cms(self) -> tuple[int, str, str]:
    """Returns tuple (rgb, color_name, book_name)."""
    _ = self.read_bit_short()  # index always 0
    color_name = ""
    book_name = ""
    rgb = self.read_bit_long()
    rc = self.read_unsigned_byte()  # flags: which optional strings follow
    if rc & 1:
        color_name = self.read_text_variable()
    if rc & 2:
        book_name = self.read_text_variable()
    return rgb, color_name, book_name
|
||||
|
||||
def read_cm_color_enc(self) -> Union[int, Sequence[Optional[int]]]:
    """Read an encoded CMC color.

    Returns the color index as int if no extension flags are present,
    otherwise a tuple (rgb, color_handle, transparency_type, transparency)
    where members not covered by the flags are None.
    """
    flags_and_index = self.read_bit_short()
    flags = flags_and_index >> 8
    index = flags_and_index & 0xFF
    if flags:
        rgb = None
        color_handle = None
        transparency_type = None
        transparency = None
        if flags & 0x80:
            # BUGFIX: the 24-bit true color value must be read as a
            # bit-long (BL); a bit-short (max 16 bits) cannot hold the
            # 0x00FFFFFF range the mask expects
            rgb = self.read_bit_long() & 0x00FFFFFF
        if flags & 0x40:
            color_handle = self.read_handle()
        if flags & 0x20:
            data = self.read_bit_long()
            # high byte is the transparency type, low byte the value
            transparency_type = data >> 24
            transparency = data & 0xFF
        return rgb, color_handle, transparency_type, transparency
    else:
        return index
|
||||
|
||||
def read_object_type(self) -> int:
    """Read an object type (OT): a 2-bit mode code selects the storage
    form: 00 = unsigned byte, 01 = unsigned byte + 0x1F0, 10/11 = raw
    unsigned short.
    """
    bits = self.read_bits(2)
    if bits == 0:
        return self.read_unsigned_byte()
    elif bits == 1:
        return self.read_unsigned_byte() + 0x1F0
    else:
        return self.read_unsigned_short()
|
||||
|
||||
def read_handle(self, reference: int = 0) -> int:
    """Returns handle as integer value."""
    # a handle starts with a 4-bit code and a 4-bit byte count
    code = self.read_bits(4)
    length = self.read_bits(4)
    if code == 6:
        # handle is reference + 1, no offset bytes follow
        return reference + 1
    if code == 8:
        # handle is reference - 1, no offset bytes follow
        return reference - 1

    # read `length` bytes into a 64-bit value; the first byte read is
    # stored at index 0 and unpacked as the least significant byte ("<Q")
    data = bytearray(b"\x00\x00\x00\x00\x00\x00\x00\x00")
    for index in range(length):
        data[index] = self.read_unsigned_byte()
    offset = struct.unpack("<Q", data)[0]

    if code < 6:
        # absolute handle value
        return offset
    else:
        if code == 10:
            return reference + offset
        if code == 12:
            return reference - offset
        # unknown handle code: return the null handle
        return 0
|
||||
|
||||
def read_hex_handle(self, reference: int = 0) -> str:
    """Returns handle as uppercase hex string."""
    return f"{self.read_handle(reference):X}"
|
||||
|
||||
def read_code(self, code: str):
    """Read data from the bit stream by the data type codes defined in
    the ODA reference.

    Raises:
        ValueError: for an unknown type code
    """
    # dispatch table: type code -> reader callable
    readers = {
        "B": self.read_bit,
        "RC": self.read_unsigned_byte,
        "RS": self.read_signed_short,
        "BS": self.read_bit_short,
        "RL": self.read_signed_long,
        "BL": self.read_bit_long,
        "RD": self.read_raw_double,
        "2RD": lambda: self.read_raw_double(2),
        "BD": self.read_bit_double,
        "2BD": lambda: self.read_bit_double(2),
        "3BD": lambda: self.read_bit_double(3),
        "T": self.read_text,
        "TV": self.read_text_variable,
        "H": self.read_hex_handle,
        "BLL": self.read_bit_long_long,
        "CMC": self.read_cm_color,
    }
    try:
        reader = readers[code]
    except KeyError:
        raise ValueError(f"Unknown code: {code}") from None
    return reader()
|
||||
@@ -0,0 +1,470 @@
|
||||
# Copyright (c) 2024, Manfred Moitzi
|
||||
# License: MIT License
|
||||
from __future__ import annotations
|
||||
from typing import (
|
||||
Optional,
|
||||
Iterable,
|
||||
Iterator,
|
||||
Sequence,
|
||||
NamedTuple,
|
||||
Callable,
|
||||
TYPE_CHECKING,
|
||||
)
|
||||
import abc
|
||||
|
||||
from ezdxf.math import (
|
||||
Matrix44,
|
||||
Vec2,
|
||||
BoundingBox2d,
|
||||
UVec,
|
||||
is_convex_polygon_2d,
|
||||
is_axes_aligned_rectangle_2d,
|
||||
)
|
||||
from ezdxf.npshapes import NumpyPath2d, NumpyPoints2d
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from ezdxf.math.clipping import Clipping
|
||||
|
||||
__all__ = [
|
||||
"ClippingShape",
|
||||
"ClippingPortal",
|
||||
"ClippingRect",
|
||||
"ConvexClippingPolygon",
|
||||
"InvertedClippingPolygon",
|
||||
"MultiClip",
|
||||
"find_best_clipping_shape",
|
||||
"make_inverted_clipping_shape",
|
||||
]
|
||||
|
||||
|
||||
class ClippingShape(abc.ABC):
    """The ClippingShape defines a single clipping path and executes the clipping on
    basic geometries:

    - point: a single point
    - line: a line between two vertices
    - polyline: open polyline with one or more straight line segments
    - polygon: closed shape with straight line as edges
    - path: open shape with straight lines and Bezier-curves as segments
    - filled-path: closed shape with straight lines and Bezier-curves as edges

    Difference between open and closed shapes:

    - an open shape is treated as a linear shape without a filling
    - clipping an open shape returns one or more open shapes
    - a closed shape is treated as a filled shape, where the first vertex is
    coincident to the last vertex.
    - clipping a closed shape returns one or more closed shapes

    Notes:

    An arbitrary clipping polygon can split any basic geometry (except point) into
    multiple parts.

    All current implemented clipping algorithms flatten Bezier-curves into polylines.

    """

    # bounding box of the clipping shape itself
    @abc.abstractmethod
    def bbox(self) -> BoundingBox2d: ...

    # For both bounding-box tests below:
    # returning False means: I don't know!
    @abc.abstractmethod
    def is_completely_inside(self, other: BoundingBox2d) -> bool: ...

    @abc.abstractmethod
    def is_completely_outside(self, other: BoundingBox2d) -> bool: ...

    @abc.abstractmethod
    def clip_point(self, point: Vec2) -> Optional[Vec2]: ...

    @abc.abstractmethod
    def clip_line(self, start: Vec2, end: Vec2) -> Sequence[tuple[Vec2, Vec2]]: ...

    @abc.abstractmethod
    def clip_polyline(self, points: NumpyPoints2d) -> Sequence[NumpyPoints2d]: ...

    @abc.abstractmethod
    def clip_polygon(self, points: NumpyPoints2d) -> Sequence[NumpyPoints2d]: ...

    @abc.abstractmethod
    def clip_paths(
        self, paths: Iterable[NumpyPath2d], max_sagitta: float
    ) -> Iterator[NumpyPath2d]: ...

    @abc.abstractmethod
    def clip_filled_paths(
        self, paths: Iterable[NumpyPath2d], max_sagitta: float
    ) -> Iterator[NumpyPath2d]: ...
|
||||
|
||||
|
||||
class ClippingStage(NamedTuple):
    # One entry of the clipping stack: the clipping shape and an optional
    # transformation applied to the geometry before clipping.
    portal: ClippingShape
    transform: Matrix44 | None
|
||||
|
||||
|
||||
class ClippingPortal:
    """The ClippingPortal manages a clipping path stack; geometry is clipped
    by every stage, top of stack (last pushed) first.
    """

    def __init__(self) -> None:
        self._stages: list[ClippingStage] = []

    @property
    def is_active(self) -> bool:
        """True if at least one clipping stage is on the stack."""
        return bool(self._stages)

    def push(self, portal: ClippingShape, transform: Matrix44 | None) -> None:
        """Put a new clipping stage on top of the stack."""
        self._stages.append(ClippingStage(portal, transform))

    def pop(self) -> None:
        """Remove the top clipping stage; no-op on an empty stack."""
        if self._stages:
            self._stages.pop()

    def foreach_stage(self, command: Callable[[ClippingStage], bool]) -> None:
        """Apply `command` to all stages, top of stack first; stops early
        when `command` returns False (nothing left to clip).
        """
        for stage in self._stages[::-1]:
            if not command(stage):
                return

    def clip_point(self, point: Vec2) -> Optional[Vec2]:
        """Clip a single point; returns None if the point is removed."""
        result: Vec2 | None = point

        def do(stage: ClippingStage) -> bool:
            nonlocal result
            assert result is not None
            if stage.transform:
                result = Vec2(stage.transform.transform(result))
            result = stage.portal.clip_point(result)
            return result is not None

        self.foreach_stage(do)
        return result

    def clip_line(self, start: Vec2, end: Vec2) -> list[tuple[Vec2, Vec2]]:
        """Clip a line; clipping can split it into multiple segments."""

        def do(stage: ClippingStage) -> bool:
            lines = list(result)
            result.clear()
            for s, e in lines:
                if stage.transform:
                    s, e = stage.transform.fast_2d_transform((s, e))
                result.extend(stage.portal.clip_line(s, e))
            return bool(result)

        result = [(start, end)]
        self.foreach_stage(do)
        return result

    def clip_polyline(self, points: NumpyPoints2d) -> list[NumpyPoints2d]:
        """Clip an open polyline; may return multiple polylines."""

        def do(stage: ClippingStage) -> bool:
            polylines = list(result)
            result.clear()
            for polyline in polylines:
                if stage.transform:
                    polyline.transform_inplace(stage.transform)
                result.extend(stage.portal.clip_polyline(polyline))
            return bool(result)

        result = [points]
        self.foreach_stage(do)
        return result

    def clip_polygon(self, points: NumpyPoints2d) -> list[NumpyPoints2d]:
        """Clip a closed polygon; may return multiple polygons."""

        def do(stage: ClippingStage) -> bool:
            polygons = list(result)
            result.clear()
            for polygon in polygons:
                if stage.transform:
                    polygon.transform_inplace(stage.transform)
                result.extend(stage.portal.clip_polygon(polygon))
            return bool(result)

        result = [points]
        self.foreach_stage(do)
        return result

    def clip_paths(
        self, paths: Iterable[NumpyPath2d], max_sagitta: float
    ) -> list[NumpyPath2d]:
        """Clip open paths; curves are flattened with `max_sagitta`."""

        def do(stage: ClippingStage) -> bool:
            current = list(result)
            result.clear()
            for path in current:
                if stage.transform:
                    path.transform_inplace(stage.transform)
            # BUGFIX: clip the whole batch once after transforming; the
            # previous version called clip_paths() with the full list
            # inside the per-path loop and duplicated the output
            # len(current) times per stage.
            result.extend(stage.portal.clip_paths(current, max_sagitta))
            return bool(result)

        result = list(paths)
        self.foreach_stage(do)
        return result

    def clip_filled_paths(
        self, paths: Iterable[NumpyPath2d], max_sagitta: float
    ) -> list[NumpyPath2d]:
        """Clip filled (closed) paths; curves are flattened with `max_sagitta`."""

        def do(stage: ClippingStage) -> bool:
            current = list(result)
            result.clear()
            for path in current:
                if stage.transform:
                    path.transform_inplace(stage.transform)
            # BUGFIX: see clip_paths() - clip once per stage, not per path
            result.extend(stage.portal.clip_filled_paths(current, max_sagitta))
            return bool(result)

        result = list(paths)
        self.foreach_stage(do)
        return result

    def transform_matrix(self, m: Matrix44) -> Matrix44:
        """Combine `m` with all stage transformations, top of stack first."""
        for _, transform in self._stages[::-1]:
            if transform is not None:
                m @= transform
        return m
|
||||
|
||||
|
||||
class ClippingPolygon(ClippingShape):
    """Represents an arbitrary polygon as clipping shape. Removes the geometry
    outside the clipping polygon.

    """

    def __init__(self, bbox: BoundingBox2d, clipper: Clipping) -> None:
        # bbox: bounding box of the clipping polygon
        # clipper: the 2D clipping algorithm implementation
        if not bbox.has_data:
            raise ValueError("clipping box not detectable")
        self._bbox = bbox
        self.clipper = clipper

    def bbox(self) -> BoundingBox2d:
        """Returns the bounding box of the clipping shape."""
        return self._bbox

    def clip_point(self, point: Vec2) -> Optional[Vec2]:
        """Returns the point unchanged if inside the clipping shape else None."""
        is_inside = self.clipper.is_inside(Vec2(point))
        if not is_inside:
            return None
        return point

    def clip_line(self, start: Vec2, end: Vec2) -> Sequence[tuple[Vec2, Vec2]]:
        """Clip a line; may return multiple segments."""
        return self.clipper.clip_line(start, end)

    def clip_polyline(self, points: NumpyPoints2d) -> Sequence[NumpyPoints2d]:
        """Clip an open polyline; may return multiple polylines."""
        clipper = self.clipper
        if len(points) == 0:
            return tuple()
        # cheap bounding-box tests before running the clipping algorithm
        polyline_bbox = BoundingBox2d(points.extents())
        if self.is_completely_outside(polyline_bbox):
            return tuple()
        if self.is_completely_inside(polyline_bbox):
            return (points,)
        return [
            NumpyPoints2d(part)
            for part in clipper.clip_polyline(points.vertices())
            if len(part) > 0
        ]

    def clip_polygon(self, points: NumpyPoints2d) -> Sequence[NumpyPoints2d]:
        """Clip a closed polygon; may return multiple polygons."""
        clipper = self.clipper
        if len(points) < 2:
            return tuple()
        # cheap bounding-box tests before running the clipping algorithm
        polygon_bbox = BoundingBox2d(points.extents())
        if self.is_completely_outside(polygon_bbox):
            return tuple()
        if self.is_completely_inside(polygon_bbox):
            return (points,)
        return [
            NumpyPoints2d(part)
            for part in clipper.clip_polygon(points.vertices())
            if len(part) > 0
        ]

    def clip_paths(
        self, paths: Iterable[NumpyPath2d], max_sagitta: float
    ) -> Iterator[NumpyPath2d]:
        """Clip open paths; Bezier-curves are flattened with `max_sagitta`."""
        clipper = self.clipper
        for path in paths:
            for sub_path in path.sub_paths():
                path_bbox = BoundingBox2d(sub_path.control_vertices())
                if not path_bbox.has_data:
                    continue
                if self.is_completely_inside(path_bbox):
                    yield sub_path
                    continue
                if self.is_completely_outside(path_bbox):
                    continue
                # flatten curves and clip the resulting polyline
                polyline = Vec2.list(sub_path.flattening(max_sagitta, segments=4))
                for part in clipper.clip_polyline(polyline):
                    if len(part) > 0:
                        yield NumpyPath2d.from_vertices(part, close=False)

    def clip_filled_paths(
        self, paths: Iterable[NumpyPath2d], max_sagitta: float
    ) -> Iterator[NumpyPath2d]:
        """Clip filled paths; Bezier-curves are flattened with `max_sagitta`."""
        clipper = self.clipper
        for path in paths:
            for sub_path in path.sub_paths():
                if len(sub_path) < 2:
                    continue
                path_bbox = BoundingBox2d(sub_path.control_vertices())
                if self.is_completely_inside(path_bbox):
                    yield sub_path
                    continue
                if self.is_completely_outside(path_bbox):
                    continue
                # flatten curves and clip as closed polygon
                for part in clipper.clip_polygon(
                    Vec2.list(sub_path.flattening(max_sagitta, segments=4))
                ):
                    if len(part) > 0:
                        yield NumpyPath2d.from_vertices(part, close=True)
|
||||
|
||||
|
||||
class ClippingRect(ClippingPolygon):
    """Represents a rectangle as clipping shape where the edges are parallel to
    the x- and y-axis of the coordinate system. Removes the geometry outside the
    clipping rectangle.

    """

    def __init__(self, vertices: Iterable[UVec]) -> None:
        from ezdxf.math.clipping import ClippingRect2d

        polygon = Vec2.list(vertices)
        bbox = BoundingBox2d(polygon)
        if not bbox.has_data:
            raise ValueError("clipping box not detectable")
        size: Vec2 = bbox.size
        # a (near) zero-area clipping rectangle removes everything
        self.remove_all = size.x * size.y < 1e-9

        super().__init__(bbox, ClippingRect2d(bbox.extmin, bbox.extmax))

    def is_completely_inside(self, other: BoundingBox2d) -> bool:
        # exact test: an axes-aligned rectangle equals its bounding box
        return self._bbox.contains(other)

    def is_completely_outside(self, other: BoundingBox2d) -> bool:
        return not self._bbox.has_intersection(other)

    def clip_point(self, point: Vec2) -> Optional[Vec2]:
        if self.remove_all:
            return None
        return super().clip_point(point)

    def clip_line(self, start: Vec2, end: Vec2) -> Sequence[tuple[Vec2, Vec2]]:
        if self.remove_all:
            return tuple()
        return self.clipper.clip_line(start, end)

    def clip_polyline(self, points: NumpyPoints2d) -> Sequence[NumpyPoints2d]:
        if self.remove_all:
            # returns a single empty container, not an empty sequence
            return (NumpyPoints2d(tuple()),)
        return super().clip_polyline(points)

    def clip_polygon(self, points: NumpyPoints2d) -> Sequence[NumpyPoints2d]:
        if self.remove_all:
            # returns a single empty container, not an empty sequence
            return (NumpyPoints2d(tuple()),)
        return super().clip_polygon(points)

    def clip_paths(
        self, paths: Iterable[NumpyPath2d], max_sagitta: float
    ) -> Iterator[NumpyPath2d]:
        if self.remove_all:
            return iter(tuple())
        return super().clip_paths(paths, max_sagitta)

    def clip_filled_paths(
        self, paths: Iterable[NumpyPath2d], max_sagitta: float
    ) -> Iterator[NumpyPath2d]:
        if self.remove_all:
            return iter(tuple())
        return super().clip_filled_paths(paths, max_sagitta)
|
||||
|
||||
|
||||
class ConvexClippingPolygon(ClippingPolygon):
    """Clipping shape for an arbitrary convex polygon; removes the geometry
    outside the clipping polygon.

    """

    def __init__(self, vertices: Iterable[UVec]) -> None:
        from ezdxf.math.clipping import ConvexClippingPolygon2d

        points = Vec2.list(vertices)
        super().__init__(BoundingBox2d(points), ConvexClippingPolygon2d(points))

    def is_completely_inside(self, other: BoundingBox2d) -> bool:
        # no cheap exact test available - False means "I don't know"
        return False

    def is_completely_outside(self, other: BoundingBox2d) -> bool:
        # disjoint bounding boxes guarantee no intersection
        return not self._bbox.has_intersection(other)
|
||||
|
||||
|
||||
class ConcaveClippingPolygon(ClippingPolygon):
    """Clipping shape for an arbitrary concave polygon; removes the geometry
    outside the clipping polygon.

    """

    def __init__(self, vertices: Iterable[UVec]) -> None:
        from ezdxf.math.clipping import ConcaveClippingPolygon2d

        points = Vec2.list(vertices)
        super().__init__(BoundingBox2d(points), ConcaveClippingPolygon2d(points))

    def is_completely_inside(self, other: BoundingBox2d) -> bool:
        # no cheap exact test available - False means "I don't know"
        return False

    def is_completely_outside(self, other: BoundingBox2d) -> bool:
        # disjoint bounding boxes guarantee no intersection
        return not self._bbox.has_intersection(other)
|
||||
|
||||
|
||||
class InvertedClippingPolygon(ClippingPolygon):
    """Represents an arbitrary inverted clipping polygon. Removes the geometry
    inside the clipping polygon.

    .. Important::

        The `outer_bounds` must be larger than the content to clip to work correctly.

    """

    def __init__(self, vertices: Iterable[UVec], outer_bounds: BoundingBox2d) -> None:
        from ezdxf.math.clipping import InvertedClippingPolygon2d

        polygon = Vec2.list(vertices)
        # the effective clipping region extends to outer_bounds
        super().__init__(outer_bounds, InvertedClippingPolygon2d(polygon, outer_bounds))

    def is_completely_inside(self, other: BoundingBox2d) -> bool:
        # returning False means: I don't know!
        return False  # not easy to detect

    def is_completely_outside(self, other: BoundingBox2d) -> bool:
        return not self._bbox.has_intersection(other)
|
||||
|
||||
|
||||
def find_best_clipping_shape(polygon: Iterable[UVec]) -> ClippingShape:
    """Returns the best clipping shape for the given clipping polygon.

    Analyses the polygon and picks the fastest suitable implementation:
    axes-aligned rectangles get :class:`ClippingRect`, convex polygons get
    :class:`ConvexClippingPolygon`, everything else the general
    :class:`ConcaveClippingPolygon`.

    Args:
        polygon: clipping polygon as iterable vertices

    """
    vertices = Vec2.list(polygon)
    if is_axes_aligned_rectangle_2d(vertices):
        return ClippingRect(vertices)
    if is_convex_polygon_2d(vertices, strict=False):
        return ConvexClippingPolygon(vertices)
    return ConcaveClippingPolygon(vertices)
|
||||
|
||||
|
||||
def make_inverted_clipping_shape(
    polygon: Iterable[UVec], outer_bounds: BoundingBox2d
) -> ClippingShape:
    """Returns an inverted clipping shape that removes the geometry inside the clipping
    polygon and beyond the outer bounds.

    Args:
        polygon: clipping polygon as iterable vertices
        outer_bounds: outer bounds of the clipping shape

    """
    # NOTE: outer_bounds must be larger than the content to clip, see
    # InvertedClippingPolygon
    return InvertedClippingPolygon(polygon, outer_bounds)
|
||||
@@ -0,0 +1,91 @@
|
||||
# Purpose: constant values
|
||||
# Created: 10.03.2011
|
||||
# Copyright (c) 2011-2018, Manfred Moitzi
|
||||
# License: MIT License
|
||||
|
||||
# --- For Unix ---
|
||||
# 0: undefined Rises an error.
|
||||
# 1: ascii $20-$7F,%%c,%%d,%%p Only
|
||||
# 2: iso8859_1 Western Europe(Unix) $20-$FF,%%c,%%d,%%p
|
||||
# 3: iso8859_2 Central Europe(Unix)
|
||||
# 4: iso8859_3 Eastern Europe(Unix)
|
||||
# 5: iso8859_4 Baltic(Unix)
|
||||
# 6: iso8859_5 Cyrillic(Unix)
|
||||
# 7: iso8859_6 Arabic(Unix)
|
||||
# 8: iso8859_7 Greek(Unix)
|
||||
# 9: iso8859_8 Hebrew(Unix)
|
||||
# 10: iso8859_9 Turkish(Unix)
|
||||
#
|
||||
# --- For DOS/Mac ---
|
||||
# 11: dos437 DOS USA
|
||||
# 12: dos850 DOS Western Europe
|
||||
# 13: dos852 DOS Eastern Europe
|
||||
# 14: dos855 IBM Russian
|
||||
# 15: dos857 IBM Turkish
|
||||
# 16: dos860 DOS Portuguese
|
||||
# 17: dos861 DOS Icelandic
|
||||
# 18: dos863 DOS Canadian French
|
||||
# 19: dos864 DOS Arabic
|
||||
# 20: dos865 DOS Norwegian
|
||||
# 21: dos869 DOS Greek
|
||||
# 22: dos932 DOS Japanese
|
||||
# 23: mac-roman Mac
|
||||
# 24: big5 DOS Traditional Chinese
|
||||
# 25: ksc5601 Korean Wansung
|
||||
# 26: johab Korean Johab
|
||||
# 27: dos866 DOS Russian
|
||||
# 31: gb2312 DOS Simplified Chinese
|
||||
#
|
||||
# --- For Windows ---
|
||||
# 28: ansi_1250 Win Eastern Europe
|
||||
# 29: ansi_1251 Win Russian
|
||||
# 30: ansi_1252 Win Western Europe(ANSI)
|
||||
# 32: ansi_1253 Win Greek
|
||||
# 33: ansi_1254 Win Turkish
|
||||
# 34: ansi_1255 Win Hebrew
|
||||
# 35: ansi_1256 Win Arabic
|
||||
# 36: ansi_1257 Win Baltic
|
||||
# 37: ansi_874 Win Thai
|
||||
# 38: ansi_932 Win Japanese
|
||||
# 39: ansi_936 Win Simplified Chinese GB
|
||||
# 40: ansi_949 Win Korean Wansung
|
||||
# 41: ansi_950 Win Traditional Chinese big5
|
||||
# 42: ansi_1361 Win Korean Johab
|
||||
# 43: ansi_1200 Unicode (reserved)
|
||||
# --: ansi_1258 Win Vietnamese (reserved)
|
||||
|
||||
# mapping: ANSI codepage number -> Python codec name
codepage_to_encoding = {
    "874": "cp874",  # Thai
    "932": "cp932",  # Japanese
    "936": "gbk",  # UnifiedChinese
    "949": "cp949",  # Korean
    "950": "cp950",  # TradChinese
    "1250": "cp1250",  # CentralEurope
    "1251": "cp1251",  # Cyrillic
    "1252": "cp1252",  # WesternEurope
    "1253": "cp1253",  # Greek
    "1254": "cp1254",  # Turkish
    "1255": "cp1255",  # Hebrew
    "1256": "cp1256",  # Arabic
    "1257": "cp1257",  # Baltic
    "1258": "cp1258",  # Vietnam
}

# reverse mapping: Python codec name -> ANSI codepage number
encoding_to_codepage = {
    encoding: codepage for codepage, encoding in codepage_to_encoding.items()
}


def is_supported_encoding(encoding: str = "cp1252") -> bool:
    """True if `encoding` maps to a known DXF codepage."""
    return encoding in encoding_to_codepage


def toencoding(dxfcodepage: str) -> str:
    """Returns the Python encoding for a DXF codepage string; falls back
    to "cp1252" for unknown codepages.
    """
    return next(
        (
            encoding
            for codepage, encoding in codepage_to_encoding.items()
            if dxfcodepage.endswith(codepage)
        ),
        "cp1252",
    )


def tocodepage(encoding: str) -> str:
    """Returns the DXF codepage string for a Python `encoding`; unknown
    encodings map to "ANSI_1252".
    """
    return f"ANSI_{encoding_to_codepage.get(encoding, '1252')}"
|
||||
@@ -0,0 +1,214 @@
|
||||
# Purpose: compiler for line type definitions
|
||||
# Copyright (c) 2018-2021, Manfred Moitzi
|
||||
# License: MIT License
|
||||
|
||||
# Auszug acadlt.lin
|
||||
#
|
||||
# *RAND,Rand __ __ . __ __ . __ __ . __ __ . __ __ .
|
||||
# A,.5,-.25,.5,-.25,0,-.25
|
||||
# *RAND2,Rand (.5x) __.__.__.__.__.__.__.__.__.__.__.
|
||||
# A,.25,-.125,.25,-.125,0,-.125
|
||||
# *RANDX2,Rand (2x) ____ ____ . ____ ____ . ___
|
||||
# A,1.0,-.5,1.0,-.5,0,-.5
|
||||
#
|
||||
# *MITTE,Mitte ____ _ ____ _ ____ _ ____ _ ____ _ ____
|
||||
# A,1.25,-.25,.25,-.25
|
||||
# *CENTER2,Mitte (.5x) ___ _ ___ _ ___ _ ___ _ ___ _ ___
|
||||
# A,.75,-.125,.125,-.125
|
||||
# *MITTEX2,Mitte (2x) ________ __ ________ __ _____
|
||||
# A,2.5,-.5,.5,-.5
|
||||
#
|
||||
# ;; Komplexe Linientypen
|
||||
# ;;
|
||||
# ;; Dieser Datei sind komplexe Linientypen hinzugefügt worden.
|
||||
# ;; Diese Linientypen wurden in LTYPESHP.LIN in
|
||||
# ;; Release 13 definiert und wurden in ACAD.LIN in
|
||||
# ;; Release 14 aufgenommen.
|
||||
# ;;
|
||||
# ;; Diese Linientypdefinitionen verwenden LTYPESHP.SHX.
|
||||
# ;;
|
||||
# *GRENZE1,Grenze rund ----0-----0----0-----0----0-----0--
|
||||
# A,.25,-.1,[CIRC1,ltypeshp.shx,x=-.1,s=.1],-.1,1
|
||||
# *GRENZE2,Grenze eckig ----[]-----[]----[]-----[]----[]---
|
||||
# A,.25,-.1,[BOX,ltypeshp.shx,x=-.1,s=.1],-.1,1
|
||||
# *EISENBAHN,Eisenbahn -|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-
|
||||
# A,.15,[TRACK1,ltypeshp.shx,s=.25],.15
|
||||
# *ISOLATION,Isolation SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
|
||||
# A,.0001,-.1,[BAT,ltypeshp.shx,x=-.1,s=.1],-.2,[BAT,ltypeshp.shx,r=180,x=.1,s=.1],-.1
|
||||
# *HEISSWASSERLEITUNG,Heißwasserleitung ---- HW ---- HW ---- HW ----
|
||||
# A,.5,-.2,["HW",STANDARD,S=.1,U=0.0,X=-0.1,Y=-.05],-.2
|
||||
# *GASLEITUNG,Gasleitung ----GAS----GAS----GAS----GAS----GAS----GAS--
|
||||
# A,.5,-.2,["GAS",STANDARD,S=.1,U=0.0,X=-0.1,Y=-.05],-.25
|
||||
# *ZICKZACK,Zickzack /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/
|
||||
# A,.0001,-.2,[ZIG,ltypeshp.shx,x=-.2,s=.2],-.4,[ZIG,ltypeshp.shx,r=180,x=.2,s=.2],-.2
|
||||
from __future__ import annotations
|
||||
from typing import TYPE_CHECKING, Iterable, Sequence, Union, Any
|
||||
from ezdxf.lldxf.const import DXFValueError, DXFTableEntryError
|
||||
from ezdxf.lldxf.tags import DXFTag, Tags
|
||||
|
||||
if TYPE_CHECKING: # import forward references
|
||||
from ezdxf.document import Drawing
|
||||
|
||||
Token = Union[str, float, list]
|
||||
|
||||
|
||||
def lin_compiler(definition: str) -> Sequence[DXFTag]:
    """
    Compiles line type definitions like 'A,.5,-.25,.5,-.25,0,-.25' or
    'A,.5,-.2,["GAS",STANDARD,S=.1,U=0.0,X=-0.1,Y=-.05],-.25' into DXFTags().

    Args:
        definition: definition string

    Returns:
        list of DXFTag()
    """
    # 'A,.5,-.2,["GAS",STANDARD,S=.1,U=0.0,X=-0.1,Y=-.05],-.25'
    # ['A', .5, -.2, ['TEXT', 'GAS', 'STANDARD', 's', .1, 'u', 0.0, 'x', -.1, 'y', -.05], -.25]
    tags = []
    for token in lin_parser(definition):
        if token == "A":
            # the leading alignment token "A" produces no tag
            continue
        elif isinstance(token, float):
            tags.append(
                DXFTag(49, token)
            )  # Dash, dot or space length (one entry per element)
        elif isinstance(token, list):  # yield from
            # embedded TEXT/SHAPE definition -> ComplexLineTypePart
            tags.append(compile_complex_definition(token))  # type: ignore
    return tags
|
||||
|
||||
|
||||
class ComplexLineTypePart:
    """A single TEXT or SHAPE part of a complex line type definition."""

    def __init__(self, type_: str, value, font: str = "STANDARD"):
        self.type = type_  # "TEXT" or "SHAPE"
        self.value = value  # text string (TEXT) or shape index (SHAPE)
        self.font = font  # text style name or shape file name
        self.tags = Tags()  # scale/rotation/shift tags, filled by the compiler

    def complex_ltype_tags(self, doc: "Drawing") -> Sequence[DXFTag]:
        """Return the DXF tags for this part; `doc` is required to resolve
        the handle of the text style or shape file entry.
        """
        def get_font_handle() -> str:
            if self.type == "SHAPE":
                # Create new shx or returns existing entry:
                font = doc.styles.get_shx(self.font)
            else:
                try:
                    # Case insensitive search for text style:
                    font = doc.styles.get(self.font)
                except DXFTableEntryError:
                    # create the text style on demand
                    font = doc.styles.new(self.font)
            return font.dxf.handle

        # Note: AutoCAD/BricsCAD do NOT report an error or even crash, if the
        # text style handle is invalid!
        if doc is not None:
            handle = get_font_handle()
        else:
            # no document available: write the null handle
            handle = "0"
        tags = []
        if self.type == "TEXT":
            tags.append(DXFTag(74, 2))
            tags.append(DXFTag(75, 0))
        else:  # SHAPE
            tags.append(DXFTag(74, 4))
            tags.append(DXFTag(75, self.value))
        tags.append(DXFTag(340, handle))  # STYLE table entry handle
        tags.extend(self.tags)  # scale, rotation, x- and y-shift
        if self.type == "TEXT":
            tags.append(DXFTag(9, self.value))  # the text content itself
        return tags
|
||||
|
||||
|
||||
# Mapping of LIN modifier letters to DXF group codes,
# used by compile_complex_definition():
CMD_CODES = {
    "s": 46,  # scaling factor
    "r": 50,  # rotation angle, r == u
    "u": 50,  # rotation angle
    "x": 44,  # shift x units = parallel to line direction
    "y": 45,  # shift y units = normal to line direction
}
|
||||
|
||||
|
||||
def compile_complex_definition(tokens: Sequence) -> ComplexLineTypePart:
    """Build a ComplexLineTypePart from a parsed sub-token list:
    [type, value, font, cmd1, value1, cmd2, value2, ...].
    """
    part = ComplexLineTypePart(tokens[0], tokens[1], tokens[2])
    # pop() from the reversed list yields the trailing tokens front to back
    commands = list(reversed(tokens[3:]))
    params = {}
    while len(commands):
        cmd = commands.pop()
        # NOTE(review): assumes an even count of trailing tokens
        # (command/value pairs) — an odd count would raise IndexError here.
        value = commands.pop()
        # unknown commands are collected under group code 0
        code = CMD_CODES.get(cmd, 0)
        params[code] = DXFTag(code, value)

    # emit tags in fixed order: scale, rotation, x-shift, y-shift;
    # missing values default to 0.0
    for code in (46, 50, 44, 45):
        tag = params.get(code, DXFTag(code, 0.0))
        part.tags.append(tag)
    return part
|
||||
|
||||
|
||||
def lin_parser(definition: str) -> Sequence[Token]:
    """Parse a LIN definition string into a flat token list: the leading "A",
    floats for dash/dot/space lengths and nested lists for TEXT/SHAPE parts.
    """
    bag: list[Any] = []
    sublist = None  # currently open "[...]" sub-definition, if any
    first = True
    for token in lin_tokenizer(definition):
        if token == "A" and first:
            # alignment flag is only valid as the very first token
            bag.append(token)
            first = False
            continue

        try:
            value = float(token)  # only outside of TEXT or SHAPE definition
            bag.append(value)
            continue
        except ValueError:
            pass

        if token.startswith("["):
            if sublist is not None:
                # nested "[" without closing the previous sub-definition
                raise DXFValueError(
                    "Complex line type error. {}".format(definition)
                )
            sublist = []
            if token.startswith('["'):
                sublist.append("TEXT")
                sublist.append(
                    token[2:-1]
                )  # text without surrounding '["' and '"'
            else:
                sublist.append("SHAPE")
                try:
                    sublist.append(int(token[1:]))  # type: ignore # shape index! required
                except ValueError:
                    raise DXFValueError(
                        "Complex line type with shapes requires shape index not shape name!"
                    )
        else:
            # token inside an open sub-definition: either "key=value" or a
            # plain name (e.g. the font/style name)
            _token = token.rstrip("]")
            subtokens = _token.split("=")
            if len(subtokens) == 2:
                sublist.append(subtokens[0].lower())
                sublist.append(float(subtokens[1]))  # type: ignore
            else:
                sublist.append(_token)
        if token.endswith("]"):
            # closing bracket terminates the sub-definition
            if sublist is None:
                raise DXFValueError(
                    "Complex line type error. {}".format(definition)
                )
            bag.append(sublist)
            sublist = None  # type: ignore
    return bag
|
||||
|
||||
|
||||
def lin_tokenizer(definition: str) -> Iterable[str]:
    """Split a LIN definition line at commas and yield stripped tokens.

    Commas inside double quoted text are preserved.

    Raises:
        DXFValueError: unbalanced double quotes in `definition`

    """
    current = ""
    inside_quotes = False
    for char in definition:
        if char == "," and not inside_quotes:
            yield current.strip()
            current = ""
        else:
            current += char
            if char == '"':
                inside_quotes = not inside_quotes
    if inside_quotes:
        raise DXFValueError("Line type parsing error: '{}'".format(definition))
    if current:
        yield current.strip()
|
||||
@@ -0,0 +1,58 @@
|
||||
# Purpose: decode/encode DXF proprietary data
|
||||
# Created: 01.05.2014
|
||||
# Copyright (c) 2014-2018, Manfred Moitzi
|
||||
# License: MIT License
|
||||
from typing import Iterable
|
||||
|
||||
# Mapping of "encrypted" SAT bytes to decoded characters; bytes not in this
# table are decoded by XOR with 0x5F (see decode()).
_decode_table = {
    0x20: ' ',
    0x40: '_',
    0x5F: '@',
}
for c in range(0x41, 0x5F):
    _decode_table[c] = chr(0x41 + (0x5E - c))  # 0x5E -> 'A', 0x5D->'B', ...
|
||||
|
||||
|
||||
def decode(text_lines: Iterable[str]) -> Iterable[str]:
    """ Decode the Standard :term:`ACIS` Text (SAT) format "encrypted" by AutoCAD. """
    def _decode(text):
        dectab = _decode_table  # fast local var
        s = []
        # work on ASCII byte values
        text = bytes(text, 'ascii')
        skip = False
        for c in text:
            if skip:
                skip = False
                continue
            if c in dectab:
                s += dectab[c]
                skip = (c == 0x5E)  # skip space after 'A'
            else:
                # all other bytes are decoded by XOR with 0x5F
                s += chr(c ^ 0x5F)
        return ''.join(s)
    # lazy: lines are decoded on iteration
    return (_decode(line) for line in text_lines)
|
||||
|
||||
|
||||
# Mapping of plain characters to "encrypted" SAT characters; characters not
# in this table are encoded by XOR with 0x5F (see encode()).
_encode_table = {
    ' ': ' ',  # 0x20
    '_': '@',  # 0x40
    '@': '_',  # 0x5F
}
for c in range(0x41, 0x5F):
    _encode_table[chr(c)] = chr(0x5E - (c - 0x41))  # 0x5E->'A', 'B'->0x5D, ...
|
||||
|
||||
|
||||
def encode(text_lines: Iterable[str]) -> Iterable[str]:
    """ Encode the Standard :term:`ACIS` Text (SAT) format by AutoCAD "encryption" algorithm. """
    def _encode(text):
        s = []
        enctab = _encode_table  # fast local var
        for c in text:
            if c in enctab:
                s += enctab[c]
                if c == 'A':
                    s += ' '  # append a space for an 'A' -> cryptography
            else:
                # all other characters are encoded by XOR with 0x5F
                s += chr(ord(c) ^ 0x5F)
        return ''.join(s)
    # lazy: lines are encoded on iteration
    return (_encode(line) for line in text_lines)
|
||||
@@ -0,0 +1,54 @@
|
||||
# Copyright (c) 2021, Manfred Moitzi
|
||||
# License: MIT License
|
||||
from typing import Sequence
|
||||
|
||||
|
||||
def group_chars(s: str, n: int = 4, sep="-") -> str:
    """Group the characters of `s` from the right into chunks of `n`
    characters, joined by `sep`, e.g. "1234567" -> "123-4567".
    """
    if not s:
        return ""
    reversed_s = s[::-1]
    # chunk the reversed string, then restore order chunk-wise
    chunks = [reversed_s[i : i + n] for i in range(0, len(reversed_s), n)]
    return sep.join(chunk[::-1] for chunk in reversed(chunks))
|
||||
|
||||
|
||||
def bitmask_strings(
    value: int, base: int = 10, sep: str = "-"
) -> Sequence[str]:
    """Return three aligned strings: two ruler lines showing the bit
    positions (in decimal or hex digits) and the binary representation of
    `value`, all grouped in blocks of 4 separated by `sep`.

    Raises:
        ValueError: `base` is not 10 or 16

    """
    rulers = {
        10: (
            "3322-2222-2222-1111-1111-1100-0000-0000",
            "1098-7654-3210-9876-5432-1098-7654-3210",
        ),
        16: (
            "1111-1111-1111-1111-0000-0000-0000-0000",
            "FEDC-BA98-7654-3210-FEDC-BA98-7654-3210",
        ),
    }
    if base not in rulers:
        raise ValueError(f"invalid base {base}, valid bases: 10, 16")
    top, bottom = (ruler.replace("-", sep) for ruler in rulers[base])
    bin_str = group_chars(bin(value)[2:], n=4, sep=sep)
    # right-align: keep only as much of the rulers as the binary string needs
    start = len(top) - len(bin_str)
    return [top[start:], bottom[start:], bin_str]
|
||||
|
||||
|
||||
def print_bitmask(value: int, *, base=10, sep="-"):
    """Print `value` in binary with a two-line bit-position ruler above."""
    lines = bitmask_strings(value, base, sep)
    assert len(lines) > 2
    divider_line = "=" * (max(map(len, lines)) + 4)
    print(divider_line)
    # the two ruler lines (bit positions)
    print("x0 :" + lines[0])
    print("0x :" + lines[1])
    print(divider_line)
    print("bin:" + lines[2])
|
||||
@@ -0,0 +1,80 @@
|
||||
# Copyright (c) 2021-2022, Manfred Moitzi
|
||||
# License: MIT License
|
||||
from __future__ import annotations
|
||||
from typing import Iterator, NamedTuple, Iterable, Optional
|
||||
from difflib import SequenceMatcher
|
||||
import enum
|
||||
|
||||
from ezdxf.lldxf.tags import Tags
|
||||
from ezdxf.lldxf.types import DXFVertex, DXFTag
|
||||
|
||||
__all__ = ["diff_tags", "print_diff", "OpCode"]
|
||||
|
||||
# https://docs.python.org/3/library/difflib.html
|
||||
|
||||
|
||||
class OpCode(enum.Enum):
    """Diff operation codes, mirroring difflib's opcode strings."""

    replace = enum.auto()
    delete = enum.auto()
    insert = enum.auto()
    equal = enum.auto()
|
||||
|
||||
|
||||
class Operation(NamedTuple):
    """A single diff operation: `opcode` applies to a[i1:i2] and b[j1:j2]."""

    opcode: OpCode
    i1: int
    i2: int
    j1: int
    j2: int
|
||||
|
||||
|
||||
# Map difflib opcode strings to OpCode enum members:
CONVERT = {
    "replace": OpCode.replace,
    "delete": OpCode.delete,
    "insert": OpCode.insert,
    "equal": OpCode.equal,
}
|
||||
|
||||
|
||||
def convert_opcodes(opcodes) -> Iterator[Operation]:
    """Convert difflib opcode 5-tuples into Operation named tuples."""
    for tag, i1, i2, j1, j2 in opcodes:
        yield Operation(CONVERT[tag], i1, i2, j1, j2)
|
||||
|
||||
|
||||
def round_tags(tags: Tags, ndigits: int) -> Iterator[DXFTag]:
    """Yield `tags` with all float values rounded to `ndigits` digits."""
    for tag in tags:
        if isinstance(tag, DXFVertex):
            # round each vertex component
            yield DXFVertex(tag.code, (round(d, ndigits) for d in tag.value))
        elif isinstance(tag.value, float):
            yield DXFTag(tag.code, round(tag.value, ndigits))
        else:
            # non-float values pass through unchanged
            yield tag
|
||||
|
||||
|
||||
def diff_tags(
    a: Tags, b: Tags, ndigits: Optional[int] = None
) -> Iterator[Operation]:
    """Diff two tag lists; floats are optionally rounded to `ndigits` digits
    before comparing, to ignore insignificant differences.
    """
    if ndigits is not None:
        # compare rounded copies, the input lists stay untouched
        a = Tags(round_tags(a, ndigits))
        b = Tags(round_tags(b, ndigits))

    sequencer = SequenceMatcher(a=a, b=b)
    return convert_opcodes(sequencer.get_opcodes())
|
||||
|
||||
|
||||
def print_diff(a: Tags, b: Tags, operations: Iterable[Operation]):
    """Print diff `operations` (from diff_tags) in human readable form;
    "equal" operations are not printed.
    """
    t1: DXFTag
    t2: DXFTag
    print()
    for op in operations:
        if op.opcode == OpCode.insert:
            for t1 in b[op.j1 : op.j2]:
                print(f"insert a[{op.i1}:{op.i2}]: {t1}")
        elif op.opcode == OpCode.replace:
            for index, t1 in enumerate(a[op.i1 : op.i2], op.i1):
                print(f"replace a[{index}]: {t1}")
            for index, t2 in enumerate(b[op.j1 : op.j2], op.j1):
                print(f"    by b[{index}]: {t2}")
        elif op.opcode == OpCode.delete:
            for index, t1 in enumerate(a[op.i1 : op.i2], op.i1):
                print(f"delete a[{index}]: {t1}")
|
||||
@@ -0,0 +1,48 @@
|
||||
# Copyright (c) 2011-2023, Manfred Moitzi
|
||||
# License: MIT License
|
||||
from __future__ import annotations
|
||||
from typing import TYPE_CHECKING, Optional
|
||||
from ezdxf.lldxf.types import is_valid_handle
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from ezdxf.document import Drawing
|
||||
|
||||
START_HANDLE = "1"
|
||||
|
||||
|
||||
class HandleGenerator:
    """Generate increasing DXF handles as uppercase hex strings."""

    def __init__(self, start_value: str = START_HANDLE):
        # never start below 1, "0" is the null handle
        self._handle: int = max(1, int(start_value, 16))

    # reset() re-initializes the counter
    reset = __init__

    def __str__(self):
        return f"{self._handle:X}"

    def next(self) -> str:
        """Return the current handle and advance the counter."""
        current = str(self)
        self._handle += 1
        return current

    __next__ = next

    def copy(self) -> HandleGenerator:
        """Return a new generator starting at the current state."""
        return HandleGenerator(str(self))
|
||||
|
||||
|
||||
class UnderlayKeyGenerator(HandleGenerator):
    """Generates keys like "Underlay00001" instead of hex handles."""

    def __str__(self):
        return "Underlay%05d" % self._handle
|
||||
|
||||
|
||||
def safe_handle(handle: Optional[str], doc: Optional["Drawing"] = None) -> str:
    """Return `handle` if it is usable, otherwise the null handle "0".

    With a `doc` given, the handle must exist in its entity database;
    without a document it only has to be syntactically valid.
    """
    if handle is None:
        return "0"
    assert isinstance(handle, str), "invalid type"
    if doc is not None:
        if handle not in doc.entitydb:
            return "0"
        # keep the handle exactly as stored in the entity database
        return handle
    if not is_valid_handle(handle):
        return "0"
    return handle.upper()
|
||||
@@ -0,0 +1,29 @@
|
||||
# created: 23.04.2018
|
||||
# Copyright (c) 2018 Manfred Moitzi
|
||||
# License: MIT License
|
||||
|
||||
from typing import Iterable
|
||||
|
||||
|
||||
class Index:
    """Resolve negative indices and slices for a container of known length."""

    def __init__(self, item):
        # accept a sized container or an explicit length
        try:
            self.length = len(item)
        except TypeError:
            self.length = int(item)

    def index(self, item: int, error=None) -> int:
        """Return `item` as positive index; negative indices count from the
        end. If `error` is given, raise it for out of range results.
        """
        value = int(item)
        result = value + self.length if value < 0 else value
        if error and not (0 <= result < self.length):
            raise error('index out of range')
        return result

    def slicing(self, *args) -> Iterable[int]:
        """Return the index range for a slice object or slice() arguments."""
        first = args[0]
        s = first if isinstance(first, slice) else slice(*args)
        return range(*s.indices(self.length))
|
||||
@@ -0,0 +1,71 @@
|
||||
# Copyright (c) 2011-2022, Manfred Moitzi
|
||||
# License: MIT License
|
||||
from __future__ import annotations
|
||||
from math import floor
|
||||
from datetime import datetime
|
||||
|
||||
|
||||
def frac(number: float) -> float:
    """Return the fractional part of `number`, always in the range [0, 1)."""
    # Python's modulo with a positive divisor is equivalent to
    # number - floor(number), also for negative input.
    return number % 1.0
|
||||
|
||||
|
||||
class JulianDate:
    """Convert a datetime into a Julian date number, stored in .result."""

    def __init__(self, date: datetime):
        self.date = date
        # whole-day Julian number plus the day fraction for the time of day
        self.result: float = self.julian_date() + self.fractional_day()

    def fractional_day(self) -> float:
        """Return the time of day as fraction of a day (0 .. <1)."""
        seconds = (
            self.date.hour * 3600.0 + self.date.minute * 60.0 + self.date.second
        )
        return seconds / 86400.0

    def julian_date(self) -> float:
        """Return the Julian day number of the calendar date, time ignored."""
        # Compact day-number approximation; the constants are specific to this
        # algorithm - do not "simplify" them.
        y = self.date.year + (float(self.date.month) - 2.85) / 12.0
        A = floor(367.0 * y) - 1.75 * floor(y) + self.date.day
        B = floor(A) - 0.75 * floor(y / 100.0)
        return floor(B) + 1721115.0
|
||||
|
||||
|
||||
class CalendarDate:
    """Convert a Julian date number back into a datetime, stored in .result."""

    def __init__(self, juliandate: float):
        self.jdate = juliandate
        year, month, day = self.get_date()
        # the fractional day part encodes the time of day
        hour, minute, second = frac2time(self.jdate)
        self.result = datetime(year, month, day, hour, minute, second)

    def get_date(self) -> tuple[int, int, int]:
        """Return (year, month, day) for the Julian day number, time ignored."""
        Z = floor(self.jdate)

        if Z < 2299161:
            A = Z  # julian calendar
        else:
            g = floor((Z - 1867216.25) / 36524.25)  # gregorian calendar
            A = Z + 1 + g - floor(g / 4.0)

        # Julian-day to calendar-date conversion (cf. the astronomical
        # "Meeus" formulas); constants are algorithm-specific - do not alter.
        B = A + 1524.
        C = floor((B - 122.1) / 365.25)
        D = floor(365.25 * C)
        E = floor((B - D) / 30.6001)

        day = B - D - floor(30.6001 * E)
        month = E - 1 if E < 14 else E - 13
        year = C - 4716 if month > 2 else C - 4715
        return int(year), int(month), int(day)
|
||||
|
||||
|
||||
def frac2time(jdate) -> tuple[int, int, int]:
    """Convert the fractional day part of `jdate` into (hour, minute, second)."""
    # fractional part of the Julian date scaled to seconds per day
    total_seconds = int((jdate % 1.0) * 86400.0)
    hour, remainder = divmod(total_seconds, 3600)
    minute, second = divmod(remainder, 60)
    return hour, minute, second
|
||||
|
||||
|
||||
def juliandate(date: datetime) -> float:
    """Return `date` as Julian date number including the day fraction."""
    return JulianDate(date).result
|
||||
|
||||
|
||||
def calendardate(juliandate: float) -> datetime:
    """Return the Julian date number `juliandate` as datetime."""
    return CalendarDate(juliandate).result
|
||||
@@ -0,0 +1,190 @@
|
||||
# Copyright (c) 2015-2022, Manfred Moitzi
|
||||
# License: MIT License
|
||||
from __future__ import annotations
|
||||
from typing import Sequence, Tuple, Optional
|
||||
from typing_extensions import TypeAlias
|
||||
from ezdxf.math import Vec2
|
||||
from ._iso_pattern import ISO_PATTERN
|
||||
|
||||
# Predefined hatch pattern prior to ezdxf v0.11 were scaled for imperial units,
|
||||
# and were too small for ISO units by a factor of 1/25.4, to replicate this
|
||||
# pattern scaling use load(measurement=0).
|
||||
|
||||
__all__ = [
|
||||
"load",
|
||||
"scale_pattern",
|
||||
"scale_all",
|
||||
"parse",
|
||||
"ISO_PATTERN",
|
||||
"IMPERIAL_PATTERN",
|
||||
"HatchPatternLineType",
|
||||
"HatchPatternType",
|
||||
"PatternAnalyser",
|
||||
]
|
||||
IMPERIAL_SCALE_FACTOR = 1.0 / 25.4
|
||||
HatchPatternLineType: TypeAlias = Tuple[
|
||||
float, Sequence[float], Sequence[float], Sequence[float]
|
||||
]
|
||||
HatchPatternType: TypeAlias = Sequence[HatchPatternLineType]
|
||||
|
||||
|
||||
def load(measurement: int = 1, factor: Optional[float] = None):
    """Load hatch pattern definition, default scaling is like the iso.pat of
    BricsCAD, set `measurement` to 0 to use the imperial (US) scaled pattern,
    which has a scaling factor of 1/25.4 = ~0.03937.

    Args:
        measurement: like the $MEASUREMENT header variable, 0 to user imperial
            scaled pattern, 1 to use ISO scaled pattern.
        factor: hatch pattern scaling factor, overrides `measurement`

    Returns: hatch pattern dict of scaled pattern

    """
    if factor is None:
        factor = 1.0 if measurement == 1 else IMPERIAL_SCALE_FACTOR
    pattern = ISO_PATTERN
    # avoid rebuilding the pattern dict when no scaling is required
    if factor != 1.0:
        pattern = scale_all(pattern, factor=factor)
    return pattern
|
||||
|
||||
|
||||
def scale_pattern(
    pattern: HatchPatternType, factor: float = 1, angle: float = 0
) -> HatchPatternType:
    """Scale (and optionally rotate) a single hatch pattern definition and
    return a new pattern list; the input pattern is not modified.
    """
    # round to suppress floating point noise from scaling/rotation
    ndigits = 10

    def _scale(iterable) -> Sequence[float]:
        return [round(i * factor, ndigits) for i in iterable]

    def _scale_line(line) -> HatchPatternLineType:
        angle0, base_point, offset, dash_length_items = line
        if angle:
            # rotate base point and offset, add rotation to the line angle
            base_point = Vec2(base_point).rotate_deg(angle)
            offset = Vec2(offset).rotate_deg(angle)
            angle0 = (angle0 + angle) % 360.0

        # noinspection PyTypeChecker
        return [  # type: ignore
            round(angle0, ndigits),
            tuple(_scale(base_point)),
            tuple(_scale(offset)),
            _scale(dash_length_items),
        ]

    return [_scale_line(line) for line in pattern]
|
||||
|
||||
|
||||
def scale_all(pattern: dict, factor: float = 1, angle: float = 0):
    """Scale and rotate all pattern definitions of a pattern dict."""
    return {name: scale_pattern(p, factor, angle) for name, p in pattern.items()}
|
||||
|
||||
|
||||
def parse(pattern: str) -> dict:
    """Parse hatch pattern file content given as string and return a
    pattern dict: {name: pattern definition}.

    Raises:
        ValueError: incompatible pattern definition

    """
    try:
        comp = PatternFileCompiler(pattern)
        return comp.compile_pattern()
    except Exception as e:
        # chain the original exception for easier debugging of the
        # actual parsing error
        raise ValueError("Incompatible pattern definition.") from e
|
||||
|
||||
|
||||
def _tokenize_pattern_line(line: str) -> list:
|
||||
return line.split(",", maxsplit=1 if line.startswith("*") else -1)
|
||||
|
||||
|
||||
class PatternFileCompiler:
    """Compile hatch pattern file (.pat) content into a pattern dict."""

    def __init__(self, content: str):
        # keep only non-empty lines which are not comments (";"), tokenized
        self._lines = [
            _tokenize_pattern_line(line)
            for line in (line.strip() for line in content.split("\n"))
            if line and line[0] != ";"
        ]

    def _parse_pattern(self):
        """Yield raw patterns: [[name, description], [floats], [floats], ...]."""
        pattern = []
        for line in self._lines:
            if line[0].startswith("*"):
                # header line starts a new pattern, emit the previous one
                if pattern:
                    yield pattern
                pattern = [[line[0][1:], line[1]]]  # name, description
            else:
                pattern.append([float(e) for e in line])  # list[floats]

        if pattern:
            yield pattern

    def compile_pattern(self, ndigits: int = 10) -> dict:
        """Return a dict {name: compiled pattern lines}, each line as
        [angle, (base_x, base_y), (offset_x, offset_y), [dash lengths]].
        """
        pattern = dict()
        for p in self._parse_pattern():
            pat = []
            for line in p[1:]:
                # offset before rounding:
                offset = Vec2(line[3], line[4])

                # round all values:
                line = [round(e, ndigits) for e in line]
                pat_line = []

                angle = line[0]
                pat_line.append(angle)

                # base point:
                pat_line.append((line[1], line[2]))

                # rotate offset:
                offset = offset.rotate_deg(angle)
                pat_line.append((round(offset.x, ndigits), round(offset.y, ndigits)))

                # line dash pattern
                pat_line.append(line[5:])
                pat.append(pat_line)
            pattern[p[0][0]] = pat
        return pattern
|
||||
|
||||
|
||||
# Pattern dict scaled for imperial units (factor 1/25.4):
IMPERIAL_PATTERN = load(measurement=0)
|
||||
|
||||
|
||||
def is_solid(pattern: Sequence[float]) -> bool:
    """True if the dash pattern is empty, which means a solid line."""
    return len(pattern) == 0
|
||||
|
||||
|
||||
def round_angle_15_deg(angle: float) -> int:
    """Normalize `angle` (degrees) into [0, 180) and round to the nearest
    multiple of 15.
    """
    normalized = angle % 180
    return round(normalized / 15) * 15
|
||||
|
||||
|
||||
class PatternAnalyser:
    """Answer simple questions about the lines of a hatch pattern."""

    def __init__(self, pattern: HatchPatternType):
        # List of 2-tuples: (angle, is solid line pattern)
        # angle is rounded to a multiple of 15° in the range [0, 180)
        self._lines: list[tuple[int, bool]] = [
            (round_angle_15_deg(angle), is_solid(line_pattern))
            for angle, _, _, line_pattern in pattern
        ]

    def has_angle(self, angle: int) -> bool:
        """True if any pattern line has the given `angle`."""
        return any(angle_ == angle for angle_, _ in self._lines)

    def all_angles(self, angle: int) -> bool:
        """True if all pattern lines have the given `angle`."""
        return all(angle_ == angle for angle_, _ in self._lines)

    def has_line(self, angle: int, solid: bool) -> bool:
        """True if any pattern line matches `angle` and the `solid` state."""
        return any(
            angle_ == angle and solid_ == solid for angle_, solid_ in self._lines
        )

    def all_lines(self, angle: int, solid: bool) -> bool:
        """True if all pattern lines match `angle` and the `solid` state."""
        return all(
            angle_ == angle and solid_ == solid for angle_, solid_ in self._lines
        )

    def has_solid_line(self) -> bool:
        """True if any pattern line is solid."""
        return any(solid for _, solid in self._lines)

    def has_dashed_line(self) -> bool:
        """True if any pattern line is dashed."""
        return any(not solid for _, solid in self._lines)

    def all_solid_lines(self) -> bool:
        """True if all pattern lines are solid."""
        return all(solid for _, solid in self._lines)

    def all_dashed_lines(self) -> bool:
        """True if all pattern lines are dashed."""
        return all(not solid for _, solid in self._lines)
|
||||
@@ -0,0 +1,36 @@
|
||||
# Copyright (c) 2021-2022, Manfred Moitzi
|
||||
# License: MIT License
|
||||
from __future__ import annotations
|
||||
from typing import Union, Iterable, TYPE_CHECKING
|
||||
from pathlib import Path
|
||||
|
||||
from ezdxf.lldxf import loader
|
||||
from ezdxf.lldxf.types import DXFTag
|
||||
from ezdxf.lldxf.tagger import ascii_tags_loader
|
||||
from ezdxf.lldxf.validator import is_dxf_file
|
||||
from ezdxf.filemanagement import dxf_file_info
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from ezdxf.eztypes import SectionDict
|
||||
|
||||
|
||||
def raw_structure_loader(filename: Union[str, Path]) -> SectionDict:
    """Load content of ASCII DXF file `filename` as SectionDict, all tags are in
    raw format with the group code as integer and the value as string: (int, str).

    """
    # no tag compilation: fastest way to load the DXF structure
    tagger = get_tag_loader(filename)
    return loader.load_dxf_structure(tagger)
|
||||
|
||||
|
||||
def get_tag_loader(
    filename: Union[str, Path], errors: str = "ignore"
) -> Iterable[DXFTag]:
    """Load all DXF tags from the ASCII DXF file `filename`.

    Args:
        filename: path of the DXF file to load
        errors: decoding error handler passed to :func:`open`

    Raises:
        IOError: `filename` is not an ASCII DXF file

    """
    filename = str(filename)
    if not is_dxf_file(filename):
        # Bug fix: the f-string had no placeholder, the error message never
        # contained the actual file name.
        raise IOError(f"File '{filename}' is not an ASCII DXF file.")

    info = dxf_file_info(filename)
    with open(filename, mode="rt", encoding=info.encoding, errors=errors) as fp:
        # materialize the tag list while the file is still open
        return list(ascii_tags_loader(fp, skip_comments=True))
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,192 @@
|
||||
# Copyright (c) 2021-2022, Manfred Moitzi
|
||||
# License: MIT License
|
||||
from __future__ import annotations
|
||||
from typing import BinaryIO, Optional, Sequence
|
||||
from ezdxf.lldxf.validator import is_dxf_file, DXFStructureError
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
class TagWriter:
    """Write raw DXF tag line pairs (group code line, value line) to a
    binary stream without any modification."""

    def __init__(self, fp: BinaryIO):
        self.fp = fp

    def write(self, raw_code_str: bytes, raw_value_str: bytes):
        stream = self.fp
        stream.write(raw_code_str)
        stream.write(raw_value_str)
||||
|
||||
|
||||
class ThumbnailRemover(TagWriter):
    """TagWriter which drops the whole THUMBNAILIMAGE section.

    The SECTION start tag is buffered until the following section name tag
    (2, <name>) is seen, so the section can be suppressed as a whole.
    """

    def __init__(self, fp: BinaryIO):
        super().__init__(fp)
        self._start_section = False  # previous tag was (0, SECTION)
        self._skip_tags = False  # currently inside the THUMBNAILIMAGE section
        self._section_code: Optional[bytes] = None  # buffered SECTION tag
        self._section_value: Optional[bytes] = None
        self.removed_thumbnail_image = False  # True if a section was removed

    def write(self, raw_code_str: bytes, raw_value_str: bytes):
        code = raw_code_str.strip()
        value = raw_value_str.strip()
        if self._start_section:
            # this tag names the section started by the buffered SECTION tag
            self._start_section = False
            if code == b"2" and value == b"THUMBNAILIMAGE":
                self._skip_tags = True
                self.removed_thumbnail_image = True
            else:
                # write buffered section tag:
                super().write(self._section_code, self._section_value)  # type: ignore

        if code == b"0":
            if value == b"SECTION":
                self._start_section = True
                self._skip_tags = False
                # buffer section tag:
                self._section_code = raw_code_str
                self._section_value = raw_value_str
                return
            elif value == b"ENDSEC":
                skip = self._skip_tags
                self._skip_tags = False
                if skip:  # don't write ENDSEC
                    return

        if not self._skip_tags:
            super().write(raw_code_str, raw_value_str)
|
||||
|
||||
|
||||
def strip_tags(
    infile: BinaryIO,
    tagwriter: TagWriter,
    codes: Sequence[int],
    verbose=False,
) -> int:
    """Copy DXF tags from `infile` to `tagwriter`, skipping all tags whose
    group code is in `codes`; returns the count of removed tags.

    Raises:
        DXFStructureError: invalid group code or premature end of file

    """
    search_codes = set(codes)
    line_number: int = 1
    removed_tags: int = 0
    while True:
        # a tag consists of two lines: the group code line ...
        try:
            raw_code_str = infile.readline()
        except EOFError:
            raw_code_str = b""
        if raw_code_str == b"":  # regular end of file
            return removed_tags
        try:
            code = int(raw_code_str)
        except ValueError:
            code_str = raw_code_str.strip().decode(encoding="utf8", errors="ignore")
            raise DXFStructureError(
                f'CANCELED: "{infile.name}" - found invalid '
                f'group code "{code_str}" at line {line_number}'
            )

        # ... and the value line
        try:
            raw_value_str = infile.readline()
        except EOFError:
            raw_value_str = b""

        if raw_value_str == b"":
            # a group code without a value line is a structure error
            raise DXFStructureError(
                f'CANCELED: "{infile.name}" - premature end of file'
            )
        line_number += 2
        if code not in search_codes:
            tagwriter.write(raw_code_str, raw_value_str)
        else:
            if verbose:
                value = raw_value_str.strip()
                _value = value.decode(encoding="utf8", errors="ignore")
                print(f'removing tag: ({code}, "{_value}")')
            removed_tags += 1
|
||||
|
||||
|
||||
def safe_rename(source: Path, target: Path, backup=True, verbose=False) -> bool:
    """Rename `source` to `target`, moving an existing target out of the way
    to a ".bak" file first; the backup is deleted again unless `backup` is
    True. Returns True on success, False if any rename failed.
    """
    def _rename(src: Path, dst: Path) -> bool:
        if verbose:
            print(f'renaming "{src.name}" to "{dst.name}"')
        try:
            src.rename(dst)
        except IOError as e:
            print(f"IOError: {str(e)}")
            return False
        return True

    backup_file = target.with_suffix(".bak")
    backup_file.unlink(missing_ok=True)
    existing_target = Path(target)
    if existing_target.exists() and not _rename(existing_target, backup_file):
        return False

    if not _rename(source, target):
        return False

    if not backup:
        if verbose:
            print(f'deleting backup file "{backup_file.name}"')
        backup_file.unlink(missing_ok=True)
    return True
|
||||
|
||||
|
||||
# default group codes to strip: 999 = comment tags
DEFAULT_CODES = (999,)
|
||||
|
||||
|
||||
def strip(
    filename: str,
    backup=False,
    thumbnail=False,
    verbose=False,
    codes: Sequence[int] = DEFAULT_CODES,
):
    """Strip tags with the given group `codes` (and optionally the
    THUMBNAILIMAGE section) from the ASCII DXF file `filename`.

    Works on a temporary copy and replaces the source file only if anything
    was removed; optionally keeps the original file as ".bak" backup.

    Args:
        filename: name of the DXF file to process
        backup: keep a backup of the original file
        thumbnail: also remove the THUMBNAILIMAGE section
        verbose: print progress information
        codes: group codes of tags to remove, default: comment tags (999)

    """
    def remove_tmp_file():
        if tmp_file.exists():
            if verbose:
                print(f'deleting temp file: "{tmp_file.name}"')
            tmp_file.unlink(missing_ok=True)

    if verbose:
        # Bug fix: these f-strings contained the literal text "(unknown)"
        # instead of interpolating the processed file name.
        print(f'\nProcessing file: "{filename}"')
    try:
        if not is_dxf_file(filename):
            print(
                f'CANCELED: "{filename}" is not a DXF file, binary DXF files '
                f"are not supported"
            )
            return
    except IOError as e:
        print(f"IOError: {str(e)}")
        return
    source_file = Path(filename)
    tmp_file = source_file.with_suffix(".ezdxf.tmp")
    error = False
    # robustness: keep removed_tags bound even if strip_tags() raises
    removed_tags = 0
    tagwriter: TagWriter
    if verbose:
        print(f'make a temporary copy: "{tmp_file.name}"')
    with open(tmp_file, "wb") as fp, open(source_file, "rb") as infile:
        if thumbnail:
            tagwriter = ThumbnailRemover(fp)
        else:
            tagwriter = TagWriter(fp)
        try:
            removed_tags = strip_tags(infile, tagwriter, codes=codes, verbose=verbose)
        except IOError as e:
            print(f"IOError: {str(e)}")
            error = True
        except DXFStructureError as e:
            print(str(e))
            error = True

    if not error:
        rename = False
        if thumbnail and tagwriter.removed_thumbnail_image:  # type: ignore
            print(f'"{source_file.name}" - removed THUMBNAILIMAGE section')
            rename = True

        if removed_tags > 0:
            tags = "tag" if removed_tags == 1 else "tags"
            print(f'"{source_file.name}" - {removed_tags} {tags} removed')
            rename = True

        if rename:
            # replace the source file only if something was removed
            safe_rename(tmp_file, source_file, backup, verbose)

    remove_tmp_file()
|
||||
@@ -0,0 +1,50 @@
|
||||
# Copyright (c) 2011-2022, Manfred Moitzi
|
||||
# License: MIT License
|
||||
from __future__ import annotations
|
||||
from typing import Sequence, TYPE_CHECKING, Iterable
|
||||
from ezdxf.lldxf.tagger import internal_tag_compiler
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from ezdxf.lldxf.types import DXFTag
|
||||
from ezdxf.lldxf.extendedtags import ExtendedTags
|
||||
|
||||
|
||||
def compile_tags_without_handles(text: str) -> Iterable[DXFTag]:
    """Compile DXF tags from `text`, skipping handle tags (group codes 5 and 105)."""
    return (
        tag for tag in internal_tag_compiler(text) if tag.code not in (5, 105)
    )
|
||||
|
||||
|
||||
def normlines(text: str) -> Sequence[str]:
    """Split `text` into lines and strip surrounding whitespace from each."""
    return [line.strip() for line in text.split("\n")]
|
||||
|
||||
|
||||
def load_section(text: str, name: str) -> list[ExtendedTags]:
    """Load the DXF section `name` from DXF file content `text`."""
    from ezdxf.lldxf.loader import load_dxf_structure

    # a missing EOF tag is acceptable for test data
    dxf = load_dxf_structure(
        internal_tag_compiler(text), ignore_missing_eof=True
    )
    return dxf[name]  # type: ignore
|
||||
|
||||
|
||||
def load_entities(text: str, name: str):
    """Load the DXF entities of section `name` from DXF file content `text`."""
    from ezdxf.lldxf.loader import load_dxf_structure, load_dxf_entities

    # a missing EOF tag is acceptable for test data
    dxf = load_dxf_structure(
        internal_tag_compiler(text), ignore_missing_eof=True
    )
    return load_dxf_entities(dxf[name])  # type: ignore
|
||||
|
||||
|
||||
def parse_hex_dump(txt: str) -> bytes:
    """Parse a hex dump where every line starts with its byte offset
    followed by space separated hex byte values.
    """
    buffer = bytearray()
    for line in txt.split("\n"):
        if not line:
            continue
        values = [int(field, 16) for field in line.strip().split(" ")]
        # first field is the offset, it must match the bytes collected so far
        assert values[0] == len(buffer)
        buffer.extend(values[1:])
    return buffer
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,197 @@
|
||||
# Copyright (c) 2021-2023, Manfred Moitzi
|
||||
# License: MIT License
|
||||
from __future__ import annotations
|
||||
from typing import Sequence, Optional
|
||||
from dataclasses import dataclass
|
||||
|
||||
from ezdxf.math import Matrix44, Vec2
|
||||
from ezdxf.entities import Text, MText, get_font_name
|
||||
from ezdxf.fonts import fonts
|
||||
from ezdxf.tools import text_layout as tl
|
||||
from ezdxf.tools.text import MTextContext
|
||||
from ezdxf.render.abstract_mtext_renderer import AbstractMTextRenderer
|
||||
from ezdxf.tools.text import estimate_mtext_extents
|
||||
|
||||
__all__ = [
|
||||
"text_size",
|
||||
"mtext_size",
|
||||
"TextSize",
|
||||
"MTextSize",
|
||||
"WordSizeDetector",
|
||||
# estimate_mtext_extents() belongs also to the topic of this module, users
|
||||
# may look here first
|
||||
"estimate_mtext_extents",
|
||||
]
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class TextSize:
    """Measured size of a TEXT entity."""

    width: float
    # The text entity has a fixed font:
    cap_height: float  # height of "X" without descender
    total_height: float  # including the descender
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class MTextSize:
    """Measured size and column layout of a MTEXT entity, see :func:`mtext_size`."""

    # overall width: all columns plus the gutters between them
    total_width: float
    # height of the tallest column
    total_height: float
    # width of a single column (same value for all columns)
    column_width: float
    # space between adjacent columns (same value for all columns)
    gutter_width: float
    # measured height of each column
    column_heights: Sequence[float]

    # Storing additional font metrics like "cap_height" makes no sense, because
    # the font metrics can be variable by using inline codes to vary the text
    # height or the width factor or even changing the used font at all.
    @property
    def column_count(self) -> int:
        """Returns the count of columns."""
        return len(self.column_heights)
|
||||
|
||||
|
||||
def text_size(text: Text) -> TextSize:
    """Measure a :class:`~ezdxf.entities.Text` entity.

    Returns the measured text width, the font cap-height and the font
    total-height as a :class:`TextSize` container.

    If the optional `Matplotlib` package is available it is used to measure
    the final rendering width and font-height as close as possible; the real
    char height is NOT measured.  Without `Matplotlib` the
    :class:`~ezdxf.tools.fonts.MonospaceFont` is used as fallback and the
    measurements are very inaccurate.

    See the :mod:`~ezdxf.addons.text2path` add-on for more tools to work
    with the text path objects created by the `Matplotlib` package.

    """
    width_factor: float = text.dxf.get_default("width")
    cap_height: float = text.dxf.get_default("height")
    measured_width: float = 0.0
    # fallback font if the entity is not bound to a document:
    font: fonts.AbstractFont = fonts.MonospaceFont(cap_height, width_factor)
    if text.doc is not None:
        font = fonts.make_font(get_font_name(text), cap_height, width_factor)
    content = text.plain_text()
    if content:
        measured_width = font.text_width(content)
    return TextSize(measured_width, cap_height, font.measurements.total_height)
|
||||
|
||||
|
||||
def mtext_size(
    mtext: MText, tool: Optional[MTextSizeDetector] = None
) -> MTextSize:
    """Measure a :class:`~ezdxf.entities.MText` entity.

    Returns the total-width, total-height and the column layout information
    as a :class:`MTextSize` container.

    If the optional `Matplotlib` package is available it is used for font
    measurements, and the internal text layout engine determines the final
    rendering size as close as possible.  Without `Matplotlib` the
    :class:`~ezdxf.tools.fonts.MonospaceFont` is used as fallback and the
    measurements are very inaccurate.

    Attention: The required full layout calculation is slow!

    The first call with `Matplotlib` support is very slow, because
    `Matplotlib` looks up all available fonts on the system. To speed up the
    calculation while accepting inaccurate results, the `Matplotlib` support
    can be disabled manually::

        ezdxf.option.use_matplotlib = False

    """
    detector = tool if tool is not None else MTextSizeDetector()
    heights: list[float] = [0.0]
    gutter = 0.0
    width = 0.0
    if mtext.text:
        columns: list[tl.Column] = list(detector.measure(mtext))
        if columns:
            # width and gutter are equal for all columns:
            first = columns[0]
            width = first.total_width
            gutter = first.gutter
            heights = [column.total_height for column in columns]
    count = len(heights)
    return MTextSize(
        total_width=count * width + (count - 1) * gutter,
        total_height=max(heights),
        column_width=width,
        gutter_width=gutter,
        column_heights=tuple(heights),
    )
|
||||
|
||||
|
||||
class MTextSizeDetector(AbstractMTextRenderer):
    """MTEXT renderer that produces no output and exists only to run the
    layout engine for size measurements.
    """

    def __init__(self):
        super().__init__()
        self.do_nothing = tl.DoNothingRenderer()
        self.renderer = self.do_nothing

    def reset(self):
        # nothing to reset for pure size detection
        pass

    def word(self, text: str, ctx: MTextContext) -> tl.ContentCell:
        # The first call to get_font() is very slow!
        font = self.get_font(ctx)
        return tl.Text(
            width=font.text_width(text),
            height=ctx.cap_height,
            valign=tl.CellAlignment(ctx.align),
            renderer=self.renderer,
        )

    def fraction(self, data: tuple, ctx: MTextContext) -> tl.ContentCell:
        numerator, denominator, stacking_type = data
        if not stacking_type:
            # no stacking: render the numerator as plain word
            return self.word(numerator, ctx)
        return tl.Fraction(
            top=self.word(numerator, ctx),
            bottom=self.word(denominator, ctx),
            stacking=self.get_stacking(stacking_type),
            renderer=self.renderer,
        )

    def get_font_face(self, mtext: MText) -> fonts.FontFace:
        return fonts.get_entity_font_face(mtext)

    def make_bg_renderer(self, mtext: MText) -> tl.ContentRenderer:
        # background fills are irrelevant for measuring
        return self.do_nothing

    def measure(self, mtext: MText) -> tl.Layout:
        """Run the layout engine and return the placed layout."""
        self.reset()
        layout = self.layout_engine(mtext)
        layout.place()
        return layout
|
||||
|
||||
|
||||
class WordSizeCollector(tl.DoNothingRenderer):
    """Collects word sizes as (lower-left, upper-right) corner tuples of
    Vec2 objects, lines are ignored.
    """

    def __init__(self) -> None:
        # collected word bounding boxes as (lower-left, upper-right) pairs
        self.word_boxes: list[tuple[Vec2, Vec2]] = []

    def render(
        self,
        left: float,
        bottom: float,
        right: float,
        top: float,
        m: Optional[Matrix44] = None,
    ) -> None:
        lower_left = Vec2(left, bottom)
        upper_right = Vec2(right, top)
        self.word_boxes.append((lower_left, upper_right))
|
||||
|
||||
|
||||
class WordSizeDetector(MTextSizeDetector):
    """MTEXT measuring tool that additionally collects the bounding box of
    every rendered word.
    """

    def reset(self):
        # install a fresh collector for each measurement run
        self.renderer = WordSizeCollector()

    def measure(self, mtext: MText) -> tl.Layout:
        result = super().measure(mtext)
        # rendering the layout feeds the WordSizeCollector
        result.render()
        return result

    def word_boxes(self) -> list[tuple[Vec2, Vec2]]:
        """Returns the collected word boxes as (lower-left, upper-right)
        corner tuples of Vec2 objects.
        """
        return self.renderer.word_boxes
|
||||
@@ -0,0 +1,87 @@
|
||||
# Copyright (c) 2014-2023, Manfred Moitzi
|
||||
# License: MIT License
|
||||
from __future__ import annotations
|
||||
from typing import BinaryIO, cast, TextIO, Optional, Iterator
|
||||
import zipfile
|
||||
from contextlib import contextmanager
|
||||
|
||||
from ezdxf.lldxf.validator import is_dxf_stream, dxf_info
|
||||
|
||||
CRLF = b"\r\n"
|
||||
LF = b"\n"
|
||||
|
||||
|
||||
class ZipReader:
    """Reads a DXF file from a zip archive.

    The instance itself is passed (cast to TextIO) to :func:`is_dxf_stream`
    and :func:`dxf_info`, which only require the :meth:`readline` method —
    it decodes the binary zip stream line by line.
    """

    def __init__(self, zip_archive_name: str, errors="surrogateescape"):
        """
        Args:
            zip_archive_name: file system path of the zip archive
            errors: decoding error handler passed to :class:`str`

        Raises:
            IOError: if `zip_archive_name` is not a zip archive

        """
        if not zipfile.is_zipfile(zip_archive_name):
            raise IOError(f"'{zip_archive_name}' is not a zip archive.")
        self.zip_archive_name = zip_archive_name
        # set by open():
        self.zip_archive: Optional[zipfile.ZipFile] = None
        self.dxf_file_name: Optional[str] = None
        self.dxf_file: Optional[BinaryIO] = None
        # default text encoding, updated by get_dxf_info():
        self.encoding = "cp1252"
        self.errors = errors
        # default DXF version, updated by get_dxf_info():
        self.dxfversion = "AC1009"

    def open(self, dxf_file_name: Optional[str] = None) -> None:
        """Open the DXF file `dxf_file_name` inside the zip archive, or the
        first DXF file found if no name is given.

        The member file is opened three times in sequence: first to validate
        it is a DXF stream, then to detect encoding and DXF version, and a
        final time so subsequent reads start at the beginning.

        Raises:
            IOError: if the member file is not a DXF file

        """
        def open_dxf_file() -> BinaryIO:
            # Open always in binary mode:
            return cast(BinaryIO, self.zip_archive.open(self.dxf_file_name))  # type: ignore

        self.zip_archive = zipfile.ZipFile(self.zip_archive_name)
        self.dxf_file_name = (
            dxf_file_name
            if dxf_file_name is not None
            else self.get_first_dxf_file_name()
        )
        self.dxf_file = open_dxf_file()

        # Reading with standard encoding 'cp1252' - readline() fails if leading
        # comments contain non-ASCII characters.
        if not is_dxf_stream(cast(TextIO, self)):
            raise IOError(f"'{self.dxf_file_name}' is not a DXF file.")
        self.dxf_file = open_dxf_file()  # restart
        self.get_dxf_info()
        self.dxf_file = open_dxf_file()  # restart

    def get_first_dxf_file_name(self) -> str:
        """Returns the name of the first DXF file in the zip archive.

        Raises:
            IOError: if the archive contains no DXF files

        """
        dxf_file_names = self.get_dxf_file_names()
        if len(dxf_file_names) > 0:
            return dxf_file_names[0]
        else:
            raise IOError("No DXF files found.")

    def get_dxf_file_names(self) -> list[str]:
        """Returns the names of all member files with a ".dxf" extension
        (case insensitive); requires a previously opened archive.
        """
        assert self.zip_archive is not None
        return [
            name
            for name in self.zip_archive.namelist()
            if name.lower().endswith(".dxf")
        ]

    def get_dxf_info(self) -> None:
        """Detect text encoding and DXF version from the header and store
        them in :attr:`encoding` and :attr:`dxfversion`; consumes the
        current DXF stream.
        """
        info = dxf_info(cast(TextIO, self))
        # Since DXF R2007 (AC1021) file encoding is always 'utf-8'
        self.encoding = info.encoding if info.version < "AC1021" else "utf-8"
        self.dxfversion = info.version

    def readline(self) -> str:
        """Read the next line from the DXF member file and decode it with
        the current :attr:`encoding`; CRLF line endings are normalized to LF.
        """
        assert self.dxf_file is not None
        next_line = self.dxf_file.readline().replace(CRLF, LF)
        return str(next_line, self.encoding, self.errors)

    def close(self) -> None:
        """Close the zip archive; requires a previously opened archive."""
        assert self.zip_archive is not None
        self.zip_archive.close()
|
||||
|
||||
|
||||
@contextmanager
def ctxZipReader(
    zipfilename: str,
    filename: Optional[str] = None,
    errors: str = "surrogateescape",
) -> Iterator[ZipReader]:
    """Context manager yielding an opened :class:`ZipReader`.

    Args:
        zipfilename: file system path of the zip archive
        filename: name of the DXF file inside the zip archive, opens the
            first DXF file found if ``None``
        errors: decoding error handler passed to :class:`ZipReader`

    """
    zip_reader = ZipReader(zipfilename, errors=errors)
    zip_reader.open(filename)
    try:
        yield zip_reader
    finally:
        # close the archive even if the managed block raised an exception;
        # without try/finally an exception would leak the open zip archive
        zip_reader.close()
|
||||
Reference in New Issue
Block a user