diff --git a/io_mesh_3mf/__init__.py b/io_mesh_3mf/__init__.py
new file mode 100644
index 000000000..8a82d8213
--- /dev/null
+++ b/io_mesh_3mf/__init__.py
@@ -0,0 +1,81 @@
+# Blender add-on to import and export 3MF files.
+# Copyright (C) 2020 Ghostkeeper
+# This add-on is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
+# Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any
+# later version.
+# This add-on is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
+# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for
+# details.
+# You should have received a copy of the GNU Affero General Public License along with this plug-in. If not, see
+# <https://www.gnu.org/licenses/>.
+
+# <pep8 compliant>
+
+bl_info = {
+ "name": "3MF format",
+ "author": "Ghostkeeper",
+ "version": (1, 0, 0),
+ "blender": (2, 80, 0),
+ "location": "File > Import-Export",
+ "description": "Import-Export 3MF files",
+ "category": "Import-Export"
+}
+
+"""
+Import and export 3MF files in Blender.
+"""
+
+# Reload functionality.
+if "bpy" in locals():
+ import importlib
+ if "import_3mf" in locals():
+ importlib.reload(import_3mf)
+ if "export_3mf" in locals():
+ importlib.reload(export_3mf)
+
+import bpy.utils # To (un)register the add-on.
+import bpy.types # To (un)register the add-on as an import/export function.
+
+from .import_3mf import Import3MF # Imports 3MF files.
+from .export_3mf import Export3MF # Exports 3MF files.
+
+
+def menu_import(self, _):
+ """
+ Calls the 3MF import operator from the menu item.
+ """
+ self.layout.operator(Import3MF.bl_idname, text="3D Manufacturing Format (.3mf)")
+
+
+def menu_export(self, _):
+ """
+ Calls the 3MF export operator from the menu item.
+ """
+ self.layout.operator(Export3MF.bl_idname, text="3D Manufacturing Format (.3mf)")
+
+
+classes = (
+ Import3MF,
+ Export3MF
+)
+
+
+def register():
+ for cls in classes:
+ bpy.utils.register_class(cls)
+
+ bpy.types.TOPBAR_MT_file_import.append(menu_import)
+ bpy.types.TOPBAR_MT_file_export.append(menu_export)
+
+
+def unregister():
+ for cls in classes:
+ bpy.utils.unregister_class(cls)
+
+ bpy.types.TOPBAR_MT_file_import.remove(menu_import)
+ bpy.types.TOPBAR_MT_file_export.remove(menu_export)
+
+
+# Allow the add-on to be run directly without installation.
+if __name__ == "__main__":
+ register()
diff --git a/io_mesh_3mf/annotations.py b/io_mesh_3mf/annotations.py
new file mode 100644
index 000000000..60bde8d91
--- /dev/null
+++ b/io_mesh_3mf/annotations.py
@@ -0,0 +1,326 @@
+# Blender add-on to import and export 3MF files.
+# Copyright (C) 2020 Ghostkeeper
+# This add-on is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
+# Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any
+# later version.
+# This add-on is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
+# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for
+# details.
+# You should have received a copy of the GNU Affero General Public License along with this plug-in. If not, see
+# <https://www.gnu.org/licenses/>.
+
+# <pep8 compliant>
+
+import bpy # To store the annotations long-term in the Blender context.
+import collections # Namedtuple data structure for annotations, and Counter to write optimized content types.
+import json # To serialize the data for long-term storage in the Blender scene.
+import logging # Reporting parsing errors.
+import os.path # To parse target paths in relationships.
+import urllib.parse # To parse relative target paths in relationships.
+import xml.etree.ElementTree # To parse the relationships files.
+
+from .constants import *
+
+
+# These are the different types of annotations we can store.
+Relationship = collections.namedtuple("Relationship", ["namespace", "source"])
+ContentType = collections.namedtuple("ContentType", ["mime_type"])
+
+# Flag object to denote that different 3MF archives give different content types to the same file in the archive.
+ConflictingContentType = object()
+
+ANNOTATION_FILE = ".3mf_annotations" # File name to use to store the annotations in the Blender data.
+
+
+class Annotations:
+ """
+ This is a collection of annotations for a 3MF document. It annotates the files in the archive with metadata
+ information.
+
+    The class contains serialisation and deserialisation functions in order to be able to load and save the annotations
+ from/to a 3MF archive, and to load and save the annotations in the Blender scene.
+
+ The annotations are stored in the `self.annotations` dictionary. The keys of this dictionary are the targets of the
+ annotations, normally the files in this archive. It can be any URI however, and the files don't necessarily need to
+ exist.
+
+ The values are sets of annotations. The annotations are named tuples as described in the beginning of this module.
+ The set can contain any mixture of these named tuples. Duplicates will get filtered out by the nature of the set
+ data structure.
+ """
+
+ def __init__(self):
+ """
+ Creates an empty collection of annotations.
+ """
+ # All of the annotations so far. Keys are the target files of the annotations. Values are sets of annotation
+ # objects.
+ self.annotations = {}
+
+ def add_rels(self, rels_file):
+ """
+ Add relationships to this collection from a file stream containing a .rels file from a 3MF archive.
+
+ A relationship is treated as a file annotation, because it only contains a file that the relationship is
+ targeting, and a meaningless namespace. The relationship also originates from a source, indicated by the path to
+ the relationship file. This will also get stored, so that it can be properly restored later.
+
+ Duplicate relationships won't get stored.
+ :param rels_file: A file stream containing a .rels file.
+ """
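+        # For example (illustrative): a /_rels/.rels entry such as
+        #     <Relationship Id="rel0" Target="/Metadata/thumbnail.png"
+        #                   Type="http://schemas.openxmlformats.org/package/2006/relationships/metadata/thumbnail"/>
+        # is stored as Relationship(namespace=<the Type URI>, source="/") under the key "Metadata/thumbnail.png".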
+        # Relationships are evaluated relative to the folder that contains the _rels folder around the .rels file, if any.
+ base_path = os.path.dirname(rels_file.name) + "/"
+ if os.path.basename(os.path.dirname(base_path)) == RELS_FOLDER:
+ base_path = os.path.dirname(os.path.dirname(base_path)) + "/"
+
+ try:
+ root = xml.etree.ElementTree.ElementTree(file=rels_file)
+ except xml.etree.ElementTree.ParseError as e:
+ logging.warning(
+ f"Relationship file {rels_file.name} has malformed XML (position {e.position[0]}:{e.position[1]}).")
+ return # Skip this file.
+
+ for relationship_node in root.iterfind(RELS_RELATIONSHIP_FIND, RELS_NAMESPACES):
+ try:
+ target = relationship_node.attrib["Target"]
+ namespace = relationship_node.attrib["Type"]
+ except KeyError as e:
+ logging.warning(f"Relationship missing attribute: {str(e)}")
+ continue # Skip this relationship.
+ if namespace == MODEL_REL: # Don't store relationships that we will write ourselves.
+ continue
+
+ # Evaluate any relative URIs based on the path to this .rels file in the archive.
+ target = urllib.parse.urljoin(base_path, target)
+
+ if target != "" and target[0] == "/":
+ # To coincide with the convention held by the zipfile package, paths in this archive will not start with
+ # a slash.
+ target = target[1:]
+
+ if target not in self.annotations:
+ self.annotations[target] = set()
+
+ # Add to the annotations as a relationship (since it's a set, don't create duplicates).
+ self.annotations[target].add(Relationship(namespace=namespace, source=base_path))
+
+ def add_content_types(self, files_by_content_type):
+ """
+ Add annotations that signal the content types of the files in the archive.
+
+ If a file already got a different content type from a different 3MF archive, the content type of the file now
+ becomes unknown (and subsequently won't get stored in any exported 3MF archive).
+
+ Content types for files known to this 3MF implementation will not get stored. This add-on will rewrite those
+ files and may change the file location and such.
+ :param files_by_content_type: The files in this archive, sorted by content type.
+ """
+ for content_type, file_set in files_by_content_type.items():
+ if content_type == "":
+ continue # Don't store content type if the content type is unknown.
+ if content_type in {RELS_MIMETYPE, MODEL_MIMETYPE}:
+ continue # Don't store content type if it's a file we'll rewrite with this add-on.
+ for file in file_set:
+ filename = file.name
+ if filename not in self.annotations:
+ self.annotations[filename] = set()
+ if ConflictingContentType in self.annotations[filename]:
+ # Content type was already conflicting through multiple previous files. It'll stay in conflict.
+ continue
+ content_type_annotations = list(filter(lambda annotation: type(annotation) == ContentType,
+ self.annotations[filename]))
+ if any(content_type_annotations) and content_type_annotations[0].mime_type != content_type:
+ # There was already a content type and it is different from this one.
+ # This file now has conflicting content types!
+ logging.warning(f"Found conflicting content types for file: {filename}")
+ for annotation in content_type_annotations:
+ self.annotations[filename].remove(annotation)
+ self.annotations[filename].add(ConflictingContentType)
+ else:
+ # No content type yet, or the existing content type is the same.
+ # Adding it again wouldn't have any effect if it is the same.
+ self.annotations[filename].add(ContentType(content_type))
+
+ def write_rels(self, archive):
+ """
+        Write the relationship annotations in this collection to an archive as .rels files.
+
+ Multiple relationship files may be added to the archive, if relationships came from multiple sources in the
+ original archives.
+ :param archive: A zip archive to add the relationships to.
+ """
+ current_id = 0 # Have an incrementing ID number to make all relationship IDs unique across the whole archive.
+
+ # First sort all relationships by their source, so that we know which relationship goes into which file.
+ # We always want to create a .rels file for the archive root, with our default relationships.
+ rels_by_source = {"/": set()}
+
+ for target, annotations in self.annotations.items():
+ for annotation in annotations:
+ if type(annotation) is not Relationship:
+ continue
+ if annotation.source not in rels_by_source:
+ rels_by_source[annotation.source] = set()
+ rels_by_source[annotation.source].add((target, annotation.namespace))
+
+ for source, annotations in rels_by_source.items():
+ if source == "/": # Writing to the archive root. Don't want to start zipfile paths with a slash.
+ source = ""
+ # Create an XML document containing all relationships for this source.
+ root = xml.etree.ElementTree.Element(f"{{{RELS_NAMESPACE}}}Relationships")
+ for target, namespace in annotations:
+ xml.etree.ElementTree.SubElement(root, f"{{{RELS_NAMESPACE}}}Relationship", attrib={
+ f"{{{RELS_NAMESPACE}}}Id": "rel" + str(current_id),
+ f"{{{RELS_NAMESPACE}}}Target": "/" + target,
+ f"{{{RELS_NAMESPACE}}}Type": namespace
+ })
+ current_id += 1
+
+ # Write relationships for files that we create.
+ if source == "":
+ xml.etree.ElementTree.SubElement(root, f"{{{RELS_NAMESPACE}}}Relationship", attrib={
+ f"{{{RELS_NAMESPACE}}}Id": "rel" + str(current_id),
+ f"{{{RELS_NAMESPACE}}}Target": "/" + MODEL_LOCATION,
+ f"{{{RELS_NAMESPACE}}}Type": MODEL_REL
+ })
+ current_id += 1
+
+ document = xml.etree.ElementTree.ElementTree(root)
+
+ # Write that XML document to a file.
+ rels_file = source + RELS_FOLDER + "/.rels" # _rels folder in the "source" folder.
+ with archive.open(rels_file, 'w') as f:
+ document.write(f, xml_declaration=True, encoding='UTF-8', default_namespace=RELS_NAMESPACE)
+
+ def write_content_types(self, archive):
+ """
+ Write a [Content_Types].xml file to a 3MF archive, containing all of the content types that we have assigned.
+ :param archive: A zip archive to add the content types to.
+ """
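+        # For example (illustrative): if two .png files were annotated "image/png" and one "image/vnd.ms-photo",
+        # the Default rule for the "png" extension becomes "image/png" and the remaining file gets an Override entry.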
+ # First sort all of the content types by their extension, so that we can find out what the most common content
+ # type is for each extension.
+ content_types_by_extension = {}
+ for target, annotations in self.annotations.items():
+ for annotation in annotations:
+ if type(annotation) is not ContentType:
+ continue
+ extension = os.path.splitext(target)[1]
+ if extension not in content_types_by_extension:
+ content_types_by_extension[extension] = []
+ content_types_by_extension[extension].append(annotation.mime_type)
+
+ # Then find out which is the most common content type to assign to that extension.
+ most_common = {}
+ for extension, mime_types in content_types_by_extension.items():
+ counter = collections.Counter(mime_types)
+ most_common[extension] = counter.most_common(1)[0][0]
+
+ # Add the content types for files that this add-on creates by itself.
+ most_common[".rels"] = RELS_MIMETYPE
+ most_common[".model"] = MODEL_MIMETYPE
+
+ # Write an XML file that contains the extension rules for the most common cases,
+ # but specific overrides for the outliers.
+ root = xml.etree.ElementTree.Element(f"{{{CONTENT_TYPES_NAMESPACE}}}Types")
+
+ # First add all of the extension-based rules.
+ for extension, mime_type in most_common.items():
+ if not extension: # Skip files without extension.
+ continue
+ xml.etree.ElementTree.SubElement(root, f"{{{CONTENT_TYPES_NAMESPACE}}}Default", attrib={
+ f"{{{CONTENT_TYPES_NAMESPACE}}}Extension": extension[1:], # Don't include the period.
+ f"{{{CONTENT_TYPES_NAMESPACE}}}ContentType": mime_type
+ })
+
+        # Then write the overrides for files whose content type differs from the most common type for their extension.
+ for target, annotations in self.annotations.items():
+ for annotation in annotations:
+ if type(annotation) is not ContentType:
+ continue
+ extension = os.path.splitext(target)[1]
+ if not extension or annotation.mime_type != most_common[extension]:
+ # This is an exceptional case that should be stored as an override.
+ xml.etree.ElementTree.SubElement(root, f"{{{CONTENT_TYPES_NAMESPACE}}}Override", attrib={
+ f"{{{CONTENT_TYPES_NAMESPACE}}}PartName": "/" + target,
+ f"{{{CONTENT_TYPES_NAMESPACE}}}ContentType": annotation.mime_type
+ })
+
+ # Output all that to the [Content_Types].xml file.
+ document = xml.etree.ElementTree.ElementTree(root)
+ with archive.open(CONTENT_TYPES_LOCATION, 'w') as f:
+ document.write(f, xml_declaration=True, encoding='UTF-8', default_namespace=CONTENT_TYPES_NAMESPACE)
+
+ def store(self):
+ """
+ Stores this `Annotations` instance in the Blender scene.
+
+ The instance will serialize itself and put that data in a hidden JSON file in the scene. This way the data can
+ survive until it needs to be saved to a 3MF document again, even when shared through a Blend file.
+ """
+ # Generate a JSON document containing all annotations.
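+        # The result looks roughly like this (illustrative):
+        #   {"Metadata/thumbnail.png": [{"annotation": "relationship", "namespace": "...", "source": "/"}]}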
+ document = {}
+ for target, annotations in self.annotations.items():
+ serialized_annotations = []
+ for annotation in annotations:
+ if type(annotation) == Relationship:
+ serialized_annotations.append({
+ "annotation": 'relationship',
+ "namespace": annotation.namespace,
+ "source": annotation.source
+ })
+ elif type(annotation) == ContentType:
+ serialized_annotations.append({
+ "annotation": 'content_type',
+ "mime_type": annotation.mime_type
+ })
+ elif annotation == ConflictingContentType:
+ serialized_annotations.append({
+ "annotation": 'content_type_conflict'
+ })
+ document[target] = serialized_annotations
+
+ # Store this in the Blender context.
+ if ANNOTATION_FILE in bpy.data.texts:
+ bpy.data.texts.remove(bpy.data.texts[ANNOTATION_FILE])
+ text_file = bpy.data.texts.new(ANNOTATION_FILE)
+ text_file.write(json.dumps(document))
+
+ def retrieve(self):
+ """
+ Retrieves any existing annotations from the Blender scene.
+
+ This looks for a serialized annotation file in the Blender data. If it exists, it parses that file and retrieves
+ the data from it, restoring the state of the annotations collection that stored that file.
+ """
+ # If there's nothing stored in the current scene, this clears the state of the annotations.
+ self.annotations.clear()
+
+ if ANNOTATION_FILE not in bpy.data.texts:
+ return # Nothing to read. Done!
+ try:
+ annotation_data = json.loads(bpy.data.texts[ANNOTATION_FILE].as_string())
+ except json.JSONDecodeError:
+ logging.warning("Annotation file exists, but is not properly formatted.")
+ return # File was meddled with?
+
+ for target, annotations in annotation_data.items():
+ self.annotations[target] = set()
+ try:
+ for annotation in annotations:
+ if annotation['annotation'] == 'relationship':
+ self.annotations[target].add(
+ Relationship(namespace=annotation['namespace'], source=annotation['source']))
+ elif annotation['annotation'] == 'content_type':
+ self.annotations[target].add(ContentType(mime_type=annotation['mime_type']))
+ elif annotation['annotation'] == 'content_type_conflict':
+ self.annotations[target].add(ConflictingContentType)
+ else:
+ logging.warning(f"Unknown annotation type \"{annotation['annotation']}\" encountered.")
+ continue
+ except TypeError: # Raised when `annotations` is not iterable.
+ logging.warning(f"Annotation for target \"{target}\" is not properly structured.")
+ except KeyError as e:
+ # Raised when missing the 'annotation' key or a required key belonging to that annotation.
+ logging.warning(f"Annotation for target \"{target}\" missing key: {str(e)}")
+ if not self.annotations[target]: # Nothing was added in the end.
+ del self.annotations[target] # Don't store the empty target either then.
diff --git a/io_mesh_3mf/constants.py b/io_mesh_3mf/constants.py
new file mode 100644
index 000000000..730703c64
--- /dev/null
+++ b/io_mesh_3mf/constants.py
@@ -0,0 +1,56 @@
+# Blender add-on to import and export 3MF files.
+# Copyright (C) 2020 Ghostkeeper
+# This add-on is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
+# Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any
+# later version.
+# This add-on is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
+# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for
+# details.
+# You should have received a copy of the GNU Affero General Public License along with this plug-in. If not, see
+# <https://www.gnu.org/licenses/>.
+
+# <pep8 compliant>
+
+"""
+This module defines some constants for 3MF's file structure.
+
+These are the constants that are inherent to the 3MF file format.
+"""
+
+SUPPORTED_EXTENSIONS = set() # Set of namespaces for 3MF extensions that we support.
+# File contents to use when files must be preserved but there's a file with different content in a previous archive.
+# Only for flagging. This will not be in the final 3MF archives. The exact text is arbitrary; it only needs to be a
+# sentinel that won't collide with real file contents.
+conflicting_mustpreserve_contents = "<Conflicting MustPreserve file!>"
+
+# Default storage locations.
+MODEL_LOCATION = "3D/3dmodel.model" # Conventional location for the 3D model data.
+CONTENT_TYPES_LOCATION = "[Content_Types].xml" # Location of the content types definition.
+RELS_FOLDER = "_rels" # Folder name to store relationships files in.
+
+# Relationship types.
+MODEL_REL = "http://schemas.microsoft.com/3dmanufacturing/2013/01/3dmodel" # Relationship type of 3D models.
+THUMBNAIL_REL = "http://schemas.openxmlformats.org/package/2006/relationships/metadata/thumbnail"
+
+# MIME types of files in the archive.
+RELS_MIMETYPE = "application/vnd.openxmlformats-package.relationships+xml" # MIME type of .rels files.
+MODEL_MIMETYPE = "application/vnd.ms-package.3dmanufacturing-3dmodel+xml" # MIME type of .model files.
+
+# Constants in the 3D model file.
+MODEL_NAMESPACE = "http://schemas.microsoft.com/3dmanufacturing/core/2015/02"
+MODEL_NAMESPACES = {
+ "3mf": MODEL_NAMESPACE
+}
+MODEL_DEFAULT_UNIT = "millimeter" # If the unit is missing, it will be this.
+
+# Constants in the ContentTypes file.
+CONTENT_TYPES_NAMESPACE = "http://schemas.openxmlformats.org/package/2006/content-types"
+CONTENT_TYPES_NAMESPACES = {
+ "ct": CONTENT_TYPES_NAMESPACE
+}
+
+# Constants in the .rels files.
+RELS_NAMESPACE = "http://schemas.openxmlformats.org/package/2006/relationships"
+RELS_NAMESPACES = { # Namespaces used for the rels files.
+ "rel": RELS_NAMESPACE
+}
+RELS_RELATIONSHIP_FIND = "rel:Relationship"
diff --git a/io_mesh_3mf/export_3mf.py b/io_mesh_3mf/export_3mf.py
new file mode 100644
index 000000000..3175f100d
--- /dev/null
+++ b/io_mesh_3mf/export_3mf.py
@@ -0,0 +1,527 @@
+# Blender add-on to import and export 3MF files.
+# Copyright (C) 2020 Ghostkeeper
+# This add-on is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
+# Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any
+# later version.
+# This add-on is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
+# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for
+# details.
+# You should have received a copy of the GNU Affero General Public License along with this plug-in. If not, see
+# <https://www.gnu.org/licenses/>.
+
+# <pep8 compliant>
+
+import base64 # To decode files that must be preserved.
+import bpy # The Blender API.
+import bpy.props # To define metadata properties for the operator.
+import bpy.types # This class is an operator in Blender, and to find meshes in the scene.
+import bpy_extras.io_utils # Helper functions to export meshes more easily.
+import bpy_extras.node_shader_utils # Converting material colors to sRGB.
+import collections # Counter, to find the most common material of an object.
+import itertools
+import logging # To debug and log progress.
+import mathutils # For the transformation matrices.
+import xml.etree.ElementTree # To write XML documents with the 3D model data.
+import zipfile # To write zip archives, the shell of the 3MF file.
+
+from .annotations import Annotations # To store file annotations
+from .constants import *
+from .metadata import Metadata # To store metadata from the Blender scene into the 3MF file.
+from .unit_conversions import blender_to_metre, threemf_to_metre
+
+log = logging.getLogger(__name__)
+
+
+class Export3MF(bpy.types.Operator, bpy_extras.io_utils.ExportHelper):
+ """
+ Operator that exports a 3MF file from Blender.
+ """
+
+ # Metadata.
+ bl_idname = "export_mesh.threemf"
+ bl_label = "Export 3MF"
+ bl_description = "Save the current scene to 3MF"
+ filename_ext = ".3mf"
+
+ # Options for the user.
+ filter_glob: bpy.props.StringProperty(
+ default="*.3mf",
+ options={'HIDDEN'})
+ use_selection: bpy.props.BoolProperty(
+ name="Selection Only",
+ description="Export selected objects only.",
+ default=False)
+ global_scale: bpy.props.FloatProperty(
+ name="Scale",
+ default=1.0,
+ soft_min=0.001,
+ soft_max=1000.0,
+ min=1e-6,
+ max=1e6)
+ use_mesh_modifiers: bpy.props.BoolProperty(
+ name="Apply Modifiers",
+ description="Apply the modifiers before saving.",
+ default=True)
+ coordinate_precision: bpy.props.IntProperty(
+ name="Precision",
+ description="The number of decimal digits to use in coordinates in the file.",
+ default=4,
+ min=0,
+ max=12)
+
+ def __init__(self):
+ """
+ Initialize some fields with defaults before starting.
+ """
+ super().__init__()
+ self.next_resource_id = 1 # Which resource ID to generate for the next object.
+ self.num_written = 0 # How many objects we've written to the file.
+        self.material_resource_id = -1  # We write one <basematerials> group. This is the resource ID of that group.
+ self.material_name_to_index = {} # For each material in Blender, the index in the 3MF materials group.
+
+ def execute(self, context):
+ """
+ The main routine that writes the 3MF archive.
+
+ This function serves as a high-level overview of the steps involved to write a 3MF file.
+ :param context: The Blender context.
+ :return: A set of status flags to indicate whether the write succeeded or not.
+ """
+ # Reset state.
+ self.next_resource_id = 1 # Starts counting at 1 for some inscrutable reason.
+ self.material_resource_id = -1
+ self.num_written = 0
+
+ archive = self.create_archive(self.filepath)
+ if archive is None:
+ return {'CANCELLED'}
+
+ if self.use_selection:
+ blender_objects = context.selected_objects
+ else:
+ blender_objects = context.scene.objects
+
+ global_scale = self.unit_scale(context)
+
+ # Due to an open bug in Python 3.7 (Blender's version) we need to prefix all elements with the namespace.
+ # Bug: https://bugs.python.org/issue17088
+ # Workaround: https://stackoverflow.com/questions/4997848/4999510#4999510
+ root = xml.etree.ElementTree.Element(f"{{{MODEL_NAMESPACE}}}model")
+
+ scene_metadata = Metadata()
+ scene_metadata.retrieve(bpy.context.scene)
+ self.write_metadata(root, scene_metadata)
+
+ resources_element = xml.etree.ElementTree.SubElement(root, f"{{{MODEL_NAMESPACE}}}resources")
+ self.material_name_to_index = self.write_materials(resources_element, blender_objects)
+ self.write_objects(root, resources_element, blender_objects, global_scale)
+
+ document = xml.etree.ElementTree.ElementTree(root)
+ with archive.open(MODEL_LOCATION, 'w', force_zip64=True) as f:
+ document.write(f, xml_declaration=True, encoding='UTF-8', default_namespace=MODEL_NAMESPACE)
+ try:
+ archive.close()
+ except EnvironmentError as e:
+ log.error(f"Unable to complete writing to 3MF archive: {e}")
+ return {'CANCELLED'}
+
+ log.info(f"Exported {self.num_written} objects to 3MF archive {self.filepath}.")
+ return {'FINISHED'}
+
+ # The rest of the functions are in order of when they are called.
+
+ def create_archive(self, filepath):
+ """
+ Creates an empty 3MF archive.
+
+ The archive is complete according to the 3MF specs except that the actual 3dmodel.model file is missing.
+ :param filepath: The path to write the file to.
+ :return: A zip archive that other functions can add things to.
+ """
+ try:
+ archive = zipfile.ZipFile(filepath, 'w', compression=zipfile.ZIP_DEFLATED, compresslevel=9)
+
+            # Take the file annotations we got from imported 3MF files and store them in the archive.
+ annotations = Annotations()
+ annotations.retrieve()
+ annotations.write_rels(archive)
+ annotations.write_content_types(archive)
+ self.must_preserve(archive)
+ except EnvironmentError as e:
+ log.error(f"Unable to write 3MF archive to {filepath}: {e}")
+ return None
+
+ return archive
+
+ def must_preserve(self, archive):
+ """
+ Write files that must be preserved to the archive.
+
+ These files were stored in the Blender scene in a hidden location.
+ :param archive: The archive to write files to.
+ """
+ for textfile in bpy.data.texts:
+ filename = textfile.name
+ if not filename.startswith(".3mf_preserved/"):
+ continue # Unrelated file. Not ours to read.
+ contents = textfile.as_string()
+ if contents == conflicting_mustpreserve_contents:
+ continue # This file was in conflict. Don't preserve any copy of it then.
+ contents = base64.b85decode(contents.encode("UTF-8"))
+ filename = filename[len(".3mf_preserved/"):]
+ with archive.open(filename, 'w') as f:
+ f.write(contents)
+
+ def unit_scale(self, context):
+ """
+ Get the scaling factor we need to transform the document to millimetres.
+ :param context: The Blender context to get the unit from.
+ :return: Floating point value that we need to scale this model by. A small number (<1) means that we need to
+ make the coordinates in the 3MF file smaller than the coordinates in Blender. A large number (>1) means we need
+ to make the coordinates in the file larger than the coordinates in Blender.
+ """
+ scale = self.global_scale
+
+ if context.scene.unit_settings.scale_length != 0:
+ scale *= context.scene.unit_settings.scale_length # Apply the global scale of the units in Blender.
+
+ threemf_unit = MODEL_DEFAULT_UNIT
+ blender_unit = context.scene.unit_settings.length_unit
+ scale /= threemf_to_metre[threemf_unit] # Convert 3MF units to metre.
+ scale *= blender_to_metre[blender_unit] # Convert metre to Blender's units.
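+        # Worked example (assuming blender_to_metre maps 'CENTIMETERS' to 0.01): a centimetre scene with
+        # global_scale 1.0 and scale_length 1.0 gives scale = 1 / 0.001 * 0.01 = 10, so a 1 cm edge in
+        # Blender is written as 10 mm in the file.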
+
+ return scale
+
+ def write_materials(self, resources_element, blender_objects):
+ """
+ Write the materials on the specified blender objects to a 3MF document.
+
+        We'll write all materials to one single <basematerials> tag in the resources.
+
+        Aside from writing the materials to the document, this function also returns a mapping from the names of the
+        materials in Blender (which must be unique) to the index in the <basematerials> material group. Using that
+        mapping, the objects and triangles can write down an index referring to the list of <base> tags.
+
+        Since the <base> material can only hold a color, we'll write the diffuse color of the material to the file.
+        :param resources_element: A <resources> node from a 3MF document.
+        :param blender_objects: A list of Blender objects that may have materials which we need to write to the
+        document.
+        :return: A mapping from material name to the index of that material in the <basematerials> tag.
+ """
+        name_to_index = {}  # The output mapping from material name to indices in the <basematerials> tag.
+ next_index = 0
+
+        # Create the <basematerials> element lazily. We don't want to create it if there are no materials to write.
+ basematerials_element = None
+
+ for blender_object in blender_objects:
+ for material_slot in blender_object.material_slots:
+ material = material_slot.material
+
+ material_name = material.name
+ if material_name in name_to_index: # Already have this material through another object.
+ continue
+
+ # Wrap this material into a principled render node, to convert its color to sRGB.
+ principled = bpy_extras.node_shader_utils.PrincipledBSDFWrapper(material, is_readonly=True)
+ color = principled.base_color
+ red = min(255, round(color[0] * 255))
+ green = min(255, round(color[1] * 255))
+ blue = min(255, round(color[2] * 255))
+ alpha = principled.alpha
+ if alpha >= 1.0: # Completely opaque. Leave out the alpha component.
+ color_hex = "#%0.2X%0.2X%0.2X" % (red, green, blue)
+ else:
+ alpha = min(255, round(alpha * 255))
+ color_hex = "#%0.2X%0.2X%0.2X%0.2X" % (red, green, blue, alpha)
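+                # For example, an opaque material with base_color (1.0, 0.5, 0.0) becomes "#FF8000".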
+
+ if basematerials_element is None:
+ self.material_resource_id = str(self.next_resource_id)
+ self.next_resource_id += 1
+ basematerials_element = xml.etree.ElementTree.SubElement(
+ resources_element,
+ f"{{{MODEL_NAMESPACE}}}basematerials", attrib={
+ f"{{{MODEL_NAMESPACE}}}id": self.material_resource_id
+ })
+ xml.etree.ElementTree.SubElement(basematerials_element, f"{{{MODEL_NAMESPACE}}}base", attrib={
+ f"{{{MODEL_NAMESPACE}}}name": material_name,
+ f"{{{MODEL_NAMESPACE}}}displaycolor": color_hex
+ })
+ name_to_index[material_name] = next_index
+ next_index += 1
+
+ return name_to_index
+
+ def write_objects(self, root, resources_element, blender_objects, global_scale):
+ """
+ Writes a group of objects into the 3MF archive.
+ :param root: An XML root element to write the objects into.
+ :param resources_element: An XML element to write resources into.
+ :param blender_objects: A list of Blender objects that need to be written to that XML element.
+ :param global_scale: A scaling factor to apply to all objects to convert the units.
+ """
+ transformation = mathutils.Matrix.Scale(global_scale, 4)
+
+ build_element = xml.etree.ElementTree.SubElement(root, f"{{{MODEL_NAMESPACE}}}build")
+ for blender_object in blender_objects:
+ if blender_object.parent is not None:
+ continue # Only write objects that have no parent, since we'll get the child objects recursively.
+ if blender_object.type not in {'MESH', 'EMPTY'}:
+ continue
+
+ objectid, mesh_transformation = self.write_object_resource(resources_element, blender_object)
+
+ item_element = xml.etree.ElementTree.SubElement(build_element, f"{{{MODEL_NAMESPACE}}}item")
+ self.num_written += 1
+ item_element.attrib[f"{{{MODEL_NAMESPACE}}}objectid"] = str(objectid)
+ mesh_transformation = transformation @ mesh_transformation
+ if mesh_transformation != mathutils.Matrix.Identity(4):
+ item_element.attrib[f"{{{MODEL_NAMESPACE}}}transform"] =\
+ self.format_transformation(mesh_transformation)
+
+ metadata = Metadata()
+ metadata.retrieve(blender_object)
+ if "3mf:partnumber" in metadata:
+ item_element.attrib[f"{{{MODEL_NAMESPACE}}}partnumber"] = metadata["3mf:partnumber"].value
+ del metadata["3mf:partnumber"]
+ if metadata:
+ metadatagroup_element = xml.etree.ElementTree.SubElement(
+ item_element,
+ f"{{{MODEL_NAMESPACE}}}metadatagroup")
+ self.write_metadata(metadatagroup_element, metadata)
+
+ def write_object_resource(self, resources_element, blender_object):
+ """
+ Write a single Blender object and all of its children to the resources of a 3MF document.
+
+ If the object contains a mesh it'll get written to the document as an object with a mesh resource. If the object
+ contains children it'll get written to the document as an object with components. If the object contains both,
+ two objects will be written; one with the mesh and another with the components. The mesh then gets added as a
+ component of the object with components.
+        :param resources_element: The <resources> element of the 3MF document to write into.
+ :param blender_object: A Blender object to write to that XML element.
+ :return: A tuple, containing the object ID of the newly written resource and a transformation matrix that this
+ resource must be saved with.
+ """
+ new_resource_id = self.next_resource_id
+ self.next_resource_id += 1
+ object_element = xml.etree.ElementTree.SubElement(resources_element, f"{{{MODEL_NAMESPACE}}}object")
+ object_element.attrib[f"{{{MODEL_NAMESPACE}}}id"] = str(new_resource_id)
+
+ metadata = Metadata()
+ metadata.retrieve(blender_object)
+ if "3mf:object_type" in metadata:
+ object_type = metadata["3mf:object_type"].value
+ if object_type != "model": # Only write if not the default.
+ object_element.attrib[f"{{{MODEL_NAMESPACE}}}type"] = object_type
+ del metadata["3mf:object_type"]
+
+ if blender_object.mode == 'EDIT':
+ blender_object.update_from_editmode() # Apply recent changes made to the model.
+ mesh_transformation = blender_object.matrix_world
+
+ child_objects = blender_object.children
+        if child_objects:  # Only write the <components> tag if there are actually components.
+ components_element = xml.etree.ElementTree.SubElement(
+ object_element,
+ f"{{{MODEL_NAMESPACE}}}components")
+ for child in blender_object.children:
+ if child.type != 'MESH':
+ continue
+ # Recursively write children to the resources.
+ child_id, child_transformation = self.write_object_resource(resources_element, child)
+                # Use the pseudo-inverse for safety. The epsilon then doesn't matter, since it'll get multiplied by 0
+                # later anyway.
+ child_transformation = mesh_transformation.inverted_safe() @ child_transformation
+ component_element = xml.etree.ElementTree.SubElement(
+ components_element,
+ f"{{{MODEL_NAMESPACE}}}component")
+ self.num_written += 1
+ component_element.attrib[f"{{{MODEL_NAMESPACE}}}objectid"] = str(child_id)
+ if child_transformation != mathutils.Matrix.Identity(4):
+ component_element.attrib[f"{{{MODEL_NAMESPACE}}}transform"] =\
+ self.format_transformation(child_transformation)
+
+ # In the tail recursion, get the vertex data.
+ # This is necessary because we may need to apply the mesh modifiers, which causes these objects to lose their
+ # children.
+ if self.use_mesh_modifiers:
+ dependency_graph = bpy.context.evaluated_depsgraph_get()
+ blender_object = blender_object.evaluated_get(dependency_graph)
+
+ try:
+ mesh = blender_object.to_mesh()
+ except RuntimeError: # Object.to_mesh() is not guaranteed to return Optional[Mesh], apparently.
+ return new_resource_id, mesh_transformation
+ if mesh is None:
+ return new_resource_id, mesh_transformation
+
+ # Need to convert this to triangles-only, because 3MF doesn't support faces with more than 3 vertices.
+ mesh.calc_loop_triangles()
+
+        if len(mesh.vertices) > 0:  # Only write a <mesh> tag if there is mesh data.
+ # If this object already contains components, we can't also store a mesh. So create a new object and use
+ # that object as another component.
+ if child_objects:
+ mesh_id = self.next_resource_id
+ self.next_resource_id += 1
+ mesh_object_element = xml.etree.ElementTree.SubElement(
+ resources_element,
+ f"{{{MODEL_NAMESPACE}}}object")
+ mesh_object_element.attrib[f"{{{MODEL_NAMESPACE}}}id"] = str(mesh_id)
+ component_element = xml.etree.ElementTree.SubElement(
+ components_element,
+ f"{{{MODEL_NAMESPACE}}}component")
+ self.num_written += 1
+ component_element.attrib[f"{{{MODEL_NAMESPACE}}}objectid"] = str(mesh_id)
+ else: # No components, then we can write directly into this object resource.
+ mesh_object_element = object_element
+ mesh_element = xml.etree.ElementTree.SubElement(mesh_object_element, f"{{{MODEL_NAMESPACE}}}mesh")
+
+ # Find the most common material for this mesh, for maximum compression.
+ material_indices = [triangle.material_index for triangle in mesh.loop_triangles]
+            # If there are no triangles, we provide 0 as the index, but then write_triangles won't read it anyway.
+ most_common_material_list_index = 0
+
+ if material_indices and blender_object.material_slots:
+ counter = collections.Counter(material_indices)
+ # most_common_material_object_index is an index from the MeshLoopTriangle, referring to the list of
+ # materials attached to the Blender object.
+ most_common_material_object_index = counter.most_common(1)[0][0]
+ most_common_material = blender_object.material_slots[most_common_material_object_index].material
+ # most_common_material_list_index is an index referring to our own list of materials that we put in the
+ # resources.
+ most_common_material_list_index = self.material_name_to_index[most_common_material.name]
+ # We always only write one group of materials. The resource ID was determined when it was written.
+ object_element.attrib[f"{{{MODEL_NAMESPACE}}}pid"] = str(self.material_resource_id)
+ object_element.attrib[f"{{{MODEL_NAMESPACE}}}pindex"] = str(most_common_material_list_index)
+
+ self.write_vertices(mesh_element, mesh.vertices)
+ self.write_triangles(
+ mesh_element,
+ mesh.loop_triangles,
+ most_common_material_list_index,
+ blender_object.material_slots)
+
+ # If the object has metadata, write that to a metadata object.
+ if "3mf:partnumber" in metadata:
+ mesh_object_element.attrib[f"{{{MODEL_NAMESPACE}}}partnumber"] =\
+ metadata["3mf:partnumber"].value
+ del metadata["3mf:partnumber"]
+ if "3mf:object_type" in metadata:
+ object_type = metadata["3mf:object_type"].value
+ if object_type != "model" and object_type != "other":
+ # Only write if not the default.
+ # Don't write "other" object types since we're not allowed to refer to them. Pretend they are normal
+ # models.
+ mesh_object_element.attrib[f"{{{MODEL_NAMESPACE}}}type"] = object_type
+ del metadata["3mf:object_type"]
+ if metadata:
+ metadatagroup_element = xml.etree.ElementTree.SubElement(
+ object_element,
+ f"{{{MODEL_NAMESPACE}}}metadatagroup")
+ self.write_metadata(metadatagroup_element, metadata)
+
+ return new_resource_id, mesh_transformation
+
+ def write_metadata(self, node, metadata):
+ """
+ Writes metadata from a metadata storage into an XML node.
+ :param node: The node to add tags to.
+ :param metadata: The collection of metadata to write to that node.
+ """
+ for metadata_entry in metadata.values():
+ metadata_node = xml.etree.ElementTree.SubElement(node, f"{{{MODEL_NAMESPACE}}}metadata")
+ metadata_node.attrib[f"{{{MODEL_NAMESPACE}}}name"] = metadata_entry.name
+ if metadata_entry.preserve:
+ metadata_node.attrib[f"{{{MODEL_NAMESPACE}}}preserve"] = "1"
+ if metadata_entry.datatype:
+ metadata_node.attrib[f"{{{MODEL_NAMESPACE}}}type"] = metadata_entry.datatype
+ metadata_node.text = metadata_entry.value
+
+ def format_transformation(self, transformation):
+ """
+ Formats a transformation matrix in 3MF's formatting.
+
+ This transformation matrix can then be written to an attribute.
+ :param transformation: The transformation matrix to format.
+ :return: A serialisation of the transformation matrix.
+ """
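+        # For example, the identity matrix becomes "1 0 0 0 1 0 0 0 1 0 0 0" and a pure translation by
+        # (10, 20, 30) becomes "1 0 0 0 1 0 0 0 1 10 20 30".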
+ pieces = (row[:3] for row in transformation.transposed()) # Don't convert the 4th column.
+ result = ""
+ for cell in itertools.chain.from_iterable(pieces):
+ if result != "": # First loop, don't put a space in.
+ result += " "
+ result += self.format_number(cell, 6) # Never use scientific notation!
+ return result
+
+ def write_vertices(self, mesh_element, vertices):
+ """
+ Writes a list of vertices into the specified mesh element.
+
+ This then becomes a resource that can be used in a build.
+        :param mesh_element: The <mesh> element of the 3MF document.
+ :param vertices: A list of Blender vertices to add.
+ """
+ vertices_element = xml.etree.ElementTree.SubElement(mesh_element, f"{{{MODEL_NAMESPACE}}}vertices")
+
+ # Precompute some names for better performance.
+ vertex_name = f"{{{MODEL_NAMESPACE}}}vertex"
+ x_name = f"{{{MODEL_NAMESPACE}}}x"
+ y_name = f"{{{MODEL_NAMESPACE}}}y"
+ z_name = f"{{{MODEL_NAMESPACE}}}z"
+
+ for vertex in vertices: # Create the elements.
+ vertex_element = xml.etree.ElementTree.SubElement(vertices_element, vertex_name)
+ vertex_element.attrib[x_name] = self.format_number(vertex.co[0], self.coordinate_precision)
+ vertex_element.attrib[y_name] = self.format_number(vertex.co[1], self.coordinate_precision)
+ vertex_element.attrib[z_name] = self.format_number(vertex.co[2], self.coordinate_precision)
+
+ def write_triangles(self, mesh_element, triangles, object_material_list_index, material_slots):
+ """
+ Writes a list of triangles into the specified mesh element.
+
+ This then becomes a resource that can be used in a build.
+        :param mesh_element: The <mesh> element of the 3MF document.
+        :param triangles: A list of triangles. Each triangle holds three indices into the list of vertices.
+ :param object_material_list_index: The index of the material that the object was written with to which these
+ triangles belong. If the triangle has a different index, we need to write the index with the triangle.
+ :param material_slots: List of materials belonging to the object for which we write triangles. These are
+ necessary to interpret the material indices stored in the MeshLoopTriangles.
+ """
+ triangles_element = xml.etree.ElementTree.SubElement(mesh_element, f"{{{MODEL_NAMESPACE}}}triangles")
+
+ # Precompute some names for better performance.
+ triangle_name = f"{{{MODEL_NAMESPACE}}}triangle"
+ v1_name = f"{{{MODEL_NAMESPACE}}}v1"
+ v2_name = f"{{{MODEL_NAMESPACE}}}v2"
+ v3_name = f"{{{MODEL_NAMESPACE}}}v3"
+ p1_name = f"{{{MODEL_NAMESPACE}}}p1"
+
+ for triangle in triangles:
+ triangle_element = xml.etree.ElementTree.SubElement(triangles_element, triangle_name)
+ triangle_element.attrib[v1_name] = str(triangle.vertices[0])
+ triangle_element.attrib[v2_name] = str(triangle.vertices[1])
+ triangle_element.attrib[v3_name] = str(triangle.vertices[2])
+
+ if triangle.material_index < len(material_slots):
+ # Convert to index in our global list.
+ material_index = self.material_name_to_index[material_slots[triangle.material_index].material.name]
+ if material_index != object_material_list_index:
+ # Not equal to the index that our parent object was written with, so we must override it here.
+ triangle_element.attrib[p1_name] = str(material_index)
+
+ def format_number(self, number, decimals):
+ """
+ Properly formats a floating point number to a certain precision.
+
+ This format will never use scientific notation (no 3.14e-5 nonsense) and will have a fixed limit to the number
+ of decimals. It will not have a limit to the length of the integer part. Any trailing zeros are stripped.
+ :param number: A floating point number to format.
+ :param decimals: The maximum number of places after the radix to write.
+ :return: A string representing that number.
+ """
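+        # For example, with decimals=4: 3.14159 -> "3.1416", 1.5 -> "1.5", 0.00004 -> "0".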
+ formatted = ("{:." + str(decimals) + "f}").format(number).rstrip("0").rstrip(".")
+ if formatted == "":
+ return "0"
+ return formatted
diff --git a/io_mesh_3mf/import_3mf.py b/io_mesh_3mf/import_3mf.py
new file mode 100644
index 000000000..e455427b1
--- /dev/null
+++ b/io_mesh_3mf/import_3mf.py
@@ -0,0 +1,744 @@
+# Blender add-on to import and export 3MF files.
+# Copyright (C) 2020 Ghostkeeper
+# This add-on is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
+# Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option)
+# any later version.
+# This add-on is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
+# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for
+# details.
+# You should have received a copy of the GNU Affero General Public License along with this plug-in. If not, see
+# <https://www.gnu.org/licenses/>.
+
+# <pep8 compliant>
+
+import base64 # To encode MustPreserve files in the Blender scene.
+import bpy # The Blender API.
+import bpy.ops # To adjust the camera to fit models.
+import bpy.props # To define metadata properties for the operator.
+import bpy.types # This class is an operator in Blender.
+import bpy_extras.io_utils # Helper functions to import meshes more easily.
+import bpy_extras.node_shader_utils # Getting correct color spaces for materials.
+import logging # To debug and log progress.
+import collections # For namedtuple.
+import mathutils # For the transformation matrices.
+import os.path # To take file paths relative to the selected directory.
+import re # To find files in the archive based on the content types.
+import xml.etree.ElementTree # To parse the 3dmodel.model file.
+import zipfile # To read the 3MF files which are secretly zip archives.
+
+from .annotations import Annotations, ContentType, Relationship # To use annotations to decide on what to import.
+from .constants import *
+from .metadata import MetadataEntry, Metadata # To store and serialize metadata.
+from .unit_conversions import blender_to_metre, threemf_to_metre # To convert to Blender's units.
+
+log = logging.getLogger(__name__)
+
+ResourceObject = collections.namedtuple("ResourceObject", [
+ "vertices",
+ "triangles",
+ "materials",
+ "components",
+ "metadata"])
+Component = collections.namedtuple("Component", ["resource_object", "transformation"])
+ResourceMaterial = collections.namedtuple("ResourceMaterial", ["name", "color"])
+
+
+class Import3MF(bpy.types.Operator, bpy_extras.io_utils.ImportHelper):
+ """
+ Operator that imports a 3MF file into Blender.
+ """
+
+ # Metadata.
+ bl_idname = "import_mesh.threemf"
+ bl_label = "Import 3MF"
+ bl_description = "Load a 3MF scene"
+ bl_options = {'UNDO'}
+ filename_ext = ".3mf"
+
+ # Options for the user.
+ filter_glob: bpy.props.StringProperty(default="*.3mf", options={'HIDDEN'})
+ files: bpy.props.CollectionProperty(name="File Path", type=bpy.types.OperatorFileListElement)
+ directory: bpy.props.StringProperty(subtype='DIR_PATH')
+ global_scale: bpy.props.FloatProperty(name="Scale", default=1.0, soft_min=0.001, soft_max=1000.0, min=1e-6, max=1e6)
+
+ def __init__(self):
+ """
+ Initializes the importer with empty fields.
+ """
+ super().__init__()
+ self.resource_objects = {} # Dictionary mapping resource IDs to ResourceObjects.
+
+ # Dictionary mapping resource IDs to dictionaries mapping indexes to ResourceMaterial objects.
+ self.resource_materials = {}
+
+ # Which of our resource materials already exists in the Blender scene as a Blender material.
+ self.resource_to_material = {}
+
+ self.num_loaded = 0
+
+ def execute(self, context):
+ """
+ The main routine that reads out the 3MF file.
+
+ This function serves as a high-level overview of the steps involved to read the 3MF file.
+ :param context: The Blender context.
+ :return: A set of status flags to indicate whether the operation succeeded or not.
+ """
+ # Reset state.
+ self.resource_objects = {}
+ self.resource_materials = {}
+ self.resource_to_material = {}
+ self.num_loaded = 0
+ scene_metadata = Metadata()
+ # If there was already metadata in the scene, combine that with this file.
+ scene_metadata.retrieve(bpy.context.scene)
+ # Don't load the title from the old scene. If there is a title in the imported 3MF, use that.
+ # Else, we'll not override the scene title and it gets retained.
+ del scene_metadata["Title"]
+ annotations = Annotations()
+ annotations.retrieve() # If there were already annotations in the scene, combine that with this file.
+
+ # Preparation of the input parameters.
+ paths = [os.path.join(self.directory, name.name) for name in self.files]
+ if not paths:
+ paths.append(self.filepath)
+
+ if bpy.ops.object.mode_set.poll():
+ bpy.ops.object.mode_set(mode='OBJECT') # Switch to object mode to view the new file.
+ if bpy.ops.object.select_all.poll():
+ bpy.ops.object.select_all(action='DESELECT') # Deselect other files.
+
+ for path in paths:
+ files_by_content_type = self.read_archive(path) # Get the files from the archive.
+
+ # File metadata.
+ for rels_file in files_by_content_type.get(RELS_MIMETYPE, []):
+ annotations.add_rels(rels_file)
+ annotations.add_content_types(files_by_content_type)
+ self.must_preserve(files_by_content_type, annotations)
+
+ # Read the model data.
+ for model_file in files_by_content_type.get(MODEL_MIMETYPE, []):
+ try:
+ document = xml.etree.ElementTree.ElementTree(file=model_file)
+ except xml.etree.ElementTree.ParseError as e:
+ log.error(f"3MF document in {path} is malformed: {str(e)}")
+ continue
+ if document is None:
+ # This file is corrupt or we can't read it. There is no error code to communicate this to Blender
+ # though.
+ continue # Leave the scene empty / skip this file.
+ root = document.getroot()
+ if not self.is_supported(root.attrib.get("requiredextensions", "")):
+ log.warning(f"3MF document in {path} requires unknown extensions.")
+ # Still continue processing even though the spec says not to. Our aim is to retrieve whatever
+ # information we can.
+
+ scale_unit = self.unit_scale(context, root)
+ self.resource_objects = {}
+ self.resource_materials = {}
+ scene_metadata = self.read_metadata(root, scene_metadata)
+ self.read_materials(root)
+ self.read_objects(root)
+ self.build_items(root, scale_unit)
+
+ scene_metadata.store(bpy.context.scene)
+ annotations.store()
+
+ # Zoom the camera to view the imported objects.
+ for area in bpy.context.screen.areas:
+ if area.type == 'VIEW_3D':
+ for region in area.regions:
+ if region.type == 'WINDOW':
+ override = {'area': area, 'region': region, 'edit_object': bpy.context.edit_object}
+ bpy.ops.view3d.view_selected(override)
+
+ log.info(f"Imported {self.num_loaded} objects from 3MF files.")
+
+ return {'FINISHED'}
+
+ # The rest of the functions are in order of when they are called.
+
+ def read_archive(self, path):
+ """
+ Creates file streams from all the files in the archive.
+
+ The results are sorted by their content types. Consumers of this data can pick the content types that they know
+ from the file and process those.
+ :param path: The path to the archive to read.
+ :return: A dictionary with all of the resources in the archive by content type. The keys in this dictionary are
+ the different content types available in the file. The values in this dictionary are lists of input streams
+ referring to files in the archive.
+ """
+ result = {}
+ try:
+ archive = zipfile.ZipFile(path)
+ content_types = self.read_content_types(archive)
+ mime_types = self.assign_content_types(archive, content_types)
+ for path, mime_type in mime_types.items():
+ if mime_type not in result:
+ result[mime_type] = []
+ # Zipfile can open an infinite number of streams at the same time. Don't worry about it.
+ result[mime_type].append(archive.open(path))
+ except (zipfile.BadZipFile, EnvironmentError) as e:
+ # File is corrupt, or the OS prevents us from reading it (doesn't exist, no permissions, etc.)
+ log.error(f"Unable to read archive: {e}")
+ return result
+ return result
+
+ def read_content_types(self, archive):
+ """
+ Read the content types from a 3MF archive.
+
+ The output of this reading is a list of MIME types that are each mapped to a regular expression that matches on
+ the file paths within the archive that could contain this content type. This encodes both types of descriptors
+ for the content types that can occur in the content types document: Extensions and full paths.
+
+ The output is ordered in priority. Matches that should be evaluated first will be put in the front of the output
+ list.
+ :param archive: The 3MF archive to read the contents from.
+ :return: A list of tuples, in order of importance, where the first element describes a regex of paths that
+ match, and the second element is the MIME type string of the content type.
+ """
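+        # For example (illustrative): a Default node for the extension "model" with the 3D-model MIME type yields a
+        # pattern matching any *.model path, while an Override node for "/3D/other.model" yields a pattern matching
+        # only that exact path. Overrides are returned before Defaults.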
+ namespaces = {"ct": "http://schemas.openxmlformats.org/package/2006/content-types"}
+ result = []
+
+ try:
+ with archive.open(CONTENT_TYPES_LOCATION) as f:
+ try:
+ root = xml.etree.ElementTree.ElementTree(file=f)
+ except xml.etree.ElementTree.ParseError as e:
+ log.warning(
+                        f"{CONTENT_TYPES_LOCATION} has malformed XML "
+ f"(position {e.position[0]}:{e.position[1]}).")
+ root = None
+
+ if root is not None:
+ # Overrides are more important than defaults, so put those in front.
+ for override_node in root.iterfind("ct:Override", namespaces):
+ if "PartName" not in override_node.attrib or "ContentType" not in override_node.attrib:
+ log.warning("[Content_Types].xml malformed: Override node without path or MIME type.")
+ continue # Ignore the broken one.
+ match_regex = re.compile(re.escape(override_node.attrib["PartName"]))
+ result.append((match_regex, override_node.attrib["ContentType"]))
+
+ for default_node in root.iterfind("ct:Default", namespaces):
+ if "Extension" not in default_node.attrib or "ContentType" not in default_node.attrib:
+ log.warning("[Content_Types].xml malformed: Default node without extension or MIME type.")
+ continue # Ignore the broken one.
+ match_regex = re.compile(r".*\." + re.escape(default_node.attrib["Extension"]))
+ result.append((match_regex, default_node.attrib["ContentType"]))
+ except KeyError: # ZipFile reports that the content types file doesn't exist.
+ log.warning(f"{CONTENT_TYPES_LOCATION} file missing!")
+
+ # This parser should be robust to slightly broken files and retrieve what we can.
+ # In case the document is broken or missing, here we'll append the default ones for 3MF.
+ # If the content types file was fine, this gets least priority so the actual data still wins.
+ result.append((re.compile(r".*\.rels"), RELS_MIMETYPE))
+ result.append((re.compile(r".*\.model"), MODEL_MIMETYPE))
+
+ return result
+
+ def assign_content_types(self, archive, content_types):
+ """
+ Assign a MIME type to each file in the archive.
+
+ The MIME types are obtained through the content types file from the archive. This content types file itself is
+ not in the result though.
+ :param archive: A 3MF archive with files to assign content types to.
+ :param content_types: The content types for files in that archive, in order of priority.
+        :return: A dictionary mapping all file paths in the archive to a content type. If the content type for a file
+ is unknown, the content type will be an empty string.
+ """
+ result = {}
+ for file_info in archive.filelist:
+ file_path = file_info.filename
+ if file_path == CONTENT_TYPES_LOCATION: # Don't index this one.
+ continue
+ for pattern, content_type in content_types: # Process in the correct order!
+ if pattern.fullmatch(file_path):
+ result[file_path] = content_type
+ break
+ else: # None of the patterns matched.
+ result[file_path] = ""
+
+ return result
+
+ def must_preserve(self, files_by_content_type, annotations):
+ """
+ Preserves files that are marked with the 'MustPreserve' relationship and PrintTickets.
+
+ These files are saved in the Blender context as text files in a hidden folder. If the preserved files are in
+ conflict with previously loaded 3MF archives (same file path, different content) then they will not be
+ preserved.
+
+ Archived files are stored in Base85 encoding to allow storing arbitrary files, even binary files. This sadly
+ means that the file size will increase by about 25%, and that the files are not human-readable any more when
+ opened in Blender, even if they were originally human-readable.
+ :param files_by_content_type: The files in this 3MF archive, by content type. They must be provided by content
+ type because that is how the ``read_archive`` function stores them, which is not ideal. But this function will
+ sort that out.
+ :param annotations: Collection of annotations gathered so far.
+ """
+ preserved_files = set() # Find all files which must be preserved according to the annotations.
+ for target, its_annotations in annotations.annotations.items():
+ for annotation in its_annotations:
+ if type(annotation) == Relationship:
+ if annotation.namespace in {
+ "http://schemas.openxmlformats.org/package/2006/relationships/mustpreserve",
+ "http://schemas.microsoft.com/3dmanufacturing/2013/01/printticket"
+ }:
+ preserved_files.add(target)
+ elif type(annotation) == ContentType:
+ if annotation.mime_type == "application/vnd.ms-printing.printticket+xml":
+ preserved_files.add(target)
+
+ for files in files_by_content_type.values():
+ for file in files:
+ if file.name in preserved_files:
+ filename = ".3mf_preserved/" + file.name
+ if filename in bpy.data.texts:
+ if bpy.data.texts[filename].as_string() == conflicting_mustpreserve_contents:
+ # This file was previously already in conflict. The new file will always be in conflict with
+ # one of the previous files.
+ continue
+ # Encode as Base85 so that the file can be saved in Blender's Text objects.
+ file_contents = base64.b85encode(file.read()).decode('UTF-8')
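+ # The original bytes can be recovered with base64.b85decode, so even binary payloads survive the round trip.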
+ if filename in bpy.data.texts:
+ if bpy.data.texts[filename].as_string() == file_contents:
+ # File contents are EXACTLY the same, so the file is not in conflict.
+ continue # But we also don't need to re-add the same file then.
+ else: # Same file exists with different contents, so they are in conflict.
+ bpy.data.texts[filename].clear()
+ bpy.data.texts[filename].write(conflicting_mustpreserve_contents)
+ continue
+ else: # File doesn't exist yet.
+ handle = bpy.data.texts.new(filename)
+ handle.write(file_contents)
+
+ def is_supported(self, required_extensions):
+ """
+ Determines if a document is supported by this add-on.
+ :param required_extensions: The value of the `requiredextensions` attribute of the root node of the XML
+ document.
+ :return: `True` if the document is supported, or `False` if it's not.
+ """
+ extensions = required_extensions.split(" ")
+ extensions = set(filter(lambda x: x != "", extensions))
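+ # For example, an empty attribute produces an empty set, which is a subset of SUPPORTED_EXTENSIONS, so core-spec-only files are always supported.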
+ return extensions <= SUPPORTED_EXTENSIONS
+
+ def unit_scale(self, context, root):
+ """
+ Get the scaling factor we need to use for this document, according to its unit.
+ :param context: The Blender context.
+ :param root: An ElementTree root element containing the entire 3MF file.
+ :return: Floating point value that we need to scale this model by. A small number (<1) means that we need to
+ make the coordinates in Blender smaller than the coordinates in the file. A large number (>1) means we need to
+ make the coordinates in Blender larger than the coordinates in the file.
+ """
+ scale = self.global_scale
+
+ if context.scene.unit_settings.scale_length != 0:
+ scale /= context.scene.unit_settings.scale_length # Apply the global scale of the units in Blender.
+
+ threemf_unit = root.attrib.get("unit", MODEL_DEFAULT_UNIT)
+ blender_unit = context.scene.unit_settings.length_unit
+ scale *= threemf_to_metre[threemf_unit] # Convert 3MF units to metre.
+ scale /= blender_to_metre[blender_unit] # Convert metre to Blender's units.
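+ # Worked example: with global_scale 1.0 and Blender set to metres with scale_length 1.0, a file in millimetres gives a scale of 0.001.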
+
+ return scale
+
+ def read_metadata(self, node, original_metadata=None):
+ """
+ Reads the metadata tags from a metadata group.
+ :param node: A node in the 3MF document that contains <metadata> tags. This can be either a <model> root node,
+ or a <metadatagroup> node.
+ :param original_metadata: If there was already metadata for this context from other documents, you can provide
+ that metadata here. The metadata of those documents will be combined then.
+ :return: A `Metadata` object.
+ """
+ if original_metadata is not None:
+ metadata = original_metadata
+ else:
+ metadata = Metadata() # Create a new Metadata object.
+
+ for metadata_node in node.iterfind("./3mf:metadata", MODEL_NAMESPACES):
+ if "name" not in metadata_node.attrib:
+ log.warning("Metadata entry without name is discarded.")
+ continue # This metadata entry has no name, so there's no key under which to store it.
+ name = metadata_node.attrib["name"]
+ preserve_str = metadata_node.attrib.get("preserve", "0")
+ # We don't use this ourselves since we always preserve, but the preserve attribute itself will also be
+ # preserved.
+ preserve = preserve_str != "0" and preserve_str.lower() != "false"
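+ # So "1", "true" or "TRUE" all count as preserve; only "0" and "false" (in any casing) do not.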
+ datatype = metadata_node.attrib.get("type", "")
+ value = metadata_node.text
+
+ # Always store all metadata so that they are preserved.
+ metadata[name] = MetadataEntry(name=name, preserve=preserve, datatype=datatype, value=value)
+
+ return metadata
+
+ def read_materials(self, root):
+ """
+ Read out all of the material resources from the 3MF document.
+
+ The materials will be stored in `self.resource_materials` until it gets used to build the items.
+ :param root: The root of an XML document that may contain materials.
+ """
+ for basematerials_item in root.iterfind("./3mf:resources/3mf:basematerials", MODEL_NAMESPACES):
+ try:
+ material_id = basematerials_item.attrib["id"]
+ except KeyError:
+ log.warning("Encountered a basematerials item without resource ID.")
+ continue # Need to have an ID, or no item can refer to these materials. Skip this one.
+ if material_id in self.resource_materials:
+ log.warning(f"Duplicate material ID: {material_id}")
+ continue
+
+ # Use a dictionary mapping indices to resources, because some indices may be skipped due to being invalid.
+ self.resource_materials[material_id] = {}
+ index = 0
+
+ # "Base" must be the stupidest name for a material resource. Oh well.
+ for base_item in basematerials_item.iterfind("./3mf:base", MODEL_NAMESPACES):
+ name = base_item.attrib.get("name", "3MF Material")
+ color = base_item.attrib.get("displaycolor")
+ if color is not None:
+ # Parse the color. It's a hexadecimal number indicating RGB or RGBA.
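+ # For example, "#FF8000" parses to roughly (1.0, 0.5, 0.0, 1.0) and "#FF800080" to roughly (1.0, 0.5, 0.0, 0.5).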
+ color = color.lstrip("#") # It should start with a "#", but we'll be lenient if it doesn't.
+ try:
+ color_int = int(color, 16)
+ # Separate out up to four bytes from this int, from right to left.
+ b1 = (color_int & 0x000000FF) / 255
+ b2 = ((color_int & 0x0000FF00) >> 8) / 255
+ b3 = ((color_int & 0x00FF0000) >> 16) / 255
+ b4 = ((color_int & 0xFF000000) >> 24) / 255
+ if len(color) == 6: # RGB format.
+ color = (b3, b2, b1, 1.0) # b1, b2 and b3 are B, G, R respectively. b4 is always 0.
+ else: # RGBA format, or invalid.
+ color = (b4, b3, b2, b1) # b1, b2, b3 and b4 are A, B, G, R respectively.
+ except ValueError:
+ log.warning(f"Invalid color for material {name} of resource {material_id}: {color}")
+ color = None # Don't add a color for this material.
+
+ # Input is valid. Create a resource.
+ self.resource_materials[material_id][index] = ResourceMaterial(name=name, color=color)
+ index += 1
+
+ if len(self.resource_materials[material_id]) == 0:
+ del self.resource_materials[material_id] # Don't leave empty material sets hanging.
+
+ def read_objects(self, root):
+ """
+ Reads all repeatable build objects from the resources of an XML root node.
+
+ This stores them in the resource_objects field.
+ :param root: The root node of a 3dmodel.model XML file.
+ """
+ for object_node in root.iterfind("./3mf:resources/3mf:object", MODEL_NAMESPACES):
+ try:
+ objectid = object_node.attrib["id"]
+ except KeyError:
+ log.warning("Object resource without ID!")
+ continue # ID is required, otherwise the build can't refer to it.
+
+ pid = object_node.attrib.get("pid") # Material ID.
+ pindex = object_node.attrib.get("pindex") # Index within a collection of materials.
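+ # For example, pid="5" with pindex="2" would select the third <base> material of the <basematerials> group with ID "5".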
+ material = None
+ if pid is not None and pindex is not None:
+ try:
+ index = int(pindex)
+ material = self.resource_materials[pid][index]
+ except KeyError:
+ log.warning(
+ f"Object with ID {objectid} refers to material collection {pid} with index {pindex}"
+ f"which doesn't exist.")
+ except ValueError:
+ log.warning(f"Object with ID {objectid} specifies material index {pindex}, which is not integer.")
+
+ vertices = self.read_vertices(object_node)
+ triangles, materials = self.read_triangles(object_node, material, pid)
+ components = self.read_components(object_node)
+ metadata = Metadata()
+ for metadata_node in object_node.iterfind("./3mf:metadatagroup", MODEL_NAMESPACES):
+ metadata = self.read_metadata(metadata_node, metadata)
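+ # Each <metadatagroup> is merged into the same Metadata object, so multiple groups on one object accumulate.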
+ if "partnumber" in object_node.attrib:
+ # Blender has no way to ensure that custom properties get preserved if a mesh is split up, but for most
+ # operations this is retained properly.
+ metadata["3mf:partnumber"] = MetadataEntry(
+ name="3mf:partnumber",
+ preserve=True,
+ datatype="xs:string",
+ value=object_node.attrib["partnumber"])
+ metadata["3mf:object_type"] = MetadataEntry(
+ name="3mf:object_type",
+ preserve=True,
+ datatype="xs:string",
+ value=object_node.attrib.get("type", "model"))
+
+ self.resource_objects[objectid] = ResourceObject(
+ vertices=vertices,
+ triangles=triangles,
+ materials=materials,
+ components=components,
+ metadata=metadata)
+
+ def read_vertices(self, object_node):
+ """
+ Reads out the vertices from an XML node of an object.
+
+ If any vertex is corrupt, like with a coordinate missing or not proper floats, then the 0 coordinate will be
+ used. This is to prevent messing up the list of indices.
+ :param object_node: An