Browse Source

Merge pull request #5514 from repsac/io_three

New Blender exporter
Mr.doob 10 years ago
parent
commit
4345597a6e
100 changed files with 8316 additions and 3789 deletions
  1. 2 0
      utils/exporters/blender/.gitignore
  2. 0 462
      utils/exporters/blender/2.65/scripts/addons/io_mesh_threejs/__init__.py
  3. 0 2689
      utils/exporters/blender/2.65/scripts/addons/io_mesh_threejs/export_threejs.py
  4. 0 633
      utils/exporters/blender/2.65/scripts/addons/io_mesh_threejs/import_threejs.py
  5. 17 5
      utils/exporters/blender/README.md
  6. 643 0
      utils/exporters/blender/addons/io_three/__init__.py
  7. 311 0
      utils/exporters/blender/addons/io_three/constants.py
  8. 86 0
      utils/exporters/blender/addons/io_three/exporter/__init__.py
  9. 216 0
      utils/exporters/blender/addons/io_three/exporter/_json.py
  10. 31 0
      utils/exporters/blender/addons/io_three/exporter/api/__init__.py
  11. 66 0
      utils/exporters/blender/addons/io_three/exporter/api/camera.py
  12. 29 0
      utils/exporters/blender/addons/io_three/exporter/api/constants.py
  13. 28 0
      utils/exporters/blender/addons/io_three/exporter/api/image.py
  14. 41 0
      utils/exporters/blender/addons/io_three/exporter/api/light.py
  15. 221 0
      utils/exporters/blender/addons/io_three/exporter/api/material.py
  16. 900 0
      utils/exporters/blender/addons/io_three/exporter/api/mesh.py
  17. 407 0
      utils/exporters/blender/addons/io_three/exporter/api/object.py
  18. 102 0
      utils/exporters/blender/addons/io_three/exporter/api/texture.py
  19. 100 0
      utils/exporters/blender/addons/io_three/exporter/base_classes.py
  20. 8 0
      utils/exporters/blender/addons/io_three/exporter/exceptions.py
  21. 353 0
      utils/exporters/blender/addons/io_three/exporter/geometry.py
  22. 25 0
      utils/exporters/blender/addons/io_three/exporter/image.py
  23. 67 0
      utils/exporters/blender/addons/io_three/exporter/io.py
  24. 58 0
      utils/exporters/blender/addons/io_three/exporter/logger.py
  25. 83 0
      utils/exporters/blender/addons/io_three/exporter/material.py
  26. 116 0
      utils/exporters/blender/addons/io_three/exporter/object.py
  27. 196 0
      utils/exporters/blender/addons/io_three/exporter/scene.py
  28. 32 0
      utils/exporters/blender/addons/io_three/exporter/texture.py
  29. 64 0
      utils/exporters/blender/addons/io_three/exporter/utilities.py
  30. 2 0
      utils/exporters/blender/modules/README.md
  31. 54 0
      utils/exporters/blender/modules/msgpack/__init__.py
  32. BIN
      utils/exporters/blender/modules/msgpack/__pycache__/__init__.cpython-34.pyc
  33. BIN
      utils/exporters/blender/modules/msgpack/__pycache__/_version.cpython-34.pyc
  34. BIN
      utils/exporters/blender/modules/msgpack/__pycache__/exceptions.cpython-34.pyc
  35. BIN
      utils/exporters/blender/modules/msgpack/__pycache__/fallback.cpython-34.pyc
  36. 295 0
      utils/exporters/blender/modules/msgpack/_packer.pyx
  37. 426 0
      utils/exporters/blender/modules/msgpack/_unpacker.pyx
  38. 1 0
      utils/exporters/blender/modules/msgpack/_version.py
  39. 29 0
      utils/exporters/blender/modules/msgpack/exceptions.py
  40. 714 0
      utils/exporters/blender/modules/msgpack/fallback.py
  41. 103 0
      utils/exporters/blender/modules/msgpack/pack.h
  42. 785 0
      utils/exporters/blender/modules/msgpack/pack_template.h
  43. 194 0
      utils/exporters/blender/modules/msgpack/sysdep.h
  44. 263 0
      utils/exporters/blender/modules/msgpack/unpack.h
  45. 95 0
      utils/exporters/blender/modules/msgpack/unpack_define.h
  46. 475 0
      utils/exporters/blender/modules/msgpack/unpack_template.h
  47. 19 0
      utils/exporters/blender/tests/README.md
  48. BIN
      utils/exporters/blender/tests/blend/anim.blend
  49. BIN
      utils/exporters/blender/tests/blend/cubeA.blend
  50. BIN
      utils/exporters/blender/tests/blend/cubeB.blend
  51. BIN
      utils/exporters/blender/tests/blend/cubeC.blend
  52. BIN
      utils/exporters/blender/tests/blend/light_setup.blend
  53. BIN
      utils/exporters/blender/tests/blend/lightmap.blend
  54. BIN
      utils/exporters/blender/tests/blend/persp_camera.blend
  55. BIN
      utils/exporters/blender/tests/blend/planeA.blend
  56. BIN
      utils/exporters/blender/tests/blend/planeB.blend
  57. BIN
      utils/exporters/blender/tests/blend/scene_area_light.blend
  58. BIN
      utils/exporters/blender/tests/blend/scene_directional_light.blend
  59. BIN
      utils/exporters/blender/tests/blend/scene_hemi_light.blend
  60. BIN
      utils/exporters/blender/tests/blend/scene_instancing.blend
  61. BIN
      utils/exporters/blender/tests/blend/scene_maps.blend
  62. BIN
      utils/exporters/blender/tests/blend/scene_orthographic_camera.blend
  63. BIN
      utils/exporters/blender/tests/blend/scene_perspective_camera.blend
  64. BIN
      utils/exporters/blender/tests/blend/scene_point_light.blend
  65. BIN
      utils/exporters/blender/tests/blend/scene_spot_light.blend
  66. BIN
      utils/exporters/blender/tests/blend/textures/cloud.png
  67. BIN
      utils/exporters/blender/tests/blend/textures/lightmap.png
  68. BIN
      utils/exporters/blender/tests/blend/textures/normal.png
  69. BIN
      utils/exporters/blender/tests/blend/textures/uv_grid.jpg
  70. BIN
      utils/exporters/blender/tests/blend/three_point.blend
  71. BIN
      utils/exporters/blender/tests/blend/torusA.blend
  72. 13 0
      utils/exporters/blender/tests/scripts/css/style.css
  73. 37 0
      utils/exporters/blender/tests/scripts/exporter.py
  74. 249 0
      utils/exporters/blender/tests/scripts/js/review.js
  75. 127 0
      utils/exporters/blender/tests/scripts/review.py
  76. 29 0
      utils/exporters/blender/tests/scripts/setup_test_env.bash
  77. 8 0
      utils/exporters/blender/tests/scripts/test_buffer_geometry.bash
  78. 8 0
      utils/exporters/blender/tests/scripts/test_geometry.bash
  79. 9 0
      utils/exporters/blender/tests/scripts/test_geometry_animation.bash
  80. 8 0
      utils/exporters/blender/tests/scripts/test_geometry_bump_spec_maps.bash
  81. 8 0
      utils/exporters/blender/tests/scripts/test_geometry_diffuse_map.bash
  82. 8 0
      utils/exporters/blender/tests/scripts/test_geometry_lambert_material.bash
  83. 8 0
      utils/exporters/blender/tests/scripts/test_geometry_light_map.bash
  84. 8 0
      utils/exporters/blender/tests/scripts/test_geometry_mix_colors.bash
  85. 8 0
      utils/exporters/blender/tests/scripts/test_geometry_morph_targets.bash
  86. 9 0
      utils/exporters/blender/tests/scripts/test_geometry_normal_map.bash
  87. 8 0
      utils/exporters/blender/tests/scripts/test_geometry_normals.bash
  88. 8 0
      utils/exporters/blender/tests/scripts/test_geometry_phong_material.bash
  89. 8 0
      utils/exporters/blender/tests/scripts/test_geometry_vertex_colors.bash
  90. 8 0
      utils/exporters/blender/tests/scripts/test_geometry_wireframe.bash
  91. 9 0
      utils/exporters/blender/tests/scripts/test_scene_area_light.bash
  92. 9 0
      utils/exporters/blender/tests/scripts/test_scene_buffer_geometry.bash
  93. 9 0
      utils/exporters/blender/tests/scripts/test_scene_buffer_geometry_noembed.bash
  94. 9 0
      utils/exporters/blender/tests/scripts/test_scene_directional_light.bash
  95. 9 0
      utils/exporters/blender/tests/scripts/test_scene_hemi_light.bash
  96. 9 0
      utils/exporters/blender/tests/scripts/test_scene_instancing.bash
  97. 9 0
      utils/exporters/blender/tests/scripts/test_scene_maps.bash
  98. 9 0
      utils/exporters/blender/tests/scripts/test_scene_no_embed.bash
  99. 9 0
      utils/exporters/blender/tests/scripts/test_scene_orthographic.bash
  100. 9 0
      utils/exporters/blender/tests/scripts/test_scene_perspective.bash

+ 2 - 0
utils/exporters/blender/.gitignore

@@ -0,0 +1,2 @@
+tests/review
+__pycache__/

+ 0 - 462
utils/exporters/blender/2.65/scripts/addons/io_mesh_threejs/__init__.py

@@ -1,462 +0,0 @@
-# ##### BEGIN GPL LICENSE BLOCK #####
-#
-#  This program is free software; you can redistribute it and/or
-#  modify it under the terms of the GNU General Public License
-#  as published by the Free Software Foundation; either version 2
-#  of the License, or (at your option) any later version.
-#
-#  This program is distributed in the hope that it will be useful,
-#  but WITHOUT ANY WARRANTY; without even the implied warranty of
-#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-#  GNU General Public License for more details.
-#
-#  You should have received a copy of the GNU General Public License
-#  along with this program; if not, write to the Free Software Foundation,
-#  Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-#
-# ##### END GPL LICENSE BLOCK #####
-
-# ################################################################
-# Init
-# ################################################################
-
-
-bl_info = {
-    "name": "three.js format",
-    "author": "mrdoob, kikko, alteredq, remoe, pxf, n3tfr34k, crobi",
-    "version": (1, 5, 0),
-    "blender": (2, 7, 0),
-    "location": "File > Import-Export",
-    "description": "Import-Export three.js meshes",
-    "warning": "",
-    "wiki_url": "https://github.com/mrdoob/three.js/tree/master/utils/exporters/blender",
-    "tracker_url": "https://github.com/mrdoob/three.js/issues",
-    "category": "Import-Export"}
-
-# To support reload properly, try to access a package var,
-# if it's there, reload everything
-
-import bpy
-
-if "bpy" in locals():
-    import imp
-    if "export_threejs" in locals():
-        imp.reload(export_threejs)
-    if "import_threejs" in locals():
-        imp.reload(import_threejs)
-
-from bpy.props import *
-from bpy_extras.io_utils import ExportHelper, ImportHelper
-
-# ################################################################
-# Custom properties
-# ################################################################
-
-bpy.types.Object.THREE_castShadow = bpy.props.BoolProperty()
-bpy.types.Object.THREE_receiveShadow = bpy.props.BoolProperty()
-bpy.types.Object.THREE_doubleSided = bpy.props.BoolProperty()
-bpy.types.Object.THREE_exportGeometry = bpy.props.BoolProperty(default = True)
-bpy.types.Object.THREE_visible = bpy.props.BoolProperty(default = True)
-
-bpy.types.Material.THREE_useVertexColors = bpy.props.BoolProperty()
-bpy.types.Material.THREE_depthWrite = bpy.props.BoolProperty(default = True)
-bpy.types.Material.THREE_depthTest = bpy.props.BoolProperty(default = True)
-
-THREE_material_types = [("Basic", "Basic", "Basic"), ("Phong", "Phong", "Phong"), ("Lambert", "Lambert", "Lambert")]
-bpy.types.Material.THREE_materialType = EnumProperty(name = "Material type", description = "Material type", items = THREE_material_types, default = "Lambert")
-
-THREE_blending_types = [("NoBlending", "NoBlending", "NoBlending"), ("NormalBlending", "NormalBlending", "NormalBlending"),
-                        ("AdditiveBlending", "AdditiveBlending", "AdditiveBlending"), ("SubtractiveBlending", "SubtractiveBlending", "SubtractiveBlending"),
-                        ("MultiplyBlending", "MultiplyBlending", "MultiplyBlending"), ("AdditiveAlphaBlending", "AdditiveAlphaBlending", "AdditiveAlphaBlending")]
-bpy.types.Material.THREE_blendingType = EnumProperty(name = "Blending type", description = "Blending type", items = THREE_blending_types, default = "NormalBlending")
-
-class OBJECT_PT_hello( bpy.types.Panel ):
-
-    bl_label = "THREE"
-    bl_space_type = "PROPERTIES"
-    bl_region_type = "WINDOW"
-    bl_context = "object"
-
-    def draw(self, context):
-        layout = self.layout
-        obj = context.object
-
-        row = layout.row()
-        row.label(text="Selected object: " + obj.name )
-
-        row = layout.row()
-        row.prop( obj, "THREE_exportGeometry", text="Export geometry" )
-
-        row = layout.row()
-        row.prop( obj, "THREE_castShadow", text="Casts shadow" )
-
-        row = layout.row()
-        row.prop( obj, "THREE_receiveShadow", text="Receives shadow" )
-
-        row = layout.row()
-        row.prop( obj, "THREE_doubleSided", text="Double sided" )
-        
-        row = layout.row()
-        row.prop( obj, "THREE_visible", text="Visible" )
-
-class MATERIAL_PT_hello( bpy.types.Panel ):
-
-    bl_label = "THREE"
-    bl_space_type = "PROPERTIES"
-    bl_region_type = "WINDOW"
-    bl_context = "material"
-
-    def draw(self, context):
-        layout = self.layout
-        mat = context.material
-
-        row = layout.row()
-        row.label(text="Selected material: " + mat.name )
-
-        row = layout.row()
-        row.prop( mat, "THREE_materialType", text="Material type" )
-
-        row = layout.row()
-        row.prop( mat, "THREE_blendingType", text="Blending type" )
-
-        row = layout.row()
-        row.prop( mat, "THREE_useVertexColors", text="Use vertex colors" )
-
-        row = layout.row()
-        row.prop( mat, "THREE_depthWrite", text="Enable depth writing" )
-
-        row = layout.row()
-        row.prop( mat, "THREE_depthTest", text="Enable depth testing" )
-
-
-# ################################################################
-# Importer
-# ################################################################
-
-class ImportTHREEJS(bpy.types.Operator, ImportHelper):
-    '''Load a Three.js ASCII JSON model'''
-
-    bl_idname = "import.threejs"
-    bl_label = "Import Three.js"
-
-    filename_ext = ".json"
-    filter_glob = StringProperty(default="*.json", options={'HIDDEN'})
-
-    option_flip_yz = BoolProperty(name="Flip YZ", description="Flip YZ", default=True)
-    recalculate_normals = BoolProperty(name="Recalculate normals", description="Recalculate vertex normals", default=True)
-    option_worker = BoolProperty(name="Worker", description="Old format using workers", default=False)
-
-    def execute(self, context):
-        import io_mesh_threejs.import_threejs
-        return io_mesh_threejs.import_threejs.load(self, context, **self.properties)
-
-
-    def draw(self, context):
-        layout = self.layout
-
-        row = layout.row()
-        row.prop(self.properties, "option_flip_yz")
-
-        row = layout.row()
-        row.prop(self.properties, "recalculate_normals")
-
-        row = layout.row()
-        row.prop(self.properties, "option_worker")
-
-
-# ################################################################
-# Exporter - settings
-# ################################################################
-
-SETTINGS_FILE_EXPORT = "threejs_settings_export.js"
-
-import os
-import json
-
-def file_exists(filename):
-    """Return true if file exists and accessible for reading.
-
-    Should be safer than just testing for existence due to links and
-    permissions magic on Unix filesystems.
-
-    @rtype: boolean
-    """
-
-    try:
-        f = open(filename, 'r')
-        f.close()
-        return True
-    except IOError:
-        return False
-
-def get_settings_fullpath():
-    return os.path.join(bpy.app.tempdir, SETTINGS_FILE_EXPORT)
-
-def save_settings_export(properties):
-
-    settings = {
-    "option_export_scene" : properties.option_export_scene,
-    "option_embed_meshes" : properties.option_embed_meshes,
-    "option_url_base_html" : properties.option_url_base_html,
-    "option_copy_textures" : properties.option_copy_textures,
-
-    "option_lights" : properties.option_lights,
-    "option_cameras" : properties.option_cameras,
-
-    "option_animation_morph" : properties.option_animation_morph,
-    "option_animation_skeletal" : properties.option_animation_skeletal,
-    "option_frame_index_as_time" : properties.option_frame_index_as_time,
-
-    "option_frame_step" : properties.option_frame_step,
-    "option_all_meshes" : properties.option_all_meshes,
-
-    "option_flip_yz"      : properties.option_flip_yz,
-
-    "option_materials"       : properties.option_materials,
-    "option_normals"         : properties.option_normals,
-    "option_colors"          : properties.option_colors,
-    "option_uv_coords"       : properties.option_uv_coords,
-    "option_faces"           : properties.option_faces,
-    "option_vertices"        : properties.option_vertices,
-
-    "option_skinning"        : properties.option_skinning,
-    "option_bones"           : properties.option_bones,
-
-    "option_vertices_truncate" : properties.option_vertices_truncate,
-    "option_scale"        : properties.option_scale,
-
-    "align_model"         : properties.align_model
-    }
-
-    fname = get_settings_fullpath()
-    f = open(fname, "w")
-    json.dump(settings, f)
-
-def restore_settings_export(properties):
-
-    settings = {}
-
-    fname = get_settings_fullpath()
-    if file_exists(fname):
-        f = open(fname, "r")
-        settings = json.load(f)
-
-    properties.option_vertices = settings.get("option_vertices", True)
-    properties.option_vertices_truncate = settings.get("option_vertices_truncate", False)
-    properties.option_faces = settings.get("option_faces", True)
-    properties.option_normals = settings.get("option_normals", True)
-
-    properties.option_colors = settings.get("option_colors", True)
-    properties.option_uv_coords = settings.get("option_uv_coords", True)
-    properties.option_materials = settings.get("option_materials", True)
-
-    properties.option_skinning = settings.get("option_skinning", True)
-    properties.option_bones = settings.get("option_bones", True)
-
-    properties.align_model = settings.get("align_model", "None")
-
-    properties.option_scale = settings.get("option_scale", 1.0)
-    properties.option_flip_yz = settings.get("option_flip_yz", True)
-
-    properties.option_export_scene = settings.get("option_export_scene", False)
-    properties.option_embed_meshes = settings.get("option_embed_meshes", True)
-    properties.option_url_base_html = settings.get("option_url_base_html", False)
-    properties.option_copy_textures = settings.get("option_copy_textures", False)
-
-    properties.option_lights = settings.get("option_lights", False)
-    properties.option_cameras = settings.get("option_cameras", False)
-
-    properties.option_animation_morph = settings.get("option_animation_morph", False)
-    properties.option_animation_skeletal = settings.get("option_animation_skeletal", False)
-    properties.option_frame_index_as_time = settings.get("option_frame_index_as_time", False)
-
-    properties.option_frame_step = settings.get("option_frame_step", 1)
-    properties.option_all_meshes = settings.get("option_all_meshes", True)
-
-# ################################################################
-# Exporter
-# ################################################################
-
-class ExportTHREEJS(bpy.types.Operator, ExportHelper):
-    '''Export selected object / scene for Three.js (ASCII JSON format).'''
-
-    bl_idname = "export.threejs"
-    bl_label = "Export Three.js"
-
-    filename_ext = ".json"
-
-    option_vertices = BoolProperty(name = "Vertices", description = "Export vertices", default = True)
-    option_vertices_deltas = BoolProperty(name = "Deltas", description = "Delta vertices", default = False)
-    option_vertices_truncate = BoolProperty(name = "Truncate", description = "Truncate vertices", default = False)
-
-    option_faces = BoolProperty(name = "Faces", description = "Export faces", default = True)
-    option_faces_deltas = BoolProperty(name = "Deltas", description = "Delta faces", default = False)
-
-    option_normals = BoolProperty(name = "Normals", description = "Export normals", default = True)
-
-    option_colors = BoolProperty(name = "Colors", description = "Export vertex colors", default = True)
-    option_uv_coords = BoolProperty(name = "UVs", description = "Export texture coordinates", default = True)
-    option_materials = BoolProperty(name = "Materials", description = "Export materials", default = True)
-
-    option_skinning = BoolProperty(name = "Skinning", description = "Export skin data", default = True)
-    option_bones = BoolProperty(name = "Bones", description = "Export bones", default = True)
-
-    align_types = [("None","None","None"), ("Center","Center","Center"), ("Bottom","Bottom","Bottom"), ("Top","Top","Top")]
-    align_model = EnumProperty(name = "Align model", description = "Align model", items = align_types, default = "None")
-
-    option_scale = FloatProperty(name = "Scale", description = "Scale vertices", min = 0.01, max = 1000.0, soft_min = 0.01, soft_max = 1000.0, default = 1.0)
-    option_flip_yz = BoolProperty(name = "Flip YZ", description = "Flip YZ", default = True)
-
-    option_export_scene = BoolProperty(name = "Scene", description = "Export scene", default = False)
-    option_embed_meshes = BoolProperty(name = "Embed meshes", description = "Embed meshes", default = True)
-    option_copy_textures = BoolProperty(name = "Copy textures", description = "Copy textures", default = False)
-    option_url_base_html = BoolProperty(name = "HTML as url base", description = "Use HTML as url base ", default = False)
-
-    option_lights = BoolProperty(name = "Lights", description = "Export default scene lights", default = False)
-    option_cameras = BoolProperty(name = "Cameras", description = "Export default scene cameras", default = False)
-
-    option_animation_morph = BoolProperty(name = "Morph animation", description = "Export animation (morphs)", default = False)
-    option_animation_skeletal = BoolProperty(name = "Skeletal animation", description = "Export animation (skeletal)", default = False)
-    option_frame_index_as_time = BoolProperty(name = "Frame index as time", description = "Use (original) frame index as frame time", default = False)
-
-    option_frame_step = IntProperty(name = "Frame step", description = "Animation frame step", min = 1, max = 1000, soft_min = 1, soft_max = 1000, default = 1)
-    option_all_meshes = BoolProperty(name = "All meshes", description = "All meshes (merged)", default = True)
-
-    def invoke(self, context, event):
-        restore_settings_export(self.properties)
-        return ExportHelper.invoke(self, context, event)
-
-    @classmethod
-    def poll(cls, context):
-        return context.active_object != None
-
-    def execute(self, context):
-        print("Selected: " + context.active_object.name)
-
-        if not self.properties.filepath:
-            raise Exception("filename not set")
-
-        save_settings_export(self.properties)
-
-        filepath = self.filepath
-
-        import io_mesh_threejs.export_threejs
-        return io_mesh_threejs.export_threejs.save(self, context, **self.properties)
-
-    def draw(self, context):
-        layout = self.layout
-
-        row = layout.row()
-        row.label(text="Geometry:")
-
-        row = layout.row()
-        row.prop(self.properties, "option_vertices")
-        # row = layout.row()
-        # row.enabled = self.properties.option_vertices
-        # row.prop(self.properties, "option_vertices_deltas")
-        row.prop(self.properties, "option_vertices_truncate")
-        layout.separator()
-
-        row = layout.row()
-        row.prop(self.properties, "option_faces")
-        row = layout.row()
-        row.enabled = self.properties.option_faces
-        # row.prop(self.properties, "option_faces_deltas")
-        layout.separator()
-
-        row = layout.row()
-        row.prop(self.properties, "option_normals")
-        layout.separator()
-
-        row = layout.row()
-        row.prop(self.properties, "option_bones")
-        row.prop(self.properties, "option_skinning")
-        layout.separator()
-
-        row = layout.row()
-        row.label(text="Materials:")
-
-        row = layout.row()
-        row.prop(self.properties, "option_uv_coords")
-        row.prop(self.properties, "option_colors")
-        row = layout.row()
-        row.prop(self.properties, "option_materials")
-        layout.separator()
-
-        row = layout.row()
-        row.label(text="Settings:")
-
-        row = layout.row()
-        row.prop(self.properties, "align_model")
-        row = layout.row()
-        row.prop(self.properties, "option_flip_yz")
-        row.prop(self.properties, "option_scale")
-        layout.separator()
-
-        row = layout.row()
-        row.label(text="--------- Experimental ---------")
-        layout.separator()
-
-        row = layout.row()
-        row.label(text="Scene:")
-
-        row = layout.row()
-        row.prop(self.properties, "option_export_scene")
-        row.prop(self.properties, "option_embed_meshes")
-
-        row = layout.row()
-        row.prop(self.properties, "option_lights")
-        row.prop(self.properties, "option_cameras")
-        layout.separator()
-
-        row = layout.row()
-        row.label(text="Animation:")
-
-        row = layout.row()
-        row.prop(self.properties, "option_animation_morph")
-        row = layout.row()
-        row.prop(self.properties, "option_animation_skeletal")
-        row = layout.row()
-        row.prop(self.properties, "option_frame_index_as_time")
-        row = layout.row()
-        row.prop(self.properties, "option_frame_step")
-        layout.separator()
-
-        row = layout.row()
-        row.label(text="Settings:")
-
-        row = layout.row()
-        row.prop(self.properties, "option_all_meshes")
-
-        row = layout.row()
-        row.prop(self.properties, "option_copy_textures")
-
-        row = layout.row()
-        row.prop(self.properties, "option_url_base_html")
-
-        layout.separator()
-
-
-# ################################################################
-# Common
-# ################################################################
-
-def menu_func_export(self, context):
-    default_path = bpy.data.filepath.replace(".blend", ".json")
-    self.layout.operator(ExportTHREEJS.bl_idname, text="Three.js (.json)").filepath = default_path
-
-def menu_func_import(self, context):
-    self.layout.operator(ImportTHREEJS.bl_idname, text="Three.js (.json)")
-
-def register():
-    bpy.utils.register_module(__name__)
-    bpy.types.INFO_MT_file_export.append(menu_func_export)
-    bpy.types.INFO_MT_file_import.append(menu_func_import)
-
-def unregister():
-    bpy.utils.unregister_module(__name__)
-    bpy.types.INFO_MT_file_export.remove(menu_func_export)
-    bpy.types.INFO_MT_file_import.remove(menu_func_import)
-
-if __name__ == "__main__":
-    register()

+ 0 - 2689
utils/exporters/blender/2.65/scripts/addons/io_mesh_threejs/export_threejs.py

@@ -1,2689 +0,0 @@
-# ##### BEGIN GPL LICENSE BLOCK #####
-#
-#  This program is free software; you can redistribute it and/or
-#  modify it under the terms of the GNU General Public License
-#  as published by the Free Software Foundation; either version 2
-#  of the License, or (at your option) any later version.
-#
-#  This program is distributed in the hope that it will be useful,
-#  but WITHOUT ANY WARRANTY; without even the implied warranty of
-#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-#  GNU General Public License for more details.
-#
-#  You should have received a copy of the GNU General Public License
-#  along with this program; if not, write to the Free Software Foundation,
-#  Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-#
-# ##### END GPL LICENSE BLOCK #####
-
-"""
-Blender exporter for Three.js (ASCII JSON format).
-
-TODO
-    - binary format
-"""
-
-import bpy
-import mathutils
-
-import shutil
-import os
-import os.path
-import math
-import operator
-import random
-
-# #####################################################
-# Configuration
-# #####################################################
-
-DEFAULTS = {
-"bgcolor" : [0, 0, 0],
-"bgalpha" : 1.0,
-
-"position" : [0, 0, 0],
-"rotation" : [0, 0, 0],
-"scale"    : [1, 1, 1],
-
-"camera"  :
-    {
-        "name" : "default_camera",
-        "type" : "PerspectiveCamera",
-        "near" : 1,
-        "far"  : 10000,
-        "fov"  : 60,
-        "aspect": 1.333,
-        "position" : [0, 0, 10],
-        "target"   : [0, 0, 0]
-    },
-
-"light" :
- {
-    "name"       : "default_light",
-    "type"       : "DirectionalLight",
-    "direction"  : [0, 1, 1],
-    "color"      : [1, 1, 1],
-    "intensity"  : 0.8
- }
-}
-
-ROTATE_X_PI2 = mathutils.Quaternion((1.0, 0.0, 0.0), math.radians(-90.0)).to_matrix().to_4x4()
-
-# default colors for debugging (each material gets one distinct color):
-# white, red, green, blue, yellow, cyan, magenta
-COLORS = [0xeeeeee, 0xee0000, 0x00ee00, 0x0000ee, 0xeeee00, 0x00eeee, 0xee00ee]
-
-
-# skinning
-MAX_INFLUENCES = 2
-
-
-# #####################################################
-# Templates - scene
-# #####################################################
-
-TEMPLATE_SCENE_ASCII = """\
-{
-
-	"urlBaseType": %(basetype)s,
-
-	"metadata": {
-		"formatVersion" : 3.2,
-		"type"          : "scene",
-		"sourceFile"    : "%(fname)s",
-		"generatedBy"   : "Blender 2.7 Exporter",
-		"objects"       : %(nobjects)s,
-		"geometries"    : %(ngeometries)s,
-		"materials"     : %(nmaterials)s,
-		"textures"      : %(ntextures)s
-	},
-%(sections)s
-	"transform": {
-		"position" : %(position)s,
-		"rotation" : %(rotation)s,
-		"scale"    : %(scale)s
-	},
-
-	"defaults": {
-		"bgcolor" : %(bgcolor)s,
-		"bgalpha" : %(bgalpha)f,
-		"camera"  : %(defcamera)s
-	}
-
-}
-"""
-
-TEMPLATE_SECTION = """
-	"%s": {
-%s
-	},
-"""
-
-TEMPLATE_OBJECT = """\
-		%(object_id)s: {
-			"geometry"      : %(geometry_id)s,
-			"groups"        : [ %(group_id)s ],
-			"material"      : %(material_id)s,
-			"position"      : %(position)s,
-			"rotation"      : %(rotation)s,
-			"quaternion"    : %(quaternion)s,
-			"scale"         : %(scale)s,
-			"visible"       : %(visible)s,
-			"castShadow"    : %(castShadow)s,
-			"receiveShadow" : %(receiveShadow)s,
-			"doubleSided"   : %(doubleSided)s
-		}"""
-
-TEMPLATE_EMPTY = """\
-		%(object_id)s: {
-			"groups"     : [ %(group_id)s ],
-			"position"   : %(position)s,
-			"rotation"   : %(rotation)s,
-			"quaternion" : %(quaternion)s,
-			"scale"      : %(scale)s
-		}"""
-
-TEMPLATE_GEOMETRY_LINK = """\
-		%(geometry_id)s: {
-			"type" : "ascii",
-			"url"  : %(model_file)s
-		}"""
-
-TEMPLATE_GEOMETRY_EMBED = """\
-		%(geometry_id)s: {
-			"type" : "embedded",
-			"id"   : %(embed_id)s
-		}"""
-
-TEMPLATE_TEXTURE = """\
-		%(texture_id)s: {
-			"url" : %(texture_file)s%(extras)s
-		}"""
-
-TEMPLATE_MATERIAL_SCENE = """\
-		%(material_id)s: {
-			"type"       : %(type)s,
-			"parameters" : { %(parameters)s }
-		}"""
-
-TEMPLATE_CAMERA_PERSPECTIVE = """\
-		%(camera_id)s: {
-			"type"     : "PerspectiveCamera",
-			"fov"      : %(fov)f,
-			"aspect"   : %(aspect)f,
-			"near"     : %(near)f,
-			"far"      : %(far)f,
-			"position" : %(position)s,
-			"target"   : %(target)s
-		}"""
-
-TEMPLATE_CAMERA_ORTHO = """\
-		%(camera_id)s: {
-			"type"     : "OrthographicCamera",
-			"left"     : %(left)f,
-			"right"    : %(right)f,
-			"top"      : %(top)f,
-			"bottom"   : %(bottom)f,
-			"near"     : %(near)f,
-			"far"      : %(far)f,
-			"position" : %(position)s,
-			"target"   : %(target)s
-		}"""
-
-TEMPLATE_LIGHT_POINT = """\
-		%(light_id)s: {
-			"type"      : "PointLight",
-			"position"  : %(position)s,
-			"rotation"  : %(rotation)s,
-			"color"     : %(color)d,
-			"distance"  : %(distance).3f,
-			"intensity" : %(intensity).3f
-		}"""
-
-TEMPLATE_LIGHT_SUN = """\
-		%(light_id)s: {
-			"type"      : "AmbientLight",
-			"position"  : %(position)s,
-			"rotation"  : %(rotation)s,
-			"color"     : %(color)d,
-			"distance"  : %(distance).3f,
-			"intensity" : %(intensity).3f
-		}"""
-
-TEMPLATE_LIGHT_SPOT = """\
-		%(light_id)s: {
-			"type"       : "SpotLight",
-			"position"   : %(position)s,
-			"rotation"   : %(rotation)s,
-			"color"      : %(color)d,
-			"distance"   : %(distance).3f,
-			"intensity"  : %(intensity).3f,
-			"use_shadow" : %(use_shadow)d,
-			"angle"      : %(angle).3f
-		}"""
-
-TEMPLATE_LIGHT_HEMI = """\
-		%(light_id)s: {
-			"type"      : "HemisphereLight",
-			"position"  : %(position)s,
-			"rotation"  : %(rotation)s,
-			"color"     : %(color)d,
-			"distance"  : %(distance).3f,
-			"intensity" : %(intensity).3f
-		}"""
-
-TEMPLATE_LIGHT_AREA = """\
-		%(light_id)s: {
-			"type"      : "AreaLight",
-			"position"  : %(position)s,
-			"rotation"  : %(rotation)s,
-			"color"     : %(color)d,
-			"distance"  : %(distance).3f,
-			"intensity" : %(intensity).3f,
-			"gamma"     : %(gamma).3f,
-			"shape"     : "%(shape)s",
-			"size"      : %(size).3f,
-			"size_y"    : %(size_y).3f
-		}"""
-
-
-TEMPLATE_VEC4 = '[ %g, %g, %g, %g ]'
-TEMPLATE_VEC3 = '[ %g, %g, %g ]'
-TEMPLATE_VEC2 = '[ %g, %g ]'
-TEMPLATE_STRING = '"%s"'
-TEMPLATE_HEX = "0x%06x"
-
-# #####################################################
-# Templates - model
-# #####################################################
-
-TEMPLATE_FILE_ASCII = """\
-	{
-
-		"metadata": {
-			"formatVersion" : 3.1,
-			"generatedBy"   : "Blender 2.7 Exporter",
-			"vertices"      : %(nvertex)d,
-			"faces"         : %(nface)d,
-			"normals"       : %(nnormal)d,
-			"colors"        : %(ncolor)d,
-			"uvs"           : [%(nuvs)s],
-			"materials"     : %(nmaterial)d,
-			"morphTargets"  : %(nmorphTarget)d,
-			"bones"         : %(nbone)d
-		},
-
-%(model)s
-
-	}
-"""
-
-TEMPLATE_MODEL_ASCII = """\
-			"scale" : %(scale)f,
-			
-			"vertices" : [%(vertices)s],
-			"faces"    : [%(faces)s],
-			"uvs"      : [%(uvs)s],
-			"normals"  : [%(normals)s],
-			
-			"skinIndices"  : [%(indices)s],
-			"skinWeights"  : [%(weights)s],
-			"morphTargets" : [%(morphTargets)s],
-			
-			"bones"      : [%(bones)s],
-			"animations" : [%(animations)s],
-			
-			"colors"    : [%(colors)s],
-			"materials" : [
-%(materials)s
-			]
-"""
-
-TEMPLATE_VERTEX = "%g,%g,%g"
-TEMPLATE_VERTEX_TRUNCATE = "%d,%d,%d"
-
-TEMPLATE_N = "%g,%g,%g"
-TEMPLATE_UV = "%g,%g"
-TEMPLATE_C = "%d"
-
-# #####################################################
-# Utils
-# #####################################################
-
-def veckey3(x,y,z):
-    return round(x, 6), round(y, 6), round(z, 6)
-
-def veckey3d(v):
-    return veckey3(v.x, v.y, v.z)
-
-def veckey2d(v):
-    return round(v[0], 6), round(v[1], 6)
-
-def get_faces(obj):
-    if hasattr(obj, "tessfaces"):
-        return obj.tessfaces
-    else:
-        return obj.faces
-
-def get_normal_indices(v, normals, mesh):
-    n = []
-    mv = mesh.vertices
-
-    for i in v:
-        normal = mv[i].normal
-        key = veckey3d(normal)
-
-        n.append( normals[key] )
-
-    return n
-
-def get_uv_indices(face_index, uvs, mesh, layer_index):
-    uv = []
-    uv_layer = mesh.tessface_uv_textures[layer_index].data
-    for i in uv_layer[face_index].uv:
-        uv.append( uvs[veckey2d(i)] )
-    return uv
-
-def get_color_indices(face_index, colors, mesh):
-    c = []
-    color_layer = mesh.tessface_vertex_colors.active.data
-    face_colors = color_layer[face_index]
-    face_colors = face_colors.color1, face_colors.color2, face_colors.color3, face_colors.color4
-    for i in face_colors:
-        c.append( colors[hexcolor(i)] )
-    return c
-
-def rgb2int(rgb):
-    color = (int(rgb[0]*255) << 16) + (int(rgb[1]*255) << 8) + int(rgb[2]*255);
-    return color
-
-# #####################################################
-# Utils - files
-# #####################################################
-
-def write_file(fname, content):
-    out = open(fname, "w", encoding="utf-8")
-    out.write(content)
-    out.close()
-
-def ensure_folder_exist(foldername):
-    """Create folder (with whole path) if it doesn't exist yet."""
-
-    if not os.access(foldername, os.R_OK|os.W_OK|os.X_OK):
-        os.makedirs(foldername)
-
-def ensure_extension(filepath, extension):
-    if not filepath.lower().endswith(extension):
-        filepath += extension
-    return filepath
-
-def generate_mesh_filename(meshname, filepath):
-    normpath = os.path.normpath(filepath)
-    path, ext = os.path.splitext(normpath)
-    return "%s.%s%s" % (path, meshname, ext)
-
-
-# #####################################################
-# Utils - alignment
-# #####################################################
-
-def bbox(vertices):
-    """Compute bounding box of vertex array.
-    """
-
-    if len(vertices)>0:
-        minx = maxx = vertices[0].co.x
-        miny = maxy = vertices[0].co.y
-        minz = maxz = vertices[0].co.z
-
-        for v in vertices[1:]:
-            if v.co.x < minx:
-                minx = v.co.x
-            elif v.co.x > maxx:
-                maxx = v.co.x
-
-            if v.co.y < miny:
-                miny = v.co.y
-            elif v.co.y > maxy:
-                maxy = v.co.y
-
-            if v.co.z < minz:
-                minz = v.co.z
-            elif v.co.z > maxz:
-                maxz = v.co.z
-
-        return { 'x':[minx,maxx], 'y':[miny,maxy], 'z':[minz,maxz] }
-
-    else:
-        return { 'x':[0,0], 'y':[0,0], 'z':[0,0] }
-
-def translate(vertices, t):
-    """Translate array of vertices by vector t.
-    """
-
-    for i in range(len(vertices)):
-        vertices[i].co.x += t[0]
-        vertices[i].co.y += t[1]
-        vertices[i].co.z += t[2]
-
-def center(vertices):
-    """Center model (middle of bounding box).
-    """
-
-    bb = bbox(vertices)
-
-    cx = bb['x'][0] + (bb['x'][1] - bb['x'][0])/2.0
-    cy = bb['y'][0] + (bb['y'][1] - bb['y'][0])/2.0
-    cz = bb['z'][0] + (bb['z'][1] - bb['z'][0])/2.0
-
-    translate(vertices, [-cx,-cy,-cz])
-
-    return [-cx,-cy,-cz]
-
-def top(vertices):
-    """Align top of the model with the floor (Y-axis) and center it around X and Z.
-    """
-
-    bb = bbox(vertices)
-
-    cx = bb['x'][0] + (bb['x'][1] - bb['x'][0])/2.0
-    cy = bb['y'][1]
-    cz = bb['z'][0] + (bb['z'][1] - bb['z'][0])/2.0
-
-    translate(vertices, [-cx,-cy,-cz])
-
-    return [-cx,-cy,-cz]
-
-def bottom(vertices):
-    """Align bottom of the model with the floor (Y-axis) and center it around X and Z.
-    """
-
-    bb = bbox(vertices)
-
-    cx = bb['x'][0] + (bb['x'][1] - bb['x'][0])/2.0
-    cy = bb['y'][0]
-    cz = bb['z'][0] + (bb['z'][1] - bb['z'][0])/2.0
-
-    translate(vertices, [-cx,-cy,-cz])
-
-    return [-cx,-cy,-cz]
-
-# #####################################################
-# Elements rendering
-# #####################################################
-
-def hexcolor(c):
-    return ( int(c[0] * 255) << 16  ) + ( int(c[1] * 255) << 8 ) + int(c[2] * 255)
-
-def generate_vertices(vertices, option_vertices_truncate, option_vertices):
-    if not option_vertices:
-        return ""
-
-    return ",".join(generate_vertex(v, option_vertices_truncate) for v in vertices)
-
-def generate_vertex(v, option_vertices_truncate):
-    if not option_vertices_truncate:
-        return TEMPLATE_VERTEX % (v.co.x, v.co.y, v.co.z)
-    else:
-        return TEMPLATE_VERTEX_TRUNCATE % (v.co.x, v.co.y, v.co.z)
-
-def generate_normal(n):
-    return TEMPLATE_N % (n[0], n[1], n[2])
-
-def generate_vertex_color(c):
-    return TEMPLATE_C % c
-
-def generate_uv(uv):
-    return TEMPLATE_UV % (uv[0], uv[1])
-
-# #####################################################
-# Model exporter - faces
-# #####################################################
-
-def setBit(value, position, on):
-    if on:
-        mask = 1 << position
-        return (value | mask)
-    else:
-        mask = ~(1 << position)
-        return (value & mask)
-
-def generate_faces(normals, uv_layers, colors, meshes, option_normals, option_colors, option_uv_coords, option_materials, option_faces):
-
-    if not option_faces:
-        return "", 0
-
-    vertex_offset = 0
-    material_offset = 0
-
-    chunks = []
-    for mesh, object in meshes:
-
-        vertexUV = len(mesh.uv_textures) > 0
-        vertexColors = len(mesh.vertex_colors) > 0
-
-        mesh_colors = option_colors and vertexColors
-        mesh_uvs = option_uv_coords and vertexUV
-
-        if vertexUV:
-            active_uv_layer = mesh.uv_textures.active
-            if not active_uv_layer:
-                mesh_extract_uvs = False
-
-        if vertexColors:
-            active_col_layer = mesh.vertex_colors.active
-            if not active_col_layer:
-                mesh_extract_colors = False
-
-        for i, f in enumerate(get_faces(mesh)):
-            face = generate_face(f, i, normals, uv_layers, colors, mesh, option_normals, mesh_colors, mesh_uvs, option_materials, vertex_offset, material_offset)
-            chunks.append(face)
-
-        vertex_offset += len(mesh.vertices)
-
-        material_count = len(mesh.materials)
-        if material_count == 0:
-            material_count = 1
-
-        material_offset += material_count
-
-    return ",".join(chunks), len(chunks)
-
-def generate_face(f, faceIndex, normals, uv_layers, colors, mesh, option_normals, option_colors, option_uv_coords, option_materials, vertex_offset, material_offset):
-    isTriangle = ( len(f.vertices) == 3 )
-
-    if isTriangle:
-        nVertices = 3
-    else:
-        nVertices = 4
-
-    hasMaterial = option_materials
-
-    hasFaceUvs = False # not supported in Blender
-    hasFaceVertexUvs = option_uv_coords
-
-    hasFaceNormals = False # don't export any face normals (as they are computed in engine)
-    hasFaceVertexNormals = option_normals
-
-    hasFaceColors = False       # not supported in Blender
-    hasFaceVertexColors = option_colors
-
-    faceType = 0
-    faceType = setBit(faceType, 0, not isTriangle)
-    faceType = setBit(faceType, 1, hasMaterial)
-    faceType = setBit(faceType, 2, hasFaceUvs)
-    faceType = setBit(faceType, 3, hasFaceVertexUvs)
-    faceType = setBit(faceType, 4, hasFaceNormals)
-    faceType = setBit(faceType, 5, hasFaceVertexNormals)
-    faceType = setBit(faceType, 6, hasFaceColors)
-    faceType = setBit(faceType, 7, hasFaceVertexColors)
-
-    faceData = []
-
-    # order is important, must match order in JSONLoader
-
-    # face type
-    # vertex indices
-    # material index
-    # face uvs index
-    # face vertex uvs indices
-    # face color index
-    # face vertex colors indices
-
-    faceData.append(faceType)
-
-    # must clamp in case on polygons bigger than quads
-
-    for i in range(nVertices):
-        index = f.vertices[i] + vertex_offset
-        faceData.append(index)
-
-    if hasMaterial:
-        index = f.material_index + material_offset
-        faceData.append( index )
-
-    if hasFaceVertexUvs:
-        for layer_index, uvs in enumerate(uv_layers):
-            uv = get_uv_indices(faceIndex, uvs, mesh, layer_index)
-            for i in range(nVertices):
-                index = uv[i]
-                faceData.append(index)
-
-    if hasFaceVertexNormals:
-        n = get_normal_indices(f.vertices, normals, mesh)
-        for i in range(nVertices):
-            index = n[i]
-            faceData.append(index)
-
-    if hasFaceVertexColors:
-        c = get_color_indices(faceIndex, colors, mesh)
-        for i in range(nVertices):
-            index = c[i]
-            faceData.append(index)
-
-    return ",".join( map(str, faceData) )
-
-
-# #####################################################
-# Model exporter - normals
-# #####################################################
-
-def extract_vertex_normals(mesh, normals, count):
-    for f in get_faces(mesh):
-        for v in f.vertices:
-
-            normal = mesh.vertices[v].normal
-            key = veckey3d(normal)
-
-            if key not in normals:
-                normals[key] = count
-                count += 1
-
-    return count
-
-def generate_normals(normals, option_normals):
-    if not option_normals:
-        return ""
-
-    chunks = []
-    for key, index in sorted(normals.items(), key = operator.itemgetter(1)):
-        chunks.append(key)
-
-    return ",".join(generate_normal(n) for n in chunks)
-
-# #####################################################
-# Model exporter - vertex colors
-# #####################################################
-
-def extract_vertex_colors(mesh, colors, count):
-    color_layer = mesh.tessface_vertex_colors.active.data
-
-    for face_index, face in enumerate(get_faces(mesh)):
-
-        face_colors = color_layer[face_index]
-        face_colors = face_colors.color1, face_colors.color2, face_colors.color3, face_colors.color4
-
-        for c in face_colors:
-            key = hexcolor(c)
-            if key not in colors:
-                colors[key] = count
-                count += 1
-
-    return count
-
-def generate_vertex_colors(colors, option_colors):
-    if not option_colors:
-        return ""
-
-    chunks = []
-    for key, index in sorted(colors.items(), key=operator.itemgetter(1)):
-        chunks.append(key)
-
-    return ",".join(generate_vertex_color(c) for c in chunks)
-
-# #####################################################
-# Model exporter - UVs
-# #####################################################
-
-def extract_uvs(mesh, uv_layers, counts):
-    for index, layer in enumerate(mesh.tessface_uv_textures):
-
-        if len(uv_layers) <= index:
-            uvs = {}
-            count = 0
-            uv_layers.append(uvs)
-            counts.append(count)
-        else:
-            uvs = uv_layers[index]
-            count = counts[index]
-
-        uv_layer = layer.data
-
-        for face_index, face in enumerate(get_faces(mesh)):
-
-            for uv_index, uv in enumerate(uv_layer[face_index].uv):
-
-                key = veckey2d(uv)
-                if key not in uvs:
-                    uvs[key] = count
-                    count += 1
-
-        counts[index] = count
-
-    return counts
-
-def generate_uvs(uv_layers, option_uv_coords):
-    if not option_uv_coords:
-        return "[]"
-
-    layers = []
-    for uvs in uv_layers:
-        chunks = []
-        for key, index in sorted(uvs.items(), key=operator.itemgetter(1)):
-            chunks.append(key)
-        layer = ",".join(generate_uv(n) for n in chunks)
-        layers.append(layer)
-
-    return ",".join("[%s]" % n for n in layers)
-
-# ##############################################################################
-# Model exporter - armature
-# (only the first armature will exported)
-# ##############################################################################
-def get_armature():
-    if len(bpy.data.armatures) == 0:
-        print("Warning: no armatures in the scene")
-        return None, None
-
-    armature = bpy.data.armatures[0]
-
-    # Someone please figure out a proper way to get the armature node
-    for object in bpy.data.objects:
-        if object.type == 'ARMATURE':
-            return armature, object
-
-    print("Warning: no node of type 'ARMATURE' in the scene")
-    return None, None
-
-# ##############################################################################
-# Model exporter - bones
-# (only the first armature will exported)
-# ##############################################################################
-
-def generate_bones(meshes, option_bones, flipyz):
-
-    if not option_bones:
-        return "", 0
-
-    armature, armature_object = get_armature()
-    if armature_object is None:
-        return "", 0
-
-    hierarchy = []
-    armature_matrix = armature_object.matrix_world
-    pose_bones = armature_object.pose.bones
-    #pose_bones = armature.bones
-
-    TEMPLATE_BONE = '{"parent":%d,"name":"%s","pos":[%g,%g,%g],"rotq":[%g,%g,%g,%g],"scl":[%g,%g,%g]}'
-
-    for pose_bone in pose_bones:
-        armature_bone = pose_bone.bone
-        #armature_bone = pose_bone
-        bonePos = armature_matrix * armature_bone.head_local
-        boneIndex = None
-
-        if armature_bone.parent is None:
-            bone_matrix = armature_matrix * armature_bone.matrix_local
-            bone_index = -1
-        else:
-            parent_matrix = armature_matrix * armature_bone.parent.matrix_local
-            bone_matrix = armature_matrix * armature_bone.matrix_local
-            bone_matrix = parent_matrix.inverted() * bone_matrix
-
-            bone_index = i = 0
-            for pose_parent in pose_bones:
-                armature_parent = pose_parent.bone
-                #armature_parent = pose_parent
-                if armature_parent.name == armature_bone.parent.name:
-                    bone_index = i
-                i += 1
-
-        pos, rot, scl = bone_matrix.decompose()
-
-        if flipyz:
-            joint = TEMPLATE_BONE % (bone_index, armature_bone.name, pos.x, pos.z, -pos.y, rot.x, rot.z, -rot.y, rot.w, scl.x, scl.z, scl.y)
-            hierarchy.append(joint)
-        else:
-            joint = TEMPLATE_BONE % (bone_index, armature_bone.name, pos.x, pos.y,  pos.z, rot.x, rot.y,  rot.z, rot.w, scl.x, scl.y, scl.z)
-            hierarchy.append(joint)
-
-    bones_string = ",".join(hierarchy)
-    
-    return bones_string, len(pose_bones)
-
-
-# ##############################################################################
-# Model exporter - skin indices and weights
-# ##############################################################################
-
-def generate_indices_and_weights(meshes, option_skinning):
-
-    if not option_skinning or len(bpy.data.armatures) == 0:
-        return "", ""
-
-    indices = []
-    weights = []
-
-    armature, armature_object = get_armature()
-    bone_names = [bone.name for bone in armature_object.pose.bones]
-
-    for mesh, object in meshes:
-
-        i = 0
-        mesh_index = -1
-
-        # find the original object
-
-        for obj in bpy.data.objects:
-            if obj.name == mesh.name or obj == object:
-                mesh_index = i
-            i += 1
-
-        if mesh_index == -1:
-            print("generate_indices: couldn't find object for mesh", mesh.name)
-            continue
-
-        object = bpy.data.objects[mesh_index]
-
-        for vertex in mesh.vertices:
-
-            # sort bones by influence
-
-            bone_array = []
-
-            for group in vertex.groups:
-                index = group.group
-                if object.vertex_groups[index].name in bone_names:
-                    weight = group.weight 
-
-                    bone_array.append( (index, weight) )
-                
-            bone_array.sort(key = operator.itemgetter(1), reverse=True)
-            
-            # select first N bones
-
-            for i in range(MAX_INFLUENCES):
-
-                if i < len(bone_array):
-                    bone_proxy = bone_array[i]
-                    
-                    found = 0
-                    index = bone_proxy[0]
-                    weight = bone_proxy[1]
-
-                    for j, bone in enumerate(armature_object.pose.bones):
-                        if object.vertex_groups[index].name == bone.name:
-                            indices.append('%d' % j)
-                            weights.append('%g' % weight)
-                            found = 1
-                            break
-
-                    if found != 1:
-                        indices.append('-1')
-                        weights.append('0')
-
-                else:
-                    indices.append('-1')
-                    weights.append('0')
-    
-    
-    indices_string = ",".join(indices)
-    weights_string = ",".join(weights)
-
-    return indices_string, weights_string
-
-
-# ##############################################################################
-# Model exporter - skeletal animation
-# (only the first action will exported)
-# ##############################################################################
-
-def generate_animation(option_animation_skeletal, option_frame_step, flipyz, option_frame_index_as_time, index):
-
-    if not option_animation_skeletal or len(bpy.data.actions) == 0:
-        return ""
-
-    # TODO: Add scaling influences
-
-    action = bpy.data.actions[index]
-    
-    # get current context and then switch to dopesheet temporarily
-    
-    current_context = bpy.context.area.type
-    
-    bpy.context.area.type = "DOPESHEET_EDITOR"
-    bpy.context.space_data.mode = "ACTION"    
-    
-    # set active action
-    bpy.context.area.spaces.active.action = action
-    
-    armature, armature_object = get_armature()
-    if armature_object is None or armature is None:
-        return "", 0
-        
-    #armature_object = bpy.data.objects['marine_rig']
-    
-        
-    armature_matrix = armature_object.matrix_world
-
-    fps = bpy.data.scenes[0].render.fps
-
-    end_frame = action.frame_range[1]
-    start_frame = action.frame_range[0]
-
-    frame_length = end_frame - start_frame
-
-    used_frames = int(frame_length / option_frame_step) + 1
-
-    TEMPLATE_KEYFRAME_FULL  = '{"time":%g,"pos":[%g,%g,%g],"rot":[%g,%g,%g,%g],"scl":[%g,%g,%g]}'
-    TEMPLATE_KEYFRAME_BEGIN = '{"time":%g'
-    TEMPLATE_KEYFRAME_END   = '}'
-    TEMPLATE_KEYFRAME_POS   = ',"pos":[%g,%g,%g]'
-    TEMPLATE_KEYFRAME_ROT   = ',"rot":[%g,%g,%g,%g]'
-    TEMPLATE_KEYFRAME_SCL   = ',"scl":[%g,%g,%g]'
-
-    keys = []
-    channels_location = []
-    channels_rotation = []
-    channels_scale = []
-    
-    # Precompute per-bone data
-    for pose_bone in armature_object.pose.bones:
-        armature_bone = pose_bone.bone
-        keys.append([])
-        channels_location.append(  find_channels(action, armature_bone, "location"))
-        channels_rotation.append(  find_channels(action, armature_bone, "rotation_quaternion"))
-        channels_rotation.append(  find_channels(action, armature_bone, "rotation_euler"))
-        channels_scale.append(     find_channels(action, armature_bone, "scale"))
-
-    # Process all frames
-    for frame_i in range(0, used_frames):
-
-        #print("Processing frame %d/%d" % (frame_i, used_frames))
-        # Compute the index of the current frame (snap the last index to the end)
-        frame = start_frame + frame_i * option_frame_step
-        if frame_i == used_frames-1:
-            frame = end_frame
-
-        # Compute the time of the frame
-        if option_frame_index_as_time:
-            time = frame - start_frame
-        else:
-            time = (frame - start_frame) / fps
-
-        # Let blender compute the pose bone transformations
-        bpy.data.scenes[0].frame_set(frame)
-
-        # Process all bones for the current frame
-        bone_index = 0
-        for pose_bone in armature_object.pose.bones:
-
-            # Extract the bone transformations
-            if pose_bone.parent is None:
-                bone_matrix = armature_matrix * pose_bone.matrix
-            else:
-                parent_matrix = armature_matrix * pose_bone.parent.matrix
-                bone_matrix = armature_matrix * pose_bone.matrix
-                bone_matrix = parent_matrix.inverted() * bone_matrix
-            pos, rot, scl = bone_matrix.decompose()
-
-            pchange = True or has_keyframe_at(channels_location[bone_index], frame)
-            rchange = True or has_keyframe_at(channels_rotation[bone_index], frame)
-            schange = True or has_keyframe_at(channels_scale[bone_index], frame)
-
-            if flipyz:
-                px, py, pz = pos.x, pos.z, -pos.y
-                rx, ry, rz, rw = rot.x, rot.z, -rot.y, rot.w
-                sx, sy, sz = scl.x, scl.z, scl.y
-            else:
-                px, py, pz = pos.x, pos.y, pos.z
-                rx, ry, rz, rw = rot.x, rot.y, rot.z, rot.w
-                sx, sy, sz = scl.x, scl.y, scl.z
-
-            # START-FRAME: needs pos, rot and scl attributes (required frame)
-
-            if frame == start_frame:
-
-                keyframe = TEMPLATE_KEYFRAME_FULL % (time, px, py, pz, rx, ry, rz, rw, sx, sy, sz)
-                keys[bone_index].append(keyframe)
-
-            # END-FRAME: needs pos, rot and scl attributes with animation length (required frame)
-
-            elif frame == end_frame:
-
-                keyframe = TEMPLATE_KEYFRAME_FULL % (time, px, py, pz, rx, ry, rz, rw, sx, sy, sz)
-                keys[bone_index].append(keyframe)
-
-            # MIDDLE-FRAME: needs only one of the attributes, can be an empty frame (optional frame)
-
-            elif pchange == True or rchange == True:
-
-                keyframe = TEMPLATE_KEYFRAME_BEGIN % time
-                if pchange == True:
-                    keyframe = keyframe + TEMPLATE_KEYFRAME_POS % (px, py, pz)
-                if rchange == True:
-                    keyframe = keyframe + TEMPLATE_KEYFRAME_ROT % (rx, ry, rz, rw)
-                if schange == True:
-                    keyframe = keyframe + TEMPLATE_KEYFRAME_SCL % (sx, sy, sz)
-                keyframe = keyframe + TEMPLATE_KEYFRAME_END
-
-                keys[bone_index].append(keyframe)
-            bone_index += 1
-
-    # Gather data
-    parents = []
-    bone_index = 0
-    for pose_bone in armature_object.pose.bones:
-        keys_string = ",".join(keys[bone_index])
-        parent_index = bone_index - 1 # WTF? Also, this property is not used by three.js
-        parent = '{"parent":%d,"keys":[%s]}' % (parent_index, keys_string)
-        bone_index += 1
-        parents.append(parent)
-    hierarchy_string = ",".join(parents)
-
-    if option_frame_index_as_time:
-        length = frame_length
-    else:
-        length = frame_length / fps
-
-    animation_string = '"name":"%s","fps":%d,"length":%g,"hierarchy":[%s]' % (action.name, fps, length, hierarchy_string)
-
-    bpy.data.scenes[0].frame_set(start_frame)
-
-    # reset context
-    
-    bpy.context.area.type = current_context
-    
-    return animation_string
-
-def find_channels(action, bone, channel_type):
-    bone_name = bone.name
-    ngroups = len(action.groups)
-    result = []
-
-    # Variant 1: channels grouped by bone names
-    if ngroups > 0:
-
-        # Find the channel group for the given bone
-        group_index = -1
-        for i in range(ngroups):
-            if action.groups[i].name == bone_name:
-                group_index = i
-
-        # Get all desired channels in that group
-        if group_index > -1:
-            for channel in action.groups[group_index].channels:
-                if channel_type in channel.data_path:
-                    result.append(channel)
-
-    # Variant 2: no channel groups, bone names included in channel names
-    else:
-
-        bone_label = '"%s"' % bone_name
-
-        for channel in action.fcurves:
-            data_path = channel.data_path
-            if bone_label in data_path and channel_type in data_path:
-                result.append(channel)
-
-    return result
-
-def find_keyframe_at(channel, frame):
-    for keyframe in channel.keyframe_points:
-        if keyframe.co[0] == frame:
-            return keyframe
-    return None
-
-def has_keyframe_at(channels, frame):
-    for channel in channels:
-        if not find_keyframe_at(channel, frame) is None:
-            return True
-    return False
-
-def generate_all_animations(option_animation_skeletal, option_frame_step, flipyz, option_frame_index_as_time):
-    all_animations_string = ""
-    if option_animation_skeletal:
-        for index in range(0, len(bpy.data.actions)):
-            if index != 0 :
-                all_animations_string += ", \n"
-            all_animations_string += "{" + generate_animation(option_animation_skeletal, option_frame_step, flipyz, option_frame_index_as_time,index) + "}"
-    return all_animations_string
-
-def handle_position_channel(channel, frame, position):
-
-    change = False
-
-    if channel.array_index in [0, 1, 2]:
-        for keyframe in channel.keyframe_points:
-            if keyframe.co[0] == frame:
-                change = True
-
-        value = channel.evaluate(frame)
-
-        if channel.array_index == 0:
-            position.x = value
-
-        if channel.array_index == 1:
-            position.y = value
-
-        if channel.array_index == 2:
-            position.z = value
-
-    return change
-
-def position(bone, frame, action, armatureMatrix):
-
-    position = mathutils.Vector((0,0,0))
-    change = False
-
-    ngroups = len(action.groups)
-
-    if ngroups > 0:
-
-        index = 0
-
-        for i in range(ngroups):
-            if action.groups[i].name == bone.name:
-                index = i
-
-        for channel in action.groups[index].channels:
-            if "location" in channel.data_path:
-                hasChanged = handle_position_channel(channel, frame, position)
-                change = change or hasChanged
-
-    else:
-
-        bone_label = '"%s"' % bone.name
-
-        for channel in action.fcurves:
-            data_path = channel.data_path
-            if bone_label in data_path and "location" in data_path:
-                hasChanged = handle_position_channel(channel, frame, position)
-                change = change or hasChanged
-
-    position = position * bone.matrix_local.inverted()
-
-    if bone.parent == None:
-
-        position.x += bone.head.x
-        position.y += bone.head.y
-        position.z += bone.head.z
-
-    else:
-
-        parent = bone.parent
-
-        parentInvertedLocalMatrix = parent.matrix_local.inverted()
-        parentHeadTailDiff = parent.tail_local - parent.head_local
-
-        position.x += (bone.head * parentInvertedLocalMatrix).x + parentHeadTailDiff.x
-        position.y += (bone.head * parentInvertedLocalMatrix).y + parentHeadTailDiff.y
-        position.z += (bone.head * parentInvertedLocalMatrix).z + parentHeadTailDiff.z
-
-    return armatureMatrix*position, change
-
-def handle_rotation_channel(channel, frame, rotation):
-
-    change = False
-
-    if channel.array_index in [0, 1, 2, 3]:
-
-        for keyframe in channel.keyframe_points:
-            if keyframe.co[0] == frame:
-                change = True
-
-        value = channel.evaluate(frame)
-
-        if channel.array_index == 1:
-            rotation.x = value
-
-        elif channel.array_index == 2:
-            rotation.y = value
-
-        elif channel.array_index == 3:
-            rotation.z = value
-
-        elif channel.array_index == 0:
-            rotation.w = value
-
-    return change
-
-def rotation(bone, frame, action, armatureMatrix):
-
-    # TODO: calculate rotation also from rotation_euler channels
-
-    rotation = mathutils.Vector((0,0,0,1))
-
-    change = False
-
-    ngroups = len(action.groups)
-
-    # animation grouped by bones
-
-    if ngroups > 0:
-
-        index = -1
-
-        for i in range(ngroups):
-            if action.groups[i].name == bone.name:
-                index = i
-
-        if index > -1:
-            for channel in action.groups[index].channels:
-                if "quaternion" in channel.data_path:
-                    hasChanged = handle_rotation_channel(channel, frame, rotation)
-                    change = change or hasChanged
-
-    # animation in raw fcurves
-
-    else:
-
-        bone_label = '"%s"' % bone.name
-
-        for channel in action.fcurves:
-            data_path = channel.data_path
-            if bone_label in data_path and "quaternion" in data_path:
-                hasChanged = handle_rotation_channel(channel, frame, rotation)
-                change = change or hasChanged
-
-    rot3 = rotation.to_3d()
-    rotation.xyz = rot3 * bone.matrix_local.inverted()
-    rotation.xyz = armatureMatrix * rotation.xyz
-
-    return rotation, change
-
-# #####################################################
-# Model exporter - materials
-# #####################################################
-
def generate_color(i):
    """Return a deterministic debug color for material index ``i``.

    The first ``len(COLORS)`` indices map to the hardcoded palette; any
    later index yields a random 24-bit color (seed the random number
    generator with a fixed value beforehand for reproducible output).
    """
    if i >= len(COLORS):
        return int(0xffffff * random.random())
    return COLORS[i]
-
def generate_mtl(materials):
    """Build placeholder (debug) material dicts, keyed by material name.

    ``materials`` maps material name -> integer index.
    """
    dummies = {}
    for name, index in materials.items():
        dummies[name] = {
            "DbgName": name,
            "DbgIndex": index,
            "DbgColor": generate_color(index),
            "vertexColors": False,
        }
    return dummies
-
def value2string(v):
    """Serialize a Python value into its JavaScript literal form.

    Strings are double-quoted unless they already look like a hex literal
    ("0x" prefix); booleans become lowercase true/false; lists are
    serialized recursively; everything else falls back to str().
    """
    # Use isinstance() instead of type() comparisons (PEP 8); bool is
    # checked before the generic fallback so True/False never hit str().
    if isinstance(v, bool):
        return str(v).lower()
    if isinstance(v, str) and not v.startswith("0x"):
        return '"%s"' % v
    if isinstance(v, list):
        return "[%s]" % (", ".join(value2string(x) for x in v))
    return str(v)
-
def generate_materials(mtl, materials, draw_type):
    """Serialize material dicts into a JS array body, ordered by index.

    Returns (joined_string, material_count).  Note: mutates each entry of
    ``mtl`` by injecting the Dbg* debug fields.
    """
    indexed = []
    for name, entry in mtl.items():
        index = materials[name]

        # Debug info: the index is the identifier used in face definitions,
        # matching the order materials first appeared.
        entry['DbgName'] = name
        entry['DbgIndex'] = index
        entry['DbgColor'] = generate_color(index)

        # Bounds/wireframe draw types export as a red wireframe material.
        if draw_type in [ "BOUNDS", "WIRE" ]:
            entry['wireframe'] = True
            entry['DbgColor'] = 0xff0000

        body = ",\n".join('\t\t\t\t\t"%s": %s' % (key, value2string(value))
                          for key, value in sorted(entry.items()))
        indexed.append([index, "\t\t\t\t{\n%s\n\t\t\t\t}" % body])

    return ",\n".join(text for _, text in sorted(indexed)), len(indexed)
-
def extract_materials(mesh, scene, option_colors, option_copy_textures, filepath):
    """Extract three.js material parameter dicts for a mesh's materials.

    Returns {material_name: params}.  Blender stores colors and their
    intensities separately, so RGB values are premultiplied here.  Texture
    maps are resolved via guess_material_textures() and optionally copied
    next to ``filepath``.
    """
    # NOTE(review): removed unused local `world = scene.world`; the
    # `scene` parameter is kept for interface compatibility.

    materials = {}
    for m in mesh.materials:
        if m:
            materials[m.name] = {}
            material = materials[m.name]

            material['colorDiffuse'] = [m.diffuse_intensity * m.diffuse_color[0],
                                        m.diffuse_intensity * m.diffuse_color[1],
                                        m.diffuse_intensity * m.diffuse_color[2]]

            material['colorSpecular'] = [m.specular_intensity * m.specular_color[0],
                                         m.specular_intensity * m.specular_color[1],
                                         m.specular_intensity * m.specular_color[2]]

            material['colorAmbient'] = [m.ambient * material['colorDiffuse'][0],
                                        m.ambient * material['colorDiffuse'][1],
                                        m.ambient * material['colorDiffuse'][2]]

            material['colorEmissive'] = [m.emit * material['colorDiffuse'][0],
                                         m.emit * material['colorDiffuse'][1],
                                         m.emit * material['colorDiffuse'][2]]

            material['transparency'] = m.alpha

            # not sure about mapping values to Blinn-Phong shader
            # Blender uses INT from [1, 511] with default 0
            # http://www.blender.org/documentation/blender_python_api_2_54_0/bpy.types.Material.html#bpy.types.Material.specular_hardness

            material["specularCoef"] = m.specular_hardness

            textures = guess_material_textures(m)

            # One map per channel; handle_texture fills map*/repeat/wrap keys.
            handle_texture('diffuse', textures, material, filepath, option_copy_textures)
            handle_texture('light', textures, material, filepath, option_copy_textures)
            handle_texture('normal', textures, material, filepath, option_copy_textures)
            handle_texture('specular', textures, material, filepath, option_copy_textures)
            handle_texture('bump', textures, material, filepath, option_copy_textures)

            material["vertexColors"] = m.THREE_useVertexColors and option_colors

            # can't really use this reliably to tell apart Phong from Lambert
            # as Blender defaults to non-zero specular color
            #if m.specular_intensity > 0.0 and (m.specular_color[0] > 0 or m.specular_color[1] > 0 or m.specular_color[2] > 0):
            #    material['shading'] = "Phong"
            #else:
            #    material['shading'] = "Lambert"

            # Normal-mapped materials need the Phong parameter set.
            if textures['normal']:
                material['shading'] = "Phong"
            else:
                material['shading'] = m.THREE_materialType

            material['blending'] = m.THREE_blendingType
            material['depthWrite'] = m.THREE_depthWrite
            material['depthTest'] = m.THREE_depthTest
            material['transparent'] = m.use_transparency

    return materials
-
def generate_materials_string(mesh, scene, option_colors, draw_type, option_copy_textures, filepath, offset):
    """Build the serialized materials array for one mesh.

    Starts from dummy debug materials (one per slot, offset by ``offset``),
    then overlays real material data extracted from the mesh.  Returns
    (materials_string, count).
    """
    # Deterministic seed so debug colors come out in a well-defined order.
    random.seed(42)

    materials = {}
    if mesh.materials:
        for i, m in enumerate(mesh.materials):
            mat_id = i + offset
            if m:
                materials[m.name] = mat_id
            else:
                materials["undefined_dummy_%0d" % mat_id] = mat_id

    if not materials:
        materials = { 'default': 0 }

    # Dummy placeholders first, then real data on top.
    mtl = generate_mtl(materials)
    mtl.update(extract_materials(mesh, scene, option_colors, option_copy_textures, filepath))

    return generate_materials(mtl, materials, draw_type)
-
def handle_texture(id, textures, material, filepath, option_copy_textures):
    """Fill texture-related entries on ``material`` for one channel.

    ``id`` is one of 'diffuse', 'light', 'normal', 'specular', 'bump'.
    Does nothing unless the channel has a texture that is referenced by
    at least one material.
    """
    entry = textures[id]
    if not (entry and entry['texture'].users > 0 and len(entry['texture'].users_material) > 0):
        return

    suffix = id.capitalize()
    texName     = 'map%s'       % suffix
    repeatName  = 'map%sRepeat' % suffix
    wrapName    = 'map%sWrap'   % suffix

    slot = entry['slot']
    texture = entry['texture']
    image = texture.image
    fname = extract_texture_filename(image)
    material[texName] = fname

    if option_copy_textures:
        save_image(image, fname, filepath)

    # Only record non-default tiling.
    if not (texture.repeat_x == 1 and texture.repeat_y == 1):
        material[repeatName] = [texture.repeat_x, texture.repeat_y]

    # Only REPEAT extension maps onto three.js repeat/mirror wrapping.
    if texture.extension == "REPEAT":
        wrap_x = "mirror" if texture.use_mirror_x else "repeat"
        wrap_y = "mirror" if texture.use_mirror_y else "repeat"
        material[wrapName] = [wrap_x, wrap_y]

    # Normal influence strength: bump maps use a scale, normal maps a factor.
    if slot.use_map_normal and slot.normal_factor != 1.0:
        if id == "bump":
            material['mapBumpScale'] = slot.normal_factor
        else:
            material['mapNormalFactor'] = slot.normal_factor
-
-
-# #####################################################
-# ASCII model generator
-# #####################################################
-
def generate_ascii_model(meshes, morphs,
                         scene,
                         option_vertices,
                         option_vertices_truncate,
                         option_faces,
                         option_normals,
                         option_uv_coords,
                         option_materials,
                         option_colors,
                         option_bones,
                         option_skinning,
                         align_model,
                         flipyz,
                         option_scale,
                         option_copy_textures,
                         filepath,
                         option_animation_morph,
                         option_animation_skeletal,
                         option_frame_index_as_time,
                         option_frame_step):
    """Assemble the complete ASCII (JSON) model text for a list of meshes.

    Aggregates vertices, normals, vertex colors, UV layers and materials
    across all (mesh, object) pairs, optionally appends morph targets and
    skeletal animation, and fills TEMPLATE_MODEL_ASCII and
    TEMPLATE_FILE_ASCII.  Returns (file_text, model_string).
    """

    vertices = []

    # Per-mesh offsets into the combined vertex list.
    # NOTE(review): vertex_offset is advanced using len(vertices) as it was
    # BEFORE the extend below, so the recorded offsets appear to lag one
    # mesh behind — verify against whatever consumes vertex_offsets.
    vertex_offset = 0
    vertex_offsets = []

    # Normals and colors are deduplicated via dicts; n* are running counts.
    nnormal = 0
    normals = {}

    ncolor = 0
    colors = {}

    nuvs = []
    uv_layers = []

    nmaterial = 0
    materials = []

    for mesh, object in meshes:

        vertexUV = len(mesh.uv_textures) > 0
        vertexColors = len(mesh.vertex_colors) > 0

        mesh_extract_colors = option_colors and vertexColors
        mesh_extract_uvs = option_uv_coords and vertexUV

        # Only extract UVs/colors when an active layer actually exists.
        if vertexUV:
            active_uv_layer = mesh.uv_textures.active
            if not active_uv_layer:
                mesh_extract_uvs = False

        if vertexColors:
            active_col_layer = mesh.vertex_colors.active
            if not active_col_layer:
                mesh_extract_colors = False

        vertex_offsets.append(vertex_offset)
        vertex_offset += len(vertices)

        vertices.extend(mesh.vertices[:])

        if option_normals:
            nnormal = extract_vertex_normals(mesh, normals, nnormal)

        if mesh_extract_colors:
            ncolor = extract_vertex_colors(mesh, colors, ncolor)

        if mesh_extract_uvs:
            nuvs = extract_uvs(mesh, uv_layers, nuvs)

        if option_materials:
            mesh_materials, nmaterial = generate_materials_string(mesh, scene, mesh_extract_colors, object.draw_type, option_copy_textures, filepath, nmaterial)
            materials.append(mesh_materials)


    # Morph targets: one entry per sampled animation frame.
    morphTargets_string = ""
    nmorphTarget = 0

    if option_animation_morph:
        chunks = []
        for i, morphVertices in enumerate(morphs):
            morphTarget = '{ "name": "%s_%06d", "vertices": [%s] }' % ("animation", i, morphVertices)
            chunks.append(morphTarget)

        morphTargets_string = ",\n\t".join(chunks)
        nmorphTarget = len(morphs)

    # Align the combined model: 1 = center, 2 = bottom, 3 = top.
    if align_model == 1:
        center(vertices)
    elif align_model == 2:
        bottom(vertices)
    elif align_model == 3:
        top(vertices)

    faces_string, nfaces = generate_faces(normals, uv_layers, colors, meshes, option_normals, option_colors, option_uv_coords, option_materials, option_faces)

    bones_string, nbone = generate_bones(meshes, option_bones, flipyz)
    indices_string, weights_string = generate_indices_and_weights(meshes, option_skinning)

    materials_string = ",\n".join(materials)

    model_string = TEMPLATE_MODEL_ASCII % {
    "scale" : option_scale,

    "uvs"       : generate_uvs(uv_layers, option_uv_coords),
    "normals"   : generate_normals(normals, option_normals),
    "colors"    : generate_vertex_colors(colors, option_colors),

    "materials" : materials_string,

    "vertices" : generate_vertices(vertices, option_vertices_truncate, option_vertices),

    "faces"    : faces_string,

    "morphTargets" : morphTargets_string,

    "bones"     : bones_string,
    "indices"   : indices_string,
    "weights"   : weights_string,
    "animations" : generate_all_animations(option_animation_skeletal, option_frame_step, flipyz, option_frame_index_as_time)
    }

    # Wrap the model in the file template, which also records element counts.
    text = TEMPLATE_FILE_ASCII % {
    "nvertex"   : len(vertices),
    "nface"     : nfaces,
    "nuvs"      : ",".join("%d" % n for n in nuvs),
    "nnormal"   : nnormal,
    "ncolor"    : ncolor,
    "nmaterial" : nmaterial,
    "nmorphTarget": nmorphTarget,
    "nbone"     : nbone,

    "model"     : model_string
    }


    return text, model_string
-
-
-# #####################################################
-# Model exporter - export single mesh
-# #####################################################
-
def extract_meshes(objects, scene, export_single_model, option_scale, flipyz):
    """Collect tessellated mesh copies for all exportable MESH objects.

    Modifiers are collapsed using render settings.  For single-model
    export, each object's world transform (optionally YZ-flipped) is
    baked into the mesh data.  Returns a list of [mesh, object] pairs;
    callers are responsible for freeing the temporary meshes with
    bpy.data.meshes.remove().
    """

    meshes = []

    for object in objects:

        if object.type == "MESH" and object.THREE_exportGeometry:

            # collapse modifiers into mesh

            mesh = object.to_mesh(scene, True, 'RENDER')

            if not mesh:
                raise Exception("Error, could not get mesh data from object [%s]" % object.name)

            # preserve original name

            mesh.name = object.name

            if export_single_model:

                if flipyz:

                    # that's what Blender's native export_obj.py does to flip YZ

                    X_ROT = mathutils.Matrix.Rotation(-math.pi/2, 4, 'X')
                    mesh.transform(X_ROT * object.matrix_world)

                else:
                    mesh.transform(object.matrix_world)


            # Recompute tessellation/normals AFTER baking transforms so the
            # exported normals match the transformed geometry.
            mesh.update(calc_tessface=True)

            mesh.calc_normals()
            mesh.calc_tessface()
            mesh.transform(mathutils.Matrix.Scale(option_scale, 4))
            meshes.append([mesh, object])

    return meshes
-
def generate_mesh_string(objects, scene,
                option_vertices,
                option_vertices_truncate,
                option_faces,
                option_normals,
                option_uv_coords,
                option_materials,
                option_colors,
                option_bones,
                option_skinning,
                align_model,
                flipyz,
                option_scale,
                export_single_model,
                option_copy_textures,
                filepath,
                option_animation_morph,
                option_animation_skeletal,
                option_frame_index_as_time,
                option_frame_step):
    """Build the full ASCII model text for ``objects``.

    When morph animation is enabled, the scene is stepped through its
    frame range and per-frame vertex snapshots are collected as morph
    targets (aligned consistently with the base model).  Returns
    (file_text, model_string) from generate_ascii_model().
    """

    meshes = extract_meshes(objects, scene, export_single_model, option_scale, flipyz)

    morphs = []

    if option_animation_morph:

        original_frame = scene.frame_current # save animation state

        scene_frames = range(scene.frame_start, scene.frame_end + 1, option_frame_step)

        for index, frame in enumerate(scene_frames):
            scene.frame_set(frame, 0.0)

            # Re-extract meshes at this frame to capture deformed vertices.
            anim_meshes = extract_meshes(objects, scene, export_single_model, option_scale, flipyz)

            frame_vertices = []

            for mesh, object in anim_meshes:
                frame_vertices.extend(mesh.vertices[:])

            # The alignment offset is computed once from the first frame and
            # re-applied to all later frames so morph targets stay consistent.
            if index == 0:
                if align_model == 1:
                    offset = center(frame_vertices)
                elif align_model == 2:
                    offset = bottom(frame_vertices)
                elif align_model == 3:
                    offset = top(frame_vertices)
                else:
                    offset = False
            else:
                if offset:
                    translate(frame_vertices, offset)

            morphVertices = generate_vertices(frame_vertices, option_vertices_truncate, option_vertices)
            morphs.append(morphVertices)

            # remove temp meshes

            for mesh, object in anim_meshes:
                bpy.data.meshes.remove(mesh)

        scene.frame_set(original_frame, 0.0) # restore animation state


    text, model_string = generate_ascii_model(meshes, morphs,
                                scene,
                                option_vertices,
                                option_vertices_truncate,
                                option_faces,
                                option_normals,
                                option_uv_coords,
                                option_materials,
                                option_colors,
                                option_bones,
                                option_skinning,
                                align_model,
                                flipyz,
                                option_scale,
                                option_copy_textures,
                                filepath,
                                option_animation_morph,
                                option_animation_skeletal,
                                option_frame_index_as_time,
                                option_frame_step)

    # remove temp meshes

    for mesh, object in meshes:
        bpy.data.meshes.remove(mesh)

    return text, model_string
-
def export_mesh(objects,
                scene, filepath,
                option_vertices,
                option_vertices_truncate,
                option_faces,
                option_normals,
                option_uv_coords,
                option_materials,
                option_colors,
                option_bones,
                option_skinning,
                align_model,
                flipyz,
                option_scale,
                export_single_model,
                option_copy_textures,
                option_animation_morph,
                option_animation_skeletal,
                option_frame_step,
                option_frame_index_as_time):
    """Export a single mesh: generate the model text and write it to disk."""

    text, _ = generate_mesh_string(
        objects,
        scene,
        option_vertices,
        option_vertices_truncate,
        option_faces,
        option_normals,
        option_uv_coords,
        option_materials,
        option_colors,
        option_bones,
        option_skinning,
        align_model,
        flipyz,
        option_scale,
        export_single_model,
        option_copy_textures,
        filepath,
        option_animation_morph,
        option_animation_skeletal,
        option_frame_index_as_time,
        option_frame_step)

    write_file(filepath, text)

    print("writing", filepath, "done")
-
-
-# #####################################################
-# Scene exporter - render elements
-# #####################################################
-
def generate_quat(quat):
    """Format a quaternion's components (x, y, z, w order) via TEMPLATE_VEC4."""
    components = (quat.x, quat.y, quat.z, quat.w)
    return TEMPLATE_VEC4 % components
-
def generate_vec4(vec):
    """Format the first four components of an indexable via TEMPLATE_VEC4."""
    x, y, z, w = vec[0], vec[1], vec[2], vec[3]
    return TEMPLATE_VEC4 % (x, y, z, w)
-
def generate_vec3(vec, flipyz = False):
    """Format a 3-vector via TEMPLATE_VEC3; with flipyz, swap Y and Z."""
    a, b, c = vec[0], vec[1], vec[2]
    if flipyz:
        b, c = c, b
    return TEMPLATE_VEC3 % (a, b, c)
-
def generate_vec2(vec):
    """Format the first two components of an indexable via TEMPLATE_VEC2."""
    u, v = vec[0], vec[1]
    return TEMPLATE_VEC2 % (u, v)
-
def generate_hex(number):
    """Format an integer color as a hex literal via TEMPLATE_HEX."""
    return TEMPLATE_HEX % (number,)
-
def generate_string(s):
    """Quote a value using the TEMPLATE_STRING template."""
    return TEMPLATE_STRING % (s,)
-
def generate_string_list(src_list):
    """Comma-join the quoted form of every item in ``src_list``."""
    return ", ".join(map(generate_string, src_list))
-
def generate_section(label, content):
    """Render a labelled section of the scene file via TEMPLATE_SECTION."""
    fields = (label, content)
    return TEMPLATE_SECTION % fields
-
def get_mesh_filename(mesh):
    """Derive the on-disk JSON filename from a mesh node's data name."""
    return "%s.json" % sanitize(mesh["data"]["name"])
-
def generate_material_id_list(materials):
    """Collect the ``name`` of each given material (or material slot)."""
    return [material.name for material in materials]
-
def generate_group_id_list(obj):
    """Names of every group in bpy.data.groups that contains ``obj``."""
    return [group.name for group in bpy.data.groups if obj.name in group.objects]
-
def generate_bool_property(property):
    """Render any truthy/falsy value as a JS boolean literal."""
    return "true" if property else "false"
-
-# #####################################################
-# Scene exporter - objects
-# #####################################################
-
def generate_objects(data):
    """Serialize scene objects into JSON object chunks.

    Mesh objects flagged for geometry export get a full TEMPLATE_OBJECT
    entry (geometry/material/group references plus transform and render
    flags); empties and meshes excluded from geometry export get a
    reduced TEMPLATE_EMPTY entry.  Returns (joined_string, count).
    """
    chunks = []

    for obj in data["objects"]:

        if obj.type == "MESH" and obj.THREE_exportGeometry:
            object_id = obj.name

            #if len(obj.modifiers) > 0:
            #    geo_name = obj.name
            #else:
            geo_name = obj.data.name

            geometry_id = "geo_%s" % geo_name

            material_ids = generate_material_id_list(obj.material_slots)
            group_ids = generate_group_id_list(obj)

            # Optionally pre-rotate the world matrix to flip the YZ axes.
            if data["flipyz"]:
                matrix_world = ROTATE_X_PI2 * obj.matrix_world
            else:
                matrix_world = obj.matrix_world

            position, quaternion, scale = matrix_world.decompose()
            rotation = quaternion.to_euler("ZYX")

            # use empty material string for multi-material objects
            # this will trigger use of MeshFaceMaterial in SceneLoader

            material_string = '""'
            if len(material_ids) == 1:
                material_string = generate_string_list(material_ids)

            group_string = ""
            if len(group_ids) > 0:
                group_string = generate_string_list(group_ids)

            # Per-object render flags from the custom THREE_* properties.
            castShadow = obj.THREE_castShadow
            receiveShadow = obj.THREE_receiveShadow
            doubleSided = obj.THREE_doubleSided

            visible = obj.THREE_visible

            geometry_string = generate_string(geometry_id)

            object_string = TEMPLATE_OBJECT % {
            "object_id"   : generate_string(object_id),
            "geometry_id" : geometry_string,
            "group_id"    : group_string,
            "material_id" : material_string,

            "position"    : generate_vec3(position),
            "rotation"    : generate_vec3(rotation),
            "quaternion"  : generate_quat(quaternion),
            "scale"       : generate_vec3(scale),

            "castShadow"  : generate_bool_property(castShadow),
            "receiveShadow"  : generate_bool_property(receiveShadow),
            "doubleSided"  : generate_bool_property(doubleSided),
            "visible"      : generate_bool_property(visible)
            }
            chunks.append(object_string)

        elif obj.type == "EMPTY" or (obj.type == "MESH" and not obj.THREE_exportGeometry):

            object_id = obj.name
            group_ids = generate_group_id_list(obj)

            if data["flipyz"]:
                matrix_world = ROTATE_X_PI2 * obj.matrix_world
            else:
                matrix_world = obj.matrix_world

            position, quaternion, scale = matrix_world.decompose()
            rotation = quaternion.to_euler("ZYX")

            group_string = ""
            if len(group_ids) > 0:
                group_string = generate_string_list(group_ids)

            object_string = TEMPLATE_EMPTY % {
            "object_id"   : generate_string(object_id),
            "group_id"    : group_string,

            "position"    : generate_vec3(position),
            "rotation"    : generate_vec3(rotation),
            "quaternion"  : generate_quat(quaternion),
            "scale"       : generate_vec3(scale)
            }
            chunks.append(object_string)

    return ",\n".join(chunks), len(chunks)
-
-# #####################################################
-# Scene exporter - geometries
-# #####################################################
-
def generate_geometries(data):
    """Emit one geometry entry per unique mesh datablock among exported objects.

    Embedded exports reference an "emb_" id inside the scene file; linked
    exports reference a standalone model file.  Returns
    (joined_string, count).
    """
    chunks = []
    seen = set()

    for obj in data["objects"]:
        if obj.type != "MESH" or not obj.THREE_exportGeometry:
            continue

        name = obj.data.name
        if name in seen:
            continue
        seen.add(name)

        geometry_id = "geo_%s" % name

        if data["embed_meshes"]:
            geometry_string = TEMPLATE_GEOMETRY_EMBED % {
                "geometry_id" : generate_string(geometry_id),
                "embed_id"    : generate_string("emb_%s" % name)
            }
        else:
            model_filename = os.path.basename(generate_mesh_filename(name, data["filepath"]))
            geometry_string = TEMPLATE_GEOMETRY_LINK % {
                "geometry_id" : generate_string(geometry_id),
                "model_file"  : generate_string(model_filename)
            }

        chunks.append(geometry_string)

    return ",\n".join(chunks), len(chunks)
-
-# #####################################################
-# Scene exporter - textures
-# #####################################################
-
def generate_textures_scene(data):
    """Serialize every used IMAGE texture in the blend file.

    Optionally copies each image next to the export path.  Returns
    (joined_string, count).
    """
    chunks = []

    # TODO: extract just textures actually used by some objects in the scene

    for texture in bpy.data.textures:

        # Only image textures that are referenced by at least one material.
        if texture.type == 'IMAGE' and texture.image and texture.users > 0 and len(texture.users_material) > 0:

            img = texture.image

            texture_id = img.name
            texture_file = extract_texture_filename(img)

            if data["copy_textures"]:
                save_image(img, texture_file, data["filepath"])

            # Optional repeat/wrap parameters appended to the template.
            # NOTE(review): "repeat" is indented with spaces while "wrap"
            # uses tabs in the emitted text — inconsistent, but preserved
            # since it is runtime output.
            extras = ""

            if texture.repeat_x != 1 or texture.repeat_y != 1:
                extras += ',\n        "repeat": [%g, %g]' % (texture.repeat_x, texture.repeat_y)

            if texture.extension == "REPEAT":
                wrap_x = "repeat"
                wrap_y = "repeat"

                if texture.use_mirror_x:
                    wrap_x = "mirror"
                if texture.use_mirror_y:
                    wrap_y = "mirror"

                extras += ',\n\t\t\t"wrap": ["%s", "%s"]' % (wrap_x, wrap_y)

            texture_string = TEMPLATE_TEXTURE % {
            "texture_id"   : generate_string(texture_id),
            "texture_file" : generate_string(texture_file),
            "extras"       : extras
            }
            chunks.append(texture_string)

    return ",\n".join(chunks), len(chunks)
-
def extract_texture_filename(image):
    """Return just the basename of an image's absolute, normalized path."""
    absolute = os.path.normpath(bpy.path.abspath(image.filepath))
    return os.path.basename(absolute)
-
def save_image(img, name, fpath):
    """Save or copy ``img`` into the export directory under ``name``.

    Packed images are written out via Blender's renderer; unpacked images
    are copied from their on-disk source.
    """
    dst_dir = os.path.dirname(fpath)
    dst_path = os.path.join(dst_dir, name)

    ensure_folder_exist(dst_dir)

    if img.packed_file:
        img.save_render(dst_path)

    else:
        src_path = bpy.path.abspath(img.filepath)
        # Copy to the full destination path so the file is always named
        # ``name``.  Previously this copied into the directory keeping the
        # source basename, which could disagree with the filename the
        # exported scene references.
        shutil.copy(src_path, dst_path)
-
-# #####################################################
-# Scene exporter - materials
-# #####################################################
-
def extract_material_data(m, option_colors):
    """Extract a scene-level material parameter dict from a Blender material.

    Unlike extract_materials(), texture maps are recorded by image name
    (not filename) and every map key is always present (empty string when
    unused).  Returns the parameter dict, including the material name.
    """
    # NOTE(review): removed unused local `world = bpy.context.scene.world`.

    material = { 'name': m.name }

    # Blender stores color and intensity separately; premultiply to RGB.
    material['colorDiffuse'] = [m.diffuse_intensity * m.diffuse_color[0],
                                m.diffuse_intensity * m.diffuse_color[1],
                                m.diffuse_intensity * m.diffuse_color[2]]

    material['colorSpecular'] = [m.specular_intensity * m.specular_color[0],
                                 m.specular_intensity * m.specular_color[1],
                                 m.specular_intensity * m.specular_color[2]]

    material['colorAmbient'] = [m.ambient * material['colorDiffuse'][0],
                                m.ambient * material['colorDiffuse'][1],
                                m.ambient * material['colorDiffuse'][2]]

    material['colorEmissive'] = [m.emit * material['colorDiffuse'][0],
                                 m.emit * material['colorDiffuse'][1],
                                 m.emit * material['colorDiffuse'][2]]

    material['transparency'] = m.alpha

    # not sure about mapping values to Blinn-Phong shader
    # Blender uses INT from [1,511] with default 0
    # http://www.blender.org/documentation/blender_python_api_2_54_0/bpy.types.Material.html#bpy.types.Material.specular_hardness

    material["specularCoef"] = m.specular_hardness

    material["vertexColors"] = m.THREE_useVertexColors and option_colors

    # Map keys default to empty so consumers can test them unconditionally.
    material['mapDiffuse'] = ""
    material['mapLight'] = ""
    material['mapSpecular'] = ""
    material['mapNormal'] = ""
    material['mapBump'] = ""

    material['mapNormalFactor'] = 1.0
    material['mapBumpScale'] = 1.0

    textures = guess_material_textures(m)

    if textures['diffuse']:
        material['mapDiffuse'] = textures['diffuse']['texture'].image.name

    if textures['light']:
        material['mapLight'] = textures['light']['texture'].image.name

    if textures['specular']:
        material['mapSpecular'] = textures['specular']['texture'].image.name

    if textures['normal']:
        material['mapNormal'] = textures['normal']['texture'].image.name
        if textures['normal']['slot'].use_map_normal:
            material['mapNormalFactor'] = textures['normal']['slot'].normal_factor

    if textures['bump']:
        material['mapBump'] = textures['bump']['texture'].image.name
        if textures['bump']['slot'].use_map_normal:
            material['mapBumpScale'] = textures['bump']['slot'].normal_factor

    material['shading'] = m.THREE_materialType
    material['blending'] = m.THREE_blendingType
    material['depthWrite'] = m.THREE_depthWrite
    material['depthTest'] = m.THREE_depthTest
    material['transparent'] = m.use_transparency

    return material
-
def guess_material_textures(material):
    """Classify a material's first texture of each supported kind.

    Scans the material's texture slots and returns a dict mapping
    'diffuse' / 'light' / 'normal' / 'specular' / 'bump' to either None
    or {"texture": texture, "slot": slot}.  Only the first texture found
    per channel is kept (three.js materials support a single map each);
    diffuse is assumed to come before a lightmap, and a MULTIPLY blend
    marks a lightmap.
    """
    textures = {
        'diffuse' : None,
        'light'   : None,
        'normal'  : None,
        'specular': None,
        'bump'    : None
    }

    # Iterate slots directly instead of indexing by position.
    for slot in material.texture_slots:
        if not slot:
            continue
        texture = slot.texture
        if not (slot.use and texture and texture.type == 'IMAGE'):
            continue

        if texture.use_normal_map:
            # normal map in Blender UI: textures => image sampling => normal map
            textures['normal'] = { "texture": texture, "slot": slot }

        elif slot.use_map_normal:
            # bump map in Blender UI: textures => influence => geometry => normal
            textures['bump'] = { "texture": texture, "slot": slot }

        elif slot.use_map_specular or slot.use_map_hardness:
            textures['specular'] = { "texture": texture, "slot": slot }

        elif not textures['diffuse'] and not slot.blend_type == 'MULTIPLY':
            textures['diffuse'] = { "texture": texture, "slot": slot }

        else:
            textures['light'] = { "texture": texture, "slot": slot }

        # Stop once every channel has been assigned.
        if all(textures.values()):
            break

    return textures
-
def generate_material_string(material):
    """Serialize one extracted material dict into a scene material entry.

    Chooses the three.js material class from the shading mode (normal or
    bump maps force Phong, since the normal shader needs the full Phong
    parameter set) and assembles the JSON parameter list.
    """
    material_id = material["name"]

    # default to Lambert

    shading = material.get("shading", "Lambert")

    # normal and bump mapped materials must use Phong
    # to get all required parameters for normal shader

    if material['mapNormal'] or material['mapBump']:
        shading = "Phong"

    type_map = {
    "Lambert"   : "MeshLambertMaterial",
    "Phong"     : "MeshPhongMaterial"
    }

    material_type = type_map.get(shading, "MeshBasicMaterial")

    parameters = '"color": %d' % rgb2int(material["colorDiffuse"])

    # NOTE(review): previously "ambient" and "emissive" were appended a
    # second time for Phong materials, producing duplicate JSON keys
    # (last one wins when parsed).  Each key is now emitted once with the
    # value that previously took effect.
    if shading == "Phong":
        parameters += ', "ambient": %d' % rgb2int(material["colorAmbient"])
    else:
        parameters += ', "ambient": %d' % rgb2int(material["colorDiffuse"])
    parameters += ', "emissive": %d' % rgb2int(material["colorEmissive"])
    parameters += ', "opacity": %.2g' % material["transparency"]

    if shading == "Phong":
        parameters += ', "specular": %d' % rgb2int(material["colorSpecular"])
        parameters += ', "shininess": %.1g' % material["specularCoef"]

    colorMap = material['mapDiffuse']
    lightMap = material['mapLight']
    specularMap = material['mapSpecular']
    normalMap = material['mapNormal']
    bumpMap = material['mapBump']
    normalMapFactor = material['mapNormalFactor']
    bumpMapScale = material['mapBumpScale']

    if colorMap:
        parameters += ', "map": %s' % generate_string(colorMap)
    if lightMap:
        parameters += ', "lightMap": %s' % generate_string(lightMap)
    if specularMap:
        parameters += ', "specularMap": %s' % generate_string(specularMap)
    if normalMap:
        parameters += ', "normalMap": %s' % generate_string(normalMap)
    if bumpMap:
        parameters += ', "bumpMap": %s' % generate_string(bumpMap)

    # Only record non-default map strengths.
    if normalMapFactor != 1.0:
        parameters += ', "normalMapFactor": %g' % normalMapFactor

    if bumpMapScale != 1.0:
        parameters += ', "bumpMapScale": %g' % bumpMapScale

    if material['vertexColors']:
        parameters += ', "vertexColors": "vertex"'

    if material['transparent']:
        parameters += ', "transparent": true'

    parameters += ', "blending": "%s"' % material['blending']

    if not material['depthWrite']:
        parameters += ', "depthWrite": false'

    if not material['depthTest']:
        parameters += ', "depthTest": false'


    material_string = TEMPLATE_MATERIAL_SCENE % {
    "material_id" : generate_string(material_id),
    "type"        : generate_string(material_type),
    "parameters"  : parameters
    }

    return material_string
-
-def generate_materials_scene(data):
-    chunks = []
-
-    def material_is_used(mat):
-        minimum_users = 1
-        if mat.use_fake_user:
-            minimum_users = 2 #we must ignore the "fake user" in this case
-        return mat.users >= minimum_users
-    
-    used_materials = [m for m in bpy.data.materials if material_is_used(m)]
-
-    for m in used_materials:
-        material = extract_material_data(m, data["use_colors"])
-        material_string = generate_material_string(material)
-        chunks.append(material_string)
-
-    return ",\n".join(chunks), len(chunks)
-
-# #####################################################
-# Scene exporter - cameras
-# #####################################################
-
-def generate_cameras(data):
-    chunks = []
-
-    if data["use_cameras"]:
-
-        cams = bpy.data.objects
-        cams = [ob for ob in cams if (ob.type == 'CAMERA')]
-
-        if not cams:
-            camera = DEFAULTS["camera"]
-
-            if camera["type"] == "PerspectiveCamera":
-
-                camera_string = TEMPLATE_CAMERA_PERSPECTIVE % {
-                "camera_id" : generate_string(camera["name"]),
-                "fov"       : camera["fov"],
-                "aspect"    : camera["aspect"],
-                "near"      : camera["near"],
-                "far"       : camera["far"],
-                "position"  : generate_vec3(camera["position"]),
-                "target"    : generate_vec3(camera["target"])
-                }
-
-            elif camera["type"] == "OrthographicCamera":
-
-                camera_string = TEMPLATE_CAMERA_ORTHO % {
-                "camera_id" : generate_string(camera["name"]),
-                "left"      : camera["left"],
-                "right"     : camera["right"],
-                "top"       : camera["top"],
-                "bottom"    : camera["bottom"],
-                "near"      : camera["near"],
-                "far"       : camera["far"],
-                "position"  : generate_vec3(camera["position"]),
-                "target"    : generate_vec3(camera["target"])
-                }
-
-            chunks.append(camera_string)
-
-        else:
-
-            for cameraobj in cams:
-                camera = bpy.data.cameras[cameraobj.data.name]
-
-                if camera.id_data.type == "PERSP":
-
-                    camera_string = TEMPLATE_CAMERA_PERSPECTIVE % {
-                    "camera_id" : generate_string(cameraobj.name),
-                    "fov"       : (camera.angle / 3.14) * 180.0,
-                    "aspect"    : 1.333,
-                    "near"      : camera.clip_start,
-                    "far"       : camera.clip_end,
-                    "position"  : generate_vec3([cameraobj.location[0], -cameraobj.location[1], cameraobj.location[2]], data["flipyz"]),
-                    "target"    : generate_vec3([0, 0, 0])
-                    }
-
-                elif camera.id_data.type == "ORTHO":
-
-                    camera_string = TEMPLATE_CAMERA_ORTHO % {
-                    "camera_id" : generate_string(camera.name),
-                    "left"      : -(camera.angle_x * camera.ortho_scale),
-                    "right"     : (camera.angle_x * camera.ortho_scale),
-                    "top"       : (camera.angle_y * camera.ortho_scale),
-                    "bottom"    : -(camera.angle_y * camera.ortho_scale),
-                    "near"      : camera.clip_start,
-                    "far"       : camera.clip_end,
-                    "position"  : generate_vec3([cameraobj.location[0], -cameraobj.location[1], cameraobj.location[2]], data["flipyz"]),
-                    "target"    : generate_vec3([0, 0, 0])
-                    }
-                    
-                chunks.append(camera_string)
-
-    return ",\n".join(chunks), len(chunks)
-
-# #####################################################
-# Scene exporter - lights
-# #####################################################
-
-def generate_lights(data):
-    chunks = []
-
-    if data["use_lights"]:
-        lamps = data["objects"]
-        lamps = [ob for ob in lamps if (ob.type == 'LAMP')]
-
-        for lamp in lamps:
-            light_string = ""
-            concrete_lamp = lamp.data
-
-            if concrete_lamp.type == "POINT":
-                light_string = TEMPLATE_LIGHT_POINT % {
-                    "light_id"      : generate_string(concrete_lamp.name),
-                    "position"      : generate_vec3(lamp.location, data["flipyz"]),
-                    "rotation"      : generate_vec3(lamp.rotation_euler, data["flipyz"]),
-                    "color"         : rgb2int(concrete_lamp.color),
-                    "distance"      : concrete_lamp.distance,
-                    "intensity"        : concrete_lamp.energy
-                }
-            elif concrete_lamp.type == "SUN":
-                light_string = TEMPLATE_LIGHT_SUN % {
-                    "light_id"      : generate_string(concrete_lamp.name),
-                    "position"      : generate_vec3(lamp.location, data["flipyz"]),
-                    "rotation"      : generate_vec3(lamp.rotation_euler, data["flipyz"]),
-                    "color"         : rgb2int(concrete_lamp.color),
-                    "distance"      : concrete_lamp.distance,
-                    "intensity"        : concrete_lamp.energy
-                }
-            elif concrete_lamp.type == "SPOT":
-                light_string = TEMPLATE_LIGHT_SPOT % {
-                    "light_id"      : generate_string(concrete_lamp.name),
-                    "position"      : generate_vec3(lamp.location, data["flipyz"]),
-                    "rotation"      : generate_vec3(lamp.rotation_euler, data["flipyz"]),
-                    "color"         : rgb2int(concrete_lamp.color),
-                    "distance"      : concrete_lamp.distance,
-                    "intensity"        : concrete_lamp.energy,
-                    "use_shadow"    : concrete_lamp.use_shadow,
-                    "angle"         : concrete_lamp.spot_size
-                }
-            elif concrete_lamp.type == "HEMI":
-                light_string = TEMPLATE_LIGHT_HEMI % {
-                    "light_id"      : generate_string(concrete_lamp.name),
-                    "position"      : generate_vec3(lamp.location, data["flipyz"]),
-                    "rotation"      : generate_vec3(lamp.rotation_euler, data["flipyz"]),
-                    "color"         : rgb2int(concrete_lamp.color),
-                    "distance"      : concrete_lamp.distance,
-                    "intensity"        : concrete_lamp.energy
-                }
-            elif concrete_lamp.type == "AREA":
-                light_string = TEMPLATE_LIGHT_AREA % {
-                    "light_id"      : generate_string(concrete_lamp.name),
-                    "position"      : generate_vec3(lamp.location, data["flipyz"]),
-                    "rotation"      : generate_vec3(lamp.rotation_euler, data["flipyz"]),
-                    "color"         : rgb2int(concrete_lamp.color),
-                    "distance"      : concrete_lamp.distance,
-                    "intensity"        : concrete_lamp.energy,
-                    "gamma"         : concrete_lamp.gamma,
-                    "shape"         : concrete_lamp.shape,
-                    "size"          : concrete_lamp.size,
-                    "size_y"        : concrete_lamp.size_y
-                }
-
-            chunks.append(light_string)
-
-        if not lamps:
-            lamps.append(DEFAULTS["light"])
-
-    return ",\n".join(chunks), len(chunks)
-
-# #####################################################
-# Scene exporter - embedded meshes
-# #####################################################
-
-def generate_embeds(data):
-
-    if data["embed_meshes"]:
-
-        chunks = []
-
-        for e in data["embeds"]:
-
-            embed = '\t\t"emb_%s": {\n%s\t\t}' % (e, data["embeds"][e])
-            chunks.append(embed)
-
-        return ",\n".join(chunks)
-
-    return ""
-
-# #####################################################
-# Scene exporter - generate ASCII scene
-# #####################################################
-
-def generate_ascii_scene(data):
-
-    objects, nobjects = generate_objects(data)
-    geometries, ngeometries = generate_geometries(data)
-    textures, ntextures = generate_textures_scene(data)
-    materials, nmaterials = generate_materials_scene(data)
-    lights, nlights = generate_lights(data)
-    cameras, ncameras = generate_cameras(data)
-
-    embeds = generate_embeds(data)
-
-    if nlights > 0:
-        if nobjects > 0:
-            objects = objects + ",\n" + lights
-        else:
-            objects = lights
-        nobjects += nlights
-
-    if ncameras > 0:
-        if nobjects > 0:
-            objects = objects + ",\n" + cameras
-        else:
-            objects = cameras
-        nobjects += ncameras
-
-    basetype = "relativeTo"
-
-    if data["base_html"]:
-        basetype += "HTML"
-    else:
-        basetype += "Scene"
-
-    sections = [
-    ["objects",    objects],
-    ["geometries", geometries],
-    ["textures",   textures],
-    ["materials",  materials],
-    ["embeds",     embeds]
-    ]
-
-    chunks = []
-    for label, content in sections:
-        if content:
-            chunks.append(generate_section(label, content))
-
-    sections_string = "".join(chunks)
-
-    default_camera = ""
-    if data["use_cameras"]:
-        cams = [ob for ob in bpy.data.objects if (ob.type == 'CAMERA' and ob.select)]
-        if not cams:
-            default_camera = "default_camera"
-        else:
-            default_camera = cams[0].name
-
-    parameters = {
-    "fname"     : data["source_file"],
-
-    "sections"  : sections_string,
-
-    "bgcolor"   : generate_vec3(DEFAULTS["bgcolor"]),
-    "bgalpha"   : DEFAULTS["bgalpha"],
-    "defcamera" :  generate_string(default_camera),
-
-    "nobjects"      : nobjects,
-    "ngeometries"   : ngeometries,
-    "ntextures"     : ntextures,
-    "basetype"      : generate_string(basetype),
-    "nmaterials"    : nmaterials,
-
-    "position"      : generate_vec3(DEFAULTS["position"]),
-    "rotation"      : generate_vec3(DEFAULTS["rotation"]),
-    "scale"         : generate_vec3(DEFAULTS["scale"])
-    }
-
-    text = TEMPLATE_SCENE_ASCII % parameters
-
-    return text
-
-def export_scene(scene, filepath, flipyz, option_colors, option_lights, option_cameras, option_embed_meshes, embeds, option_url_base_html, option_copy_textures):
-
-    source_file = os.path.basename(bpy.data.filepath)
-
-    # objects are contained in scene and linked groups
-    objects = []
-
-    # get scene objects
-    sceneobjects = scene.objects
-    for obj in sceneobjects:
-      objects.append(obj)
-
-    scene_text = ""
-    data = {
-    "scene"        : scene,
-    "objects"      : objects,
-    "embeds"       : embeds,
-    "source_file"  : source_file,
-    "filepath"     : filepath,
-    "flipyz"       : flipyz,
-    "use_colors"   : option_colors,
-    "use_lights"   : option_lights,
-    "use_cameras"  : option_cameras,
-    "embed_meshes" : option_embed_meshes,
-    "base_html"    : option_url_base_html,
-    "copy_textures": option_copy_textures
-    }
-    scene_text += generate_ascii_scene(data)
-
-    write_file(filepath, scene_text)
-
-# #####################################################
-# Main
-# #####################################################
-
-def save(operator, context, filepath = "",
-         option_flip_yz = True,
-         option_vertices = True,
-         option_vertices_truncate = False,
-         option_faces = True,
-         option_normals = True,
-         option_uv_coords = True,
-         option_materials = True,
-         option_colors = True,
-         option_bones = True,
-         option_skinning = True,
-         align_model = 0,
-         option_export_scene = False,
-         option_lights = False,
-         option_cameras = False,
-         option_scale = 1.0,
-         option_embed_meshes = True,
-         option_url_base_html = False,
-         option_copy_textures = False,
-         option_animation_morph = False,
-         option_animation_skeletal = False,
-         option_frame_step = 1,
-         option_all_meshes = True,
-         option_frame_index_as_time = False):
-
-    #print("URL TYPE", option_url_base_html)
-
-    filepath = ensure_extension(filepath, '.json')
-
-    scene = context.scene
-
-    if scene.objects.active:
-        bpy.ops.object.mode_set(mode='OBJECT')
-
-    if option_all_meshes:
-        sceneobjects = scene.objects
-    else:
-        sceneobjects = context.selected_objects
-
-    # objects are contained in scene and linked groups
-    objects = []
-
-    # get scene objects
-    for obj in sceneobjects:
-      objects.append(obj)
-
-    if option_export_scene:
-
-        geo_set = set()
-        embeds = {}
-
-        for object in objects:
-            if object.type == "MESH" and object.THREE_exportGeometry:
-
-                # create extra copy of geometry with applied modifiers
-                # (if they exist)
-
-                #if len(object.modifiers) > 0:
-                #    name = object.name
-
-                # otherwise can share geometry
-
-                #else:
-                name = object.data.name
-
-                if name not in geo_set:
-
-                    if option_embed_meshes:
-
-                        text, model_string = generate_mesh_string([object], scene,
-                                                        option_vertices,
-                                                        option_vertices_truncate,
-                                                        option_faces,
-                                                        option_normals,
-                                                        option_uv_coords,
-                                                        option_materials,
-                                                        option_colors,
-                                                        option_bones,
-                                                        option_skinning,
-                                                        False,          # align_model
-                                                        option_flip_yz,
-                                                        option_scale,
-                                                        False,          # export_single_model
-                                                        False,          # option_copy_textures
-                                                        filepath,
-                                                        option_animation_morph,
-                                                        option_animation_skeletal,
-                                                        option_frame_index_as_time,
-                                                        option_frame_step)
-
-                        embeds[object.data.name] = model_string
-
-                    else:
-
-                        fname = generate_mesh_filename(name, filepath)
-                        export_mesh([object], scene,
-                                    fname,
-                                    option_vertices,
-                                    option_vertices_truncate,
-                                    option_faces,
-                                    option_normals,
-                                    option_uv_coords,
-                                    option_materials,
-                                    option_colors,
-                                    option_bones,
-                                    option_skinning,
-                                    False,          # align_model
-                                    option_flip_yz,
-                                    option_scale,
-                                    False,          # export_single_model
-                                    option_copy_textures,
-                                    option_animation_morph,
-                                    option_animation_skeletal,
-                                    option_frame_step,
-                                    option_frame_index_as_time)
-
-                    geo_set.add(name)
-
-        export_scene(scene, filepath,
-                     option_flip_yz,
-                     option_colors,
-                     option_lights,
-                     option_cameras,
-                     option_embed_meshes,
-                     embeds,
-                     option_url_base_html,
-                     option_copy_textures)
-
-    else:
-
-        export_mesh(objects, scene, filepath,
-                    option_vertices,
-                    option_vertices_truncate,
-                    option_faces,
-                    option_normals,
-                    option_uv_coords,
-                    option_materials,
-                    option_colors,
-                    option_bones,
-                    option_skinning,
-                    align_model,
-                    option_flip_yz,
-                    option_scale,
-                    True,            # export_single_model
-                    option_copy_textures,
-                    option_animation_morph,
-                    option_animation_skeletal,
-                    option_frame_step,
-                    option_frame_index_as_time)
-
-    return {'FINISHED'}

+ 0 - 633
utils/exporters/blender/2.65/scripts/addons/io_mesh_threejs/import_threejs.py

@@ -1,633 +0,0 @@
-# ##### BEGIN GPL LICENSE BLOCK #####
-#
-#  This program is free software; you can redistribute it and/or
-#  modify it under the terms of the GNU General Public License
-#  as published by the Free Software Foundation; either version 2
-#  of the License, or (at your option) any later version.
-#
-#  This program is distributed in the hope that it will be useful,
-#  but WITHOUT ANY WARRANTY; without even the implied warranty of
-#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-#  GNU General Public License for more details.
-#
-#  You should have received a copy of the GNU General Public License
-#  along with this program; if not, write to the Free Software Foundation,
-#  Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-#
-# ##### END GPL LICENSE BLOCK #####
-
-"""
-Blender importer for Three.js (ASCII JSON format).
-
-"""
-
-import os
-import time
-import json
-import bpy
-import mathutils
-from mathutils.geometry import tessellate_polygon
-from bpy_extras.image_utils import load_image
-
-# #####################################################
-# Generators
-# #####################################################
-def setColor(c, t):
-    c.r = t[0]
-    c.g = t[1]
-    c.b = t[2]
-
-def create_texture(filename, modelpath):
-    name = filename
-    texture = bpy.data.textures.new(name, type='IMAGE')
-
-    image = load_image(filename, modelpath)
-    has_data = False
-
-    if image:
-        texture.image = image
-        has_data = image.has_data
-
-    return texture
-
-def create_materials(data, modelpath):
-    materials = []
-    materials_data = data.get("materials", [])
-
-    for i, m in enumerate(materials_data):
-
-        name = m.get("DbgName", "material_%d" % i)
-
-        colorAmbient = m.get("colorAmbient", None)
-        colorDiffuse = m.get("colorDiffuse", None)
-        colorSpecular = m.get("colorSpecular", None)
-        alpha = m.get("transparency", 1.0)
-        specular_hardness = m.get("specularCoef", 0)
-
-        mapDiffuse = m.get("mapDiffuse", None)
-        mapLightmap = m.get("mapLightmap", None)
-
-        vertexColorsType = m.get("vertexColors", False)
-
-        useVertexColors = False
-        if vertexColorsType:
-            useVertexColors = True
-
-        material = bpy.data.materials.new(name)
-
-        material.THREE_useVertexColors = useVertexColors
-
-        if colorDiffuse:
-            setColor(material.diffuse_color, colorDiffuse)
-            material.diffuse_intensity = 1.0
-
-        if colorSpecular:
-            setColor(material.specular_color, colorSpecular)
-            material.specular_intensity = 1.0
-
-        if alpha < 1.0:
-            material.alpha = alpha
-            material.use_transparency = True
-
-        if specular_hardness:
-            material.specular_hardness = specular_hardness
-
-        if mapDiffuse:
-            texture = create_texture(mapDiffuse, modelpath)
-            mtex = material.texture_slots.add()
-            mtex.texture = texture
-            mtex.texture_coords = 'UV'
-            mtex.use = True
-            mtex.use_map_color_diffuse = True
-
-            material.active_texture = texture
-
-        materials.append(material)
-
-    return materials
-
-def create_mesh_object(name, vertices, materials, face_data, flipYZ, recalculate_normals):
-
-    faces         = face_data["faces"]
-    vertexNormals = face_data["vertexNormals"]
-    vertexColors  = face_data["vertexColors"]
-    vertexUVs     = face_data["vertexUVs"]
-    faceMaterials = face_data["materials"]
-    faceColors    = face_data["faceColors"]
-
-    edges = []
-
-    # Create a new mesh
-
-    me = bpy.data.meshes.new(name)
-    me.from_pydata(vertices, edges, faces)
-
-    # Handle normals
-
-    if not recalculate_normals:
-        me.update(calc_edges = True)
-
-    if face_data["hasVertexNormals"]:
-
-        print("setting vertex normals")
-
-        for fi in range(len(faces)):
-
-            if vertexNormals[fi]:
-
-                #print("setting face %i with %i vertices" % (fi, len(normals[fi])))
-
-                # if me.update() is called after setting vertex normals
-                # setting face.use_smooth overrides these normals
-                #  - this fixes weird shading artefacts (seems to come from sharing
-                #    of vertices between faces, didn't find a way how to set vertex normals
-                #    per face use of vertex as opposed to per vertex),
-                #  - probably this just overrides all custom vertex normals
-                #  - to preserve vertex normals from the original data
-                #    call me.update() before setting them
-
-                me.tessfaces[fi].use_smooth = True
-
-                if not recalculate_normals:
-                    for j in range(len(vertexNormals[fi])):
-
-                        vertexNormal = vertexNormals[fi][j]
-
-                        x = vertexNormal[0]
-                        y = vertexNormal[1]
-                        z = vertexNormal[2]
-
-                        if flipYZ:
-                            tmp = y
-                            y = -z
-                            z = tmp
-
-                            # flip normals (this make them look consistent with the original before export)
-
-                            #x = -x
-                            #y = -y
-                            #z = -z
-
-                        vi = me.tessfaces[fi].vertices[j]
-
-                        me.vertices[vi].normal.x = x
-                        me.vertices[vi].normal.y = y
-                        me.vertices[vi].normal.z = z
-
-    if recalculate_normals:
-        me.update(calc_edges = True)
-
-    # Handle colors
-
-    if face_data["hasVertexColors"]:
-
-        print("setting vertex colors")
-
-        me.vertex_colors.new("vertex_color_layer_0")
-
-        for fi in range(len(faces)):
-
-            if vertexColors[fi]:
-
-                face_colors = me.vertex_colors[0].data[fi]
-                face_colors = face_colors.color1, face_colors.color2, face_colors.color3, face_colors.color4
-
-                for vi in range(len(vertexColors[fi])):
-
-                    r = vertexColors[fi][vi][0]
-                    g = vertexColors[fi][vi][1]
-                    b = vertexColors[fi][vi][2]
-
-                    face_colors[vi].r = r
-                    face_colors[vi].g = g
-                    face_colors[vi].b = b
-
-    elif face_data["hasFaceColors"]:
-
-        print("setting vertex colors from face colors")
-
-        me.vertex_colors.new("vertex_color_layer_0")
-
-        for fi in range(len(faces)):
-
-            if faceColors[fi]:
-
-                r = faceColors[fi][0]
-                g = faceColors[fi][1]
-                b = faceColors[fi][2]
-
-                face_colors = me.vertex_colors[0].data[fi]
-                face_colors = face_colors.color1, face_colors.color2, face_colors.color3, face_colors.color4
-
-                for vi in range(len(faces[fi])):
-
-                    face_colors[vi].r = r
-                    face_colors[vi].g = g
-                    face_colors[vi].b = b
-
-    # Handle uvs
-
-    if face_data["hasVertexUVs"]:
-
-        print("setting vertex uvs")
-
-        for li, layer in enumerate(vertexUVs):
-
-            me.uv_textures.new("uv_layer_%d" % li)
-
-            for fi in range(len(faces)):
-
-                if layer[fi]:
-
-                    uv_face = me.uv_textures[li].data[fi]
-                    face_uvs = uv_face.uv1, uv_face.uv2, uv_face.uv3, uv_face.uv4
-
-                    for vi in range(len(layer[fi])):
-
-                        u = layer[fi][vi][0]
-                        v = layer[fi][vi][1]
-
-                        face_uvs[vi].x = u
-                        face_uvs[vi].y = v
-
-                    active_texture = materials[faceMaterials[fi]].active_texture
-
-                    if active_texture:
-                        uv_face.image = active_texture.image
-
-
-    # Handle materials # 1
-
-    if face_data["hasMaterials"]:
-
-
-        print("setting materials (mesh)")
-
-        for m in materials:
-
-            me.materials.append(m)
-
-        print("setting materials (faces)")
-
-        for fi in range(len(faces)):
-
-            if faceMaterials[fi] >= 0:
-
-                me.tessfaces[fi].material_index = faceMaterials[fi]
-
-    # Create a new object
-
-    ob = bpy.data.objects.new(name, me)
-    ob.data = me                                # link the mesh data to the object
-
-
-    scene = bpy.context.scene                   # get the current scene
-    scene.objects.link(ob)                      # link the object into the scene
-
-    ob.location = scene.cursor_location         # position object at 3d-cursor
-
-
-# #####################################################
-# Faces
-# #####################################################
-
-def extract_faces(data):
-
-    result = {
-    "faces"         : [],
-    "materials"     : [],
-    "faceUVs"       : [],
-    "vertexUVs"     : [],
-    "faceNormals"   : [],
-    "vertexNormals" : [],
-    "faceColors"    : [],
-    "vertexColors"  : [],
-
-    "hasVertexNormals"  : False,
-    "hasVertexUVs"      : False,
-    "hasVertexColors"   : False,
-    "hasFaceColors"     : False,
-    "hasMaterials"      : False
-    }
-
-    faces = data.get("faces", [])
-    normals = data.get("normals", [])
-    colors = data.get("colors", [])
-
-    offset = 0
-    zLength = len(faces)
-
-    # disregard empty arrays
-
-    nUvLayers = 0
-
-    for layer in data["uvs"]:
-
-        if len(layer) > 0:
-            nUvLayers += 1
-            result["faceUVs"].append([])
-            result["vertexUVs"].append([])
-
-
-    while ( offset < zLength ):
-
-        type = faces[ offset ]
-        offset += 1
-
-        isQuad          	= isBitSet( type, 0 )
-        hasMaterial         = isBitSet( type, 1 )
-        hasFaceUv           = isBitSet( type, 2 )
-        hasFaceVertexUv     = isBitSet( type, 3 )
-        hasFaceNormal       = isBitSet( type, 4 )
-        hasFaceVertexNormal = isBitSet( type, 5 )
-        hasFaceColor	    = isBitSet( type, 6 )
-        hasFaceVertexColor  = isBitSet( type, 7 )
-
-        #print("type", type, "bits", isQuad, hasMaterial, hasFaceUv, hasFaceVertexUv, hasFaceNormal, hasFaceVertexNormal, hasFaceColor, hasFaceVertexColor)
-
-        result["hasVertexUVs"] = result["hasVertexUVs"] or hasFaceVertexUv
-        result["hasVertexNormals"] = result["hasVertexNormals"] or hasFaceVertexNormal
-        result["hasVertexColors"] = result["hasVertexColors"] or hasFaceVertexColor
-        result["hasFaceColors"] = result["hasFaceColors"] or hasFaceColor
-        result["hasMaterials"] = result["hasMaterials"] or hasMaterial
-
-        # vertices
-
-        if isQuad:
-
-            a = faces[ offset ]
-            offset += 1
-
-            b = faces[ offset ]
-            offset += 1
-
-            c = faces[ offset ]
-            offset += 1
-
-            d = faces[ offset ]
-            offset += 1
-
-            face = [a, b, c, d]
-
-            nVertices = 4
-
-        else:
-
-            a = faces[ offset ]
-            offset += 1
-
-            b = faces[ offset ]
-            offset += 1
-
-            c = faces[ offset ]
-            offset += 1
-
-            face = [a, b, c]
-
-            nVertices = 3
-
-        result["faces"].append(face)
-
-        # material
-
-        if hasMaterial:
-
-            materialIndex = faces[ offset ]
-            offset += 1
-
-        else:
-
-            materialIndex = -1
-
-        result["materials"].append(materialIndex)
-
-        # uvs
-
-        for i in range(nUvLayers):
-
-            faceUv = None
-
-            if hasFaceUv:
-
-                uvLayer = data["uvs"][ i ]
-
-                uvIndex = faces[ offset ]
-                offset += 1
-
-                u = uvLayer[ uvIndex * 2 ]
-                v = uvLayer[ uvIndex * 2 + 1 ]
-
-                faceUv = [u, v]
-
-            result["faceUVs"][i].append(faceUv)
-
-
-            if hasFaceVertexUv:
-
-                uvLayer = data["uvs"][ i ]
-
-                vertexUvs = []
-
-                for j in range(nVertices):
-
-                    uvIndex = faces[ offset ]
-                    offset += 1
-
-                    u = uvLayer[ uvIndex * 2 ]
-                    v = uvLayer[ uvIndex * 2 + 1 ]
-
-                    vertexUvs.append([u, v])
-
-            result["vertexUVs"][i].append(vertexUvs)
-
-
-        if hasFaceNormal:
-
-            normalIndex = faces[ offset ] * 3
-            offset += 1
-
-            x = normals[ normalIndex ]
-            y = normals[ normalIndex + 1 ]
-            z = normals[ normalIndex + 2 ]
-
-            faceNormal = [x, y, z]
-
-        else:
-
-            faceNormal = None
-
-        result["faceNormals"].append(faceNormal)
-
-
-        if hasFaceVertexNormal:
-
-            vertexNormals = []
-
-            for j in range(nVertices):
-
-                normalIndex = faces[ offset ] * 3
-                offset += 1
-
-                x = normals[ normalIndex ]
-                y = normals[ normalIndex + 1 ]
-                z = normals[ normalIndex + 2 ]
-
-                vertexNormals.append( [x, y, z] )
-
-
-        else:
-
-            vertexNormals = None
-
-        result["vertexNormals"].append(vertexNormals)
-
-
-        if hasFaceColor:
-
-            colorIndex = faces[ offset ]
-            offset += 1
-
-            faceColor = hexToTuple( colors[ colorIndex ] )
-
-        else:
-
-            faceColor = None
-
-        result["faceColors"].append(faceColor)
-
-
-        if hasFaceVertexColor:
-
-            vertexColors = []
-
-            for j in range(nVertices):
-
-                colorIndex = faces[ offset ]
-                offset += 1
-
-                color = hexToTuple( colors[ colorIndex ] )
-                vertexColors.append( color )
-
-        else:
-
-            vertexColors = None
-
-        result["vertexColors"].append(vertexColors)
-
-
-    return result
-
-# #####################################################
-# Utils
-# #####################################################
-
-def hexToTuple( hexColor ):
-    r = (( hexColor >> 16 ) & 0xff) / 255.0
-    g = (( hexColor >> 8 ) & 0xff) / 255.0
-    b = ( hexColor & 0xff) / 255.0
-    return (r, g, b)
-
-def isBitSet(value, position):
-    return value & ( 1 << position )
-
-def splitArray(data, chunkSize):
-    result = []
-    chunk = []
-    for i in range(len(data)):
-        if i > 0 and i % chunkSize == 0:
-            result.append(chunk)
-            chunk = []
-        chunk.append(data[i])
-    result.append(chunk)
-    return result
-
-
-def extract_json_string(text):
-    marker_begin = "var model ="
-    marker_end = "postMessage"
-
-    start = text.find(marker_begin) + len(marker_begin)
-    end = text.find(marker_end)
-    end = text.rfind("}", start, end)
-    return text[start:end+1].strip()
-
-def get_name(filepath):
-    return os.path.splitext(os.path.basename(filepath))[0]
-
-def get_path(filepath):
-    return os.path.dirname(filepath)
-
-# #####################################################
-# Parser
-# #####################################################
-
-def load(operator, context, filepath, option_flip_yz = True, recalculate_normals = True, option_worker = False):
-
-    print('\nimporting %r' % filepath)
-
-    time_main = time.time()
-
-    print("\tparsing JSON file...")
-
-    time_sub = time.time()
-
-    file = open(filepath, 'rU')
-    rawcontent = file.read()
-    file.close()
-
-    if option_worker:
-        json_string = extract_json_string(rawcontent)
-    else:
-        json_string = rawcontent
-    data = json.loads( json_string )
-
-    time_new = time.time()
-
-    print('parsing %.4f sec' % (time_new - time_sub))
-
-    time_sub = time_new
-
-    # flip YZ
-
-    vertices = splitArray(data["vertices"], 3)
-
-    if option_flip_yz:
-        vertices[:] = [(v[0], -v[2], v[1]) for v in vertices]
-
-    # extract faces
-
-    face_data = extract_faces(data)
-
-    # deselect all
-
-    bpy.ops.object.select_all(action='DESELECT')
-
-    nfaces = len(face_data["faces"])
-    nvertices = len(vertices)
-    nnormals = len(data.get("normals", [])) / 3
-    ncolors = len(data.get("colors", [])) / 3
-    nuvs = len(data.get("uvs", [])) / 2
-    nmaterials = len(data.get("materials", []))
-
-    print('\tbuilding geometry...\n\tfaces:%i, vertices:%i, vertex normals: %i, vertex uvs: %i, vertex colors: %i, materials: %i ...' % (
-        nfaces, nvertices, nnormals, nuvs, ncolors, nmaterials ))
-
-    # Create materials
-
-    materials = create_materials(data, get_path(filepath))
-
-    # Create new obj
-
-    create_mesh_object(get_name(filepath), vertices, materials, face_data, option_flip_yz, recalculate_normals)
-
-    scene = bpy.context.scene
-    scene.update()
-
-    time_new = time.time()
-
-    print('finished importing: %r in %.4f sec.' % (filepath, (time_new - time_main)))
-    return {'FINISHED'}
-
-
-if __name__ == "__main__":
-    register()

+ 17 - 5
utils/exporters/blender/README.md

@@ -1,13 +1,17 @@
-# Three.js Blender Import/Export
+# Three.js Blender Export
 
-Imports and exports Three.js' ASCII JSON format.
+Exports Three.js' ASCII JSON format.
+
+## IMPORTANT
+
+The exporter (r69 and earlier) has been completely replaced. Please ensure you have removed the io_mesh_threejs addon from your Blender addons directory before installing the current addon (io_three).
 
 ## Installation
 
-Copy the io_mesh_threejs folder to the scripts/addons folder. If it doesn't exist, create it. The full path is OS-dependent (see below).
+Copy the io_three folder to the scripts/addons folder. If it doesn't exist, create it. The full path is OS-dependent (see below).
 
 Once that is done, you need to activate the plugin. Open Blender preferences, look for
-Addons, search for `three`, enable the checkbox next to the `Import-Export: three.js format` entry.
+Addons, search for `three`, enable the checkbox next to the `Import-Export: Three.js Format` entry.
 
 Goto Usage.
 
@@ -40,4 +44,12 @@ For Ubuntu users who installed Blender 2.68 via apt-get, this is the location:
 
 ## Usage
 
-Activate the Import-Export addon under "User Preferences" > "Addons" and then use the regular Import and Export menu within Blender, select `Three.js (js)`.
+Activate the Import-Export addon under "User Preferences" > "Addons" and then use the regular Export menu within Blender, select `Three.js (json)`.
+
+
+## Enabling msgpack
+To enable msgpack compression, copy the msgpack module to scripts/modules.
+
+
+## Importer
+Currently there is no import functionality available.

+ 643 - 0
utils/exporters/blender/addons/io_three/__init__.py

@@ -0,0 +1,643 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+#  This program is free software; you can redistribute it and/or
+#  modify it under the terms of the GNU General Public License
+#  as published by the Free Software Foundation; either version 2
+#  of the License, or (at your option) any later version.
+#
+#  This program is distributed in the hope that it will be useful,
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#  GNU General Public License for more details.
+#
+#  You should have received a copy of the GNU General Public License
+#  along with this program; if not, write to the Free Software Foundation,
+#  Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+import os
+import json
+
+import bpy
+from bpy_extras.io_utils import ExportHelper
+from bpy.props import (
+    EnumProperty,
+    BoolProperty,
+    FloatProperty,
+    IntProperty
+)
+
+from . import constants
+
+SETTINGS_FILE_EXPORT = 'three_settings_export.js'
+
+
+bl_info = {
+    'name': 'Three.js Format',
+    'author': 'Ed Caspersen (repsac)',
+    'version': (1, 0, 0),
+    'blender': (2, 7, 2),
+    'location': 'File > Import-Export',
+    'description': 'Export Three.js formatted JSON files.',
+    'warning': '',
+    'wiki_url': 'https://github.com/mrdoob/three.js/tree/'\
+        'master/utils/exporters/blender',
+    'tracker_url':  'https://github.com/mrdoob/three.js/issues',
+    'category': 'Import-Export'
+}
+
+def _geometry_types():
+    types = [
+        (constants.GLOBAL, constants.GLOBAL.title(), 
+        constants.GLOBAL),
+        (constants.GEOMETRY, constants.GEOMETRY.title(), 
+        constants.GEOMETRY),
+        (constants.BUFFER_GEOMETRY, constants.BUFFER_GEOMETRY, 
+        constants.BUFFER_GEOMETRY),
+    ]
+
+    return types
+
+bpy.types.Mesh.THREE_geometry_type = EnumProperty(
+    name='Geometry type',
+    description='Geometry type',
+    items=_geometry_types(),
+    default=constants.GLOBAL)
+
+class MESH_PT_hello(bpy.types.Panel):
+
+    bl_label = 'THREE'
+    bl_space_type = 'PROPERTIES'
+    bl_region_type = 'WINDOW'
+    bl_context = 'data'
+    
+    def draw(self, context):
+        row = self.layout.row()
+        if context.mesh:
+            row.prop(context.mesh, 'THREE_geometry_type', text='Type')
+
+def _blending_types(index):
+    types = (
+        constants.BLENDING_TYPES.NONE, 
+        constants.BLENDING_TYPES.NORMAL, 
+        constants.BLENDING_TYPES.ADDITIVE, 
+        constants.BLENDING_TYPES.SUBTRACTIVE, 
+        constants.BLENDING_TYPES.MULTIPLY, 
+        constants.BLENDING_TYPES.CUSTOM)
+    return (types[index], types[index], types[index])
+
+bpy.types.Material.THREE_blending_type = EnumProperty(
+    name='Blending type', 
+    description='Blending type', 
+    items=[_blending_types(x) for x in range(5)], 
+    default=constants.BLENDING_TYPES.NORMAL)
+
+bpy.types.Material.THREE_depth_write = BoolProperty(default=True)
+bpy.types.Material.THREE_depth_test = BoolProperty(default=True)
+
+class MATERIAL_PT_hello(bpy.types.Panel):
+
+    bl_label = 'THREE'
+    bl_space_type = 'PROPERTIES'
+    bl_region_type = 'WINDOW'
+    bl_context = 'material'
+    
+    def draw(self, context):
+        layout = self.layout
+        mat = context.material
+    
+        if mat is not None:
+            row = layout.row()
+            row.label(text='Selected material: %s' % mat.name )
+
+            row = layout.row()
+            row.prop(mat, 'THREE_blending_type', 
+                text='Blending type' )
+
+            row = layout.row()
+            row.prop(mat, 'THREE_depth_write', 
+                text='Enable depth writing' )
+
+            row = layout.row()
+            row.prop(mat, 'THREE_depth_test', 
+                text='Enable depth testing' )
+
+def _mag_filters(index):
+    types = (constants.LINEAR_FILTERS.LINEAR,
+        constants.NEAREST_FILTERS.NEAREST)
+    return (types[index], types[index], types[index])
+
+bpy.types.Texture.THREE_mag_filter = EnumProperty(
+    name='Mag Filter',
+    items = [_mag_filters(x) for x in range(2)],
+    default=constants.LINEAR_FILTERS.LINEAR)
+
+def _min_filters(index):
+    types = (constants.LINEAR_FILTERS.LINEAR,
+        constants.LINEAR_FILTERS.MIP_MAP_NEAREST,
+        constants.LINEAR_FILTERS.MIP_MAP_LINEAR,
+        constants.NEAREST_FILTERS.NEAREST,
+        constants.NEAREST_FILTERS.MIP_MAP_NEAREST,
+        constants.NEAREST_FILTERS.MIP_MAP_LINEAR)
+    return (types[index], types[index], types[index])
+
+bpy.types.Texture.THREE_min_filter = EnumProperty(
+    name='Min Filter',
+    items = [_min_filters(x) for x in range(6)],
+    default=constants.LINEAR_FILTERS.MIP_MAP_LINEAR)
+
+def _mapping(index):
+    types = (constants.MAPPING_TYPES.UV,
+        constants.MAPPING_TYPES.CUBE_REFLECTION,
+        constants.MAPPING_TYPES.CUBE_REFRACTION,
+        constants.MAPPING_TYPES.SPHERICAL_REFLECTION,
+        constants.MAPPING_TYPES.SPHERICAL_REFRACTION)
+    return (types[index], types[index], types[index])
+
+bpy.types.Texture.THREE_mapping = EnumProperty(
+    name='Mapping',
+    items = [_mapping(x) for x in range(5)],
+    default=constants.MAPPING_TYPES.UV)
+
+class TEXTURE_PT_hello(bpy.types.Panel):
+    bl_label = 'THREE'
+    bl_space_type = 'PROPERTIES'
+    bl_region_type = 'WINDOW'
+    bl_context = 'texture'
+
+    #@TODO: possible to make cycles compatible?
+    def draw(self, context):
+        layout = self.layout
+        tex = context.texture
+
+        if tex is not None:
+            row = layout.row()
+            row.prop(tex, 'THREE_mapping', text='Mapping')
+
+            row = layout.row()
+            row.prop(tex, 'THREE_mag_filter', text='Mag Filter')
+
+            row = layout.row()
+            row.prop(tex, 'THREE_min_filter', text='Min Filter')
+
+bpy.types.Object.THREE_export = bpy.props.BoolProperty(default=True)
+
+class OBJECT_PT_hello(bpy.types.Panel):
+    bl_label = 'THREE'
+    bl_space_type = 'PROPERTIES'
+    bl_region_type = 'WINDOW'
+    bl_context = 'object'
+
+    def draw(self, context):
+        layout = self.layout
+        obj = context.object
+
+        row = layout.row()
+        row.prop(obj, 'THREE_export', text='Export')
+
+def get_settings_fullpath():
+    return os.path.join(bpy.app.tempdir, SETTINGS_FILE_EXPORT)
+
+
+def save_settings_export(properties):
+
+    settings = {
+        constants.VERTICES: properties.option_vertices,
+        constants.FACES: properties.option_faces,
+        constants.NORMALS: properties.option_normals,
+        constants.SKINNING: properties.option_skinning,
+        constants.BONES: properties.option_bones,
+        constants.GEOMETRY_TYPE: properties.option_geometry_type,
+
+        constants.MATERIALS: properties.option_materials,
+        constants.UVS: properties.option_uv_coords,
+        constants.FACE_MATERIALS: properties.option_face_materials,
+        constants.MAPS: properties.option_maps,
+        constants.COLORS: properties.option_colors,
+        constants.MIX_COLORS: properties.option_mix_colors,
+
+        constants.SCALE: properties.option_scale,
+        constants.ENABLE_PRECISION: properties.option_round_off,
+        constants.PRECISION: properties.option_round_value,
+        constants.LOGGING: properties.option_logging,
+        constants.COMPRESSION: properties.option_compression,
+        constants.COPY_TEXTURES: properties.option_copy_textures,
+
+        constants.SCENE: properties.option_export_scene,
+        constants.EMBED_GEOMETRY: properties.option_embed_geometry,
+        constants.EMBED_ANIMATION: properties.option_embed_animation,
+        constants.LIGHTS: properties.option_lights,
+        constants.CAMERAS: properties.option_cameras,
+
+        constants.MORPH_TARGETS: properties.option_animation_morph,
+        constants.ANIMATION: properties.option_animation_skeletal,
+        constants.FRAME_STEP: properties.option_frame_step
+    }
+
+    fname = get_settings_fullpath()
+    with open(fname, 'w') as stream:
+        json.dump(settings, stream)
+
+    return settings
+
+
+def restore_settings_export(properties):
+
+    settings = {}
+
+    fname = get_settings_fullpath()
+    if os.path.exists(fname) and os.access(fname, os.R_OK):
+        f = open(fname, 'r')
+        settings = json.load(f)
+
+    ## Geometry {
+    properties.option_vertices = settings.get(
+        constants.VERTICES, constants.EXPORT_OPTIONS[constants.VERTICES])
+    properties.option_faces = settings.get(
+        constants.FACES, constants.EXPORT_OPTIONS[constants.FACES])
+    properties.option_normals = settings.get(
+        constants.NORMALS, constants.EXPORT_OPTIONS[constants.NORMALS])
+
+    properties.option_skinning = settings.get(
+        constants.SKINNING, constants.EXPORT_OPTIONS[constants.SKINNING])
+    properties.option_bones = settings.get(
+        constants.BONES, constants.EXPORT_OPTIONS[constants.BONES])
+    properties.option_geometry_type = settings.get(
+        constants.GEOMETRY_TYPE,
+        constants.EXPORT_OPTIONS[constants.GEOMETRY_TYPE])
+    ## }
+
+    ## Materials {
+    properties.option_materials = settings.get(
+        constants.MATERIALS, constants.EXPORT_OPTIONS[constants.MATERIALS])
+    properties.option_uv_coords = settings.get(
+        constants.UVS, constants.EXPORT_OPTIONS[constants.UVS])
+    properties.option_face_materials = settings.get(
+        constants.FACE_MATERIALS, 
+        constants.EXPORT_OPTIONS[constants.FACE_MATERIALS])
+    properties.option_maps = settings.get(
+        constants.MAPS, constants.EXPORT_OPTIONS[constants.MAPS])
+    properties.option_colors = settings.get(
+        constants.COLORS, constants.EXPORT_OPTIONS[constants.COLORS])
+    properties.option_mix_colors = settings.get(
+        constants.MIX_COLORS, constants.EXPORT_OPTIONS[constants.MIX_COLORS])
+    ## }
+
+    ## Settings {
+    properties.option_scale = settings.get(
+        constants.SCALE, constants.EXPORT_OPTIONS[constants.SCALE])
+    properties.option_round_off = settings.get(
+        constants.ENABLE_PRECISION, 
+        constants.EXPORT_OPTIONS[constants.ENABLE_PRECISION])
+    properties.option_round_value = settings.get(
+        constants.PRECISION, 
+        constants.EXPORT_OPTIONS[constants.PRECISION])
+    properties.option_logging = settings.get(
+        constants.LOGGING, constants.EXPORT_OPTIONS[constants.LOGGING])
+    properties.option_compression = settings.get(
+        constants.COMPRESSION, constants.NONE)
+    properties.option_copy_textures = settings.get(
+        constants.COPY_TEXTURES, 
+        constants.EXPORT_OPTIONS[constants.COPY_TEXTURES])
+    properties.option_embed_animation = settings.get(
+        constants.EMBED_ANIMATION, 
+        constants.EXPORT_OPTIONS[constants.EMBED_ANIMATION])
+    ## }
+
+    ## Scene {
+    properties.option_export_scene = settings.get(
+        constants.SCENE, constants.EXPORT_OPTIONS[constants.SCENE])
+    properties.option_embed_geometry = settings.get(
+        constants.EMBED_GEOMETRY, 
+        constants.EXPORT_OPTIONS[constants.EMBED_GEOMETRY])
+    properties.option_lights = settings.get(
+        constants.LIGHTS, constants.EXPORT_OPTIONS[constants.LIGHTS])
+    properties.option_cameras = settings.get(
+        constants.CAMERAS, constants.EXPORT_OPTIONS[constants.CAMERAS])
+    ## }
+
+    ## Animation {
+    properties.option_animation_morph = settings.get(
+        constants.MORPH_TARGETS, constants.EXPORT_OPTIONS[constants.MORPH_TARGETS])
+    properties.option_animation_skeletal = settings.get(
+        constants.ANIMATION, constants.EXPORT_OPTIONS[constants.ANIMATION])
+    #properties.option_frame_index_as_time = settings.get(
+    #    'option_frame_index_as_time', False)
+
+    properties.option_frame_step = settings.get(
+        constants.FRAME_STEP, constants.EXPORT_OPTIONS[constants.FRAME_STEP])
+    ## }
+
+def compression_types():
+    types = [(constants.NONE, constants.NONE, constants.NONE)]
+
+    try:
+        import msgpack
+        types.append((constants.MSGPACK, constants.MSGPACK, 
+            constants.MSGPACK))
+    except ImportError:
+        pass
+
+    return types
+
+class ExportThree(bpy.types.Operator, ExportHelper):
+
+    bl_idname='export.three'
+    bl_label = 'Export THREE'
+
+    filename_ext = constants.EXTENSION
+
+    option_vertices = BoolProperty(
+        name='Vertices', 
+        description='Export vertices', 
+        default=constants.EXPORT_OPTIONS[constants.VERTICES])
+
+    option_faces = BoolProperty(
+        name='Faces', 
+        description='Export faces', 
+        default=constants.EXPORT_OPTIONS[constants.FACES])
+
+    option_normals = BoolProperty(
+        name='Normals', 
+        description='Export normals', 
+        default=constants.EXPORT_OPTIONS[constants.NORMALS])
+
+    option_colors = BoolProperty(
+        name='Colors', 
+        description='Export vertex colors', 
+        default=constants.EXPORT_OPTIONS[constants.COLORS])
+
+    option_mix_colors = BoolProperty(
+        name='Mix Colors',
+        description='Mix material and vertex colors',
+        default=constants.EXPORT_OPTIONS[constants.MIX_COLORS])
+
+    option_uv_coords = BoolProperty(
+        name='UVs', 
+        description='Export texture coordinates', 
+        default=constants.EXPORT_OPTIONS[constants.UVS])
+
+    option_materials = BoolProperty(
+        name='Materials', 
+        description='Export materials', 
+        default=constants.EXPORT_OPTIONS[constants.MATERIALS])
+
+    option_face_materials = BoolProperty(
+        name='Face Materials',
+        description='Face mapping materials',
+        default=constants.EXPORT_OPTIONS[constants.FACE_MATERIALS])
+
+    option_maps = BoolProperty(
+        name='Textures',
+        description='Include texture maps',
+        default=constants.EXPORT_OPTIONS[constants.MAPS])
+
+    option_skinning = BoolProperty(
+        name='Skinning', 
+        description='Export skin data', 
+        default=constants.EXPORT_OPTIONS[constants.SKINNING])
+
+    option_bones = BoolProperty(
+        name='Bones', 
+        description='Export bones', 
+        default=constants.EXPORT_OPTIONS[constants.BONES])
+
+    option_scale = FloatProperty(
+        name='Scale', 
+        description='Scale vertices', 
+        min=0.01, 
+        max=1000.0, 
+        soft_min=0.01, 
+        soft_max=1000.0, 
+        default=constants.EXPORT_OPTIONS[constants.SCALE])
+
+    option_round_off = BoolProperty(
+        name='Enable Precision',
+        description='Round off floating point values',
+        default=constants.EXPORT_OPTIONS[constants.ENABLE_PRECISION])
+
+    option_round_value = IntProperty(
+        name='Precision',
+        min=0,
+        max=16,
+        description='Floating point precision',
+        default=constants.EXPORT_OPTIONS[constants.PRECISION])
+
+    logging_types = [
+        (constants.DEBUG, constants.DEBUG, constants.DEBUG),
+        (constants.INFO, constants.INFO, constants.INFO),
+        (constants.WARNING, constants.WARNING, constants.WARNING),
+        (constants.ERROR, constants.ERROR, constants.ERROR),
+        (constants.CRITICAL, constants.CRITICAL, constants.CRITICAL)]
+
+    option_logging = EnumProperty(
+        name='Logging', 
+        description = 'Logging verbosity level', 
+        items=logging_types, 
+        default=constants.DEBUG)
+
+    option_geometry_type = EnumProperty(
+        name='Type',
+        description='Geometry type',
+        items=_geometry_types()[1:],
+        default=constants.GEOMETRY)
+
+    option_export_scene = BoolProperty(
+        name='Scene', 
+        description='Export scene', 
+        default=constants.EXPORT_OPTIONS[constants.SCENE])
+
+    option_embed_geometry = BoolProperty(
+        name='Embed geometry', 
+        description='Embed geometry', 
+        default=constants.EXPORT_OPTIONS[constants.EMBED_GEOMETRY])
+
+    option_embed_animation = BoolProperty(
+        name='Embed animation', 
+        description='Embed animation data with the geometry data', 
+        default=constants.EXPORT_OPTIONS[constants.EMBED_ANIMATION])
+
+    option_copy_textures = BoolProperty(
+        name='Copy textures', 
+        description='Copy textures', 
+        default=constants.EXPORT_OPTIONS[constants.COPY_TEXTURES])
+
+    option_lights = BoolProperty(
+        name='Lights', 
+        description='Export default scene lights', 
+        default=False)
+
+    option_cameras = BoolProperty(
+        name='Cameras', 
+        description='Export default scene cameras', 
+        default=False)
+
+    option_animation_morph = BoolProperty(
+        name='Morph animation', 
+        description='Export animation (morphs)', 
+        default=constants.EXPORT_OPTIONS[constants.MORPH_TARGETS])
+
+    option_animation_skeletal = BoolProperty(
+        name='Skeletal animation', 
+        description='Export animation (skeletal)', 
+        default=constants.EXPORT_OPTIONS[constants.ANIMATION])
+
+    option_frame_step = IntProperty(
+        name='Frame step', 
+        description='Animation frame step', 
+        min=1, 
+        max=1000, 
+        soft_min=1, 
+        soft_max=1000, 
+        default=1)
+ 
+    option_compression = EnumProperty(
+        name='Compression', 
+        description = 'Compression options', 
+        items=compression_types(), 
+        default=constants.NONE)
+
+    def invoke(self, context, event):
+        restore_settings_export(self.properties)
+        return ExportHelper.invoke(self, context, event)
+
+    @classmethod
+    def poll(cls, context):
+        return context.active_object is not None
+
+    def execute(self, context):
+        if not self.properties.filepath:
+            raise Exception('filename not set')
+
+        settings = save_settings_export(self.properties)
+
+        filepath = self.filepath
+        if settings[constants.COMPRESSION] == constants.MSGPACK:
+            filepath = '%s%s' % (filepath[:-4], constants.PACK)
+
+        from io_three import exporter
+        if settings[constants.SCENE]:
+            exporter.export_scene(filepath, settings)
+        else:
+            exporter.export_geometry(filepath, settings)
+
+        return {'FINISHED'}
+
+    def draw(self, context):
+        layout = self.layout
+
+        ## Geometry {
+        row = layout.row()
+        row.label(text='Geometry:')
+
+        row = layout.row()
+        row.prop(self.properties, 'option_vertices')
+        row.prop(self.properties, 'option_faces')
+
+        row = layout.row()
+        row.prop(self.properties, 'option_normals')
+
+        row = layout.row()
+        row.prop(self.properties, 'option_bones')
+        row.prop(self.properties, 'option_skinning')
+
+        row = layout.row()
+        row.prop(self.properties, 'option_geometry_type')
+        ## }
+
+        layout.separator()
+
+        ## Materials {
+        row = layout.row()
+        row.label(text='Materials:')
+
+        row = layout.row()
+        row.prop(self.properties, 'option_materials')
+        row.prop(self.properties, 'option_uv_coords')
+
+        row = layout.row()
+        row.prop(self.properties, 'option_face_materials')
+        row.prop(self.properties, 'option_maps')
+
+        row = layout.row()
+        row.prop(self.properties, 'option_colors')
+        row.prop(self.properties, 'option_mix_colors')
+        ## }
+    
+        layout.separator()
+
+        ## Settings {
+        row = layout.row()
+        row.label(text='Settings:')
+
+        row = layout.row()
+        row.prop(self.properties, 'option_scale')
+        
+        row = layout.row()
+        row.prop(self.properties, 'option_round_off')
+        row.prop(self.properties, 'option_round_value')
+
+        row = layout.row()
+        row.prop(self.properties, 'option_logging')
+
+        row = layout.row()
+        row.prop(self.properties, 'option_compression')
+
+        row = layout.row()
+        row.prop(self.properties, 'option_copy_textures')
+
+        row = layout.row()
+        row.prop(self.properties, 'option_embed_animation')
+        ## }
+
+        layout.separator()
+
+        ## Scene {
+        row = layout.row()
+        row.label(text='Scene:')
+
+        row = layout.row()
+        row.prop(self.properties, 'option_export_scene')
+
+        row = layout.row()
+        row.prop(self.properties, 'option_embed_geometry')
+
+        row = layout.row()
+        row.prop(self.properties, 'option_lights')
+        row.prop(self.properties, 'option_cameras')
+        ## }
+
+        layout.separator()
+
+        ## Animation {
+        row = layout.row()
+        row.label(text='Animation:')
+
+        row = layout.row()
+        row.prop(self.properties, 'option_animation_morph')
+        row = layout.row()
+        row.prop(self.properties, 'option_animation_skeletal')
+        row = layout.row()
+        row.prop(self.properties, 'option_frame_step')
+        ## }
+
+def menu_func_export(self, context):
+    default_path = bpy.data.filepath.replace('.blend', constants.EXTENSION)
+    text = 'Three (%s)' % constants.EXTENSION
+    operator = self.layout.operator(ExportThree.bl_idname, text=text)
+    operator.filepath = default_path
+
+
+def register():
+    bpy.utils.register_module(__name__)
+    bpy.types.INFO_MT_file_export.append(menu_func_export)
+
+
+def unregister():
+    bpy.utils.unregister_module(__name__)
+    bpy.types.INFO_MT_file_export.remove(menu_func_export)
+
+
+if __name__ == '__main__':
+    register() 

+ 311 - 0
utils/exporters/blender/addons/io_three/constants.py

@@ -0,0 +1,311 @@
+'''
+All constant data used in the package should be defined here.
+'''
+
+from collections import OrderedDict as BASE_DICT
+
+BLENDING_TYPES = type('Blending', (), {
+    'NONE': 'NoBlending',
+    'NORMAL': 'NormalBlending',
+    'ADDITIVE': 'AdditiveBlending',
+    'SUBTRACTIVE': 'SubtractiveBlending',
+    'MULTIPLY': 'MultiplyBlending',
+    'CUSTOM': 'CustomBlending'
+})
+
+NEAREST_FILTERS = type('NearestFilters', (), {
+    'NEAREST': 'NearestFilter',
+    'MIP_MAP_NEAREST': 'NearestMipMapNearestFilter',
+    'MIP_MAP_LINEAR': 'NearestMipMapLinearFilter'
+})
+
+LINEAR_FILTERS = type('LinearFilters', (), {
+    'LINEAR': 'LinearFilter',
+    'MIP_MAP_NEAREST': 'LinearMipMapNearestFilter',
+    'MIP_MAP_LINEAR': 'LinearMipMapLinearFilter'
+})
+
+MAPPING_TYPES = type('Mapping', (), {
+    'UV': 'UVMapping',
+    'CUBE_REFLECTION': 'CubeReflectionMapping',
+    'CUBE_REFRACTION': 'CubeRefractionMapping',
+    'SPHERICAL_REFLECTION': 'SphericalReflectionMapping',
+    'SPHERICAL_REFRACTION': 'SphericalRefractionMapping'
+})
+
+JSON = 'json'
+EXTENSION = '.%s' % JSON
+
+
+MATERIALS = 'materials'
+SCENE = 'scene'
+VERTICES = 'vertices'
+FACES = 'faces'
+NORMALS = 'normals'
+BONES = 'bones'
+UVS = 'uvs'
+COLORS = 'colors'
+MIX_COLORS = 'mixColors'
+SCALE = 'scale'
+COMPRESSION = 'compression'
+MAPS = 'maps'
+FRAME_STEP = 'frameStep'
+ANIMATION = 'animation'
+MORPH_TARGETS = 'morphTargets'
+SKIN_INDICES = 'skinIndices'
+SKIN_WEIGHTS = 'skinWeights'
+LOGGING = 'logging'
+CAMERAS = 'cameras'
+LIGHTS = 'lights'
+FACE_MATERIALS = 'faceMaterials'
+SKINNING = 'skinning'
+COPY_TEXTURES = 'copyTextures'
+ENABLE_PRECISION = 'enablePrecision'
+PRECISION = 'precision'
+DEFAULT_PRECISION = 6
+EMBED_GEOMETRY = 'embedGeometry'
+EMBED_ANIMATION = 'embedAnimation'
+
+GLOBAL = 'global'
+BUFFER_GEOMETRY = 'BufferGeometry'
+GEOMETRY = 'geometry'
+GEOMETRY_TYPE = 'geometryType'
+
+CRITICAL = 'critical'
+ERROR = 'error'
+WARNING = 'warning'
+INFO = 'info'
+DEBUG = 'debug'
+
+NONE = 'None'
+MSGPACK = 'msgpack'
+
+PACK = 'pack'
+
+EXPORT_OPTIONS = {
+    FACES: True,
+    VERTICES: True,
+    NORMALS: False,
+    UVS: False,
+    COLORS: False,
+    MATERIALS: False,
+    FACE_MATERIALS: False,
+    SCALE: 1,
+    FRAME_STEP: 1,
+    SCENE: True,
+    MIX_COLORS: False,
+    COMPRESSION: None,
+    MAPS: False,
+    ANIMATION: False,
+    BONES: False,
+    SKINNING: False,
+    MORPH_TARGETS: False,
+    CAMERAS: False,
+    LIGHTS: False,
+    COPY_TEXTURES: True,
+    LOGGING: DEBUG,
+    ENABLE_PRECISION: False,
+    PRECISION: DEFAULT_PRECISION,
+    EMBED_GEOMETRY: True,
+    EMBED_ANIMATION: True,
+    GEOMETRY_TYPE: GEOMETRY
+}
+
+
+FORMAT_VERSION = 4.3
+VERSION = 'version'
+THREE = 'io_three'
+GENERATOR = 'generator'
+SOURCE_FILE = 'sourceFile'
+VALID_DATA_TYPES = (str, int, float, bool, list, tuple, dict)
+
+JSON = 'json'
+GZIP = 'gzip'
+
+EXTENSIONS = {
+    JSON: '.json',
+    MSGPACK: '.pack',
+    GZIP: '.gz'
+}
+
+METADATA = 'metadata'
+GEOMETRIES = 'geometries'
+IMAGES = 'images'
+TEXTURE = 'texture'
+TEXTURES = 'textures'
+
+USER_DATA = 'userData'
+DATA = 'data'
+TYPE = 'type'
+
+MATERIAL = 'material'
+OBJECT = 'object'
+PERSPECTIVE_CAMERA = 'PerspectiveCamera'
+ORTHOGRAPHIC_CAMERA = 'OrthographicCamera'
+AMBIENT_LIGHT = 'AmbientLight'
+DIRECTIONAL_LIGHT = 'DirectionalLight'
+AREA_LIGHT = 'AreaLight'
+POINT_LIGHT = 'PointLight'
+SPOT_LIGHT = 'SpotLight'
+HEMISPHERE_LIGHT = 'HemisphereLight'
+MESH = 'Mesh'
+SPRITE = 'Sprite'
+
+DEFAULT_METADATA = {
+    VERSION: FORMAT_VERSION,
+    TYPE: OBJECT.title(),
+    GENERATOR: THREE
+}
+
+UUID = 'uuid'
+
+MATRIX = 'matrix'
+POSITION = 'position'
+QUATERNION = 'quaternion'
+ROTATION ='rotation'
+SCALE = 'scale'
+
+UV = 'uv'
+ATTRIBUTES = 'attributes'
+NORMAL = 'normal'
+ITEM_SIZE = 'itemSize'
+ARRAY = 'array'
+
+FLOAT_32 = 'Float32Array'
+
+VISIBLE = 'visible'
+CAST_SHADOW = 'castShadow'
+RECEIVE_SHADOW = 'receiveShadow'
+QUAD = 'quad'
+
+USER_DATA = 'userData'
+
+MASK = {
+    QUAD: 0,
+    MATERIALS: 1,
+    UVS: 3,
+    NORMALS: 5,
+    COLORS: 7
+}
+
+
+CHILDREN = 'children'
+
+URL = 'url'
+WRAP = 'wrap'
+REPEAT = 'repeat'
+WRAPPING = type('Wrapping', (), {
+    'REPEAT': 'RepeatWrapping',
+    'CLAMP': 'ClampToEdgeWrapping',
+    'MIRROR': 'MirroredRepeatWrapping'
+})
+ANISOTROPY = 'anisotropy'
+MAG_FILTER = 'magFilter'
+MIN_FILTER = 'minFilter'
+MAPPING = 'mapping'
+
+IMAGE = 'image'
+
+NAME = 'name'
+PARENT = 'parent'
+
+#@TODO move to api.constants?
+POS = 'pos'
+ROTQ = 'rotq'
+
+AMBIENT = 'ambient'
+COLOR = 'color'
+EMISSIVE = 'emissive'
+SPECULAR = 'specular'
+SPECULAR_COEF = 'specularCoef'
+SHININESS = 'shininess'
+SIDE = 'side'
+OPACITY = 'opacity'
+TRANSPARENT = 'transparent'
+WIREFRAME = 'wireframe'
+BLENDING = 'blending'
+VERTEX_COLORS = 'vertexColors'
+DEPTH_WRITE = 'depthWrite'
+DEPTH_TEST = 'depthTest'
+
+MAP = 'map'
+SPECULAR_MAP = 'specularMap'
+LIGHT_MAP = 'lightMap'
+BUMP_MAP = 'bumpMap'
+BUMP_SCALE = 'bumpScale'
+NORMAL_MAP = 'normalMap'
+NORMAL_SCALE = 'normalScale'
+
+#@TODO ENV_MAP, REFLECTIVITY, REFRACTION_RATIO, COMBINE
+
+MAP_DIFFUSE = 'mapDiffuse'
+MAP_DIFFUSE_REPEAT = 'mapDiffuseRepeat'
+MAP_DIFFUSE_WRAP = 'mapDiffuseWrap'
+MAP_DIFFUSE_ANISOTROPY = 'mapDiffuseAnisotropy'
+
+MAP_SPECULAR = 'mapSpecular'
+MAP_SPECULAR_REPEAT = 'mapSpecularRepeat'
+MAP_SPECULAR_WRAP = 'mapSpecularWrap'
+MAP_SPECULAR_ANISOTROPY = 'mapSpecularAnisotropy'
+
+MAP_LIGHT = 'mapLight'
+MAP_LIGHT_REPEAT = 'mapLightRepeat'
+MAP_LIGHT_WRAP = 'mapLightWrap'
+MAP_LIGHT_ANISOTROPY = 'mapLightAnisotropy'
+
+MAP_NORMAL = 'mapNormal'
+MAP_NORMAL_FACTOR = 'mapNormalFactor'
+MAP_NORMAL_REPEAT = 'mapNormalRepeat'
+MAP_NORMAL_WRAP = 'mapNormalWrap'
+MAP_NORMAL_ANISOTROPY = 'mapNormalAnisotropy'
+
+MAP_BUMP = 'mapBump'
+MAP_BUMP_REPEAT = 'mapBumpRepeat'
+MAP_BUMP_WRAP = 'mapBumpWrap'
+MAP_BUMP_ANISOTROPY = 'mapBumpAnisotropy'
+MAP_BUMP_SCALE = 'mapBumpScale'
+
+NORMAL_BLENDING = 0
+
+VERTEX_COLORS_ON = 2
+VERTEX_COLORS_OFF = 0
+
+THREE_BASIC = 'MeshBasicMaterial'
+THREE_LAMBERT = 'MeshLambertMaterial'
+THREE_PHONG = 'MeshPhongMaterial'
+
+INTENSITY = 'intensity'
+DISTANCE = 'distance'
+ASPECT = 'aspect'
+ANGLE = 'angle'
+
+FOV = 'fov'
+ASPECT = 'aspect'
+NEAR = 'near'
+FAR = 'far'
+
+LEFT = 'left'
+RIGHT = 'right'
+TOP = 'top'
+BOTTOM = 'bottom'
+
+SHADING = 'shading'
+COLOR_DIFFUSE = 'colorDiffuse'
+COLOR_AMBIENT = 'colorAmbient'
+COLOR_EMISSIVE = 'colorEmissive'
+COLOR_SPECULAR = 'colorSpecular'
+DBG_NAME = 'DbgName'
+DBG_COLOR = 'DbgColor'
+DBG_INDEX = 'DbgIndex'
+EMIT = 'emit'
+
+PHONG = 'phong'
+LAMBERT = 'lambert'
+BASIC = 'basic'
+
+NORMAL_BLENDING = 'NormalBlending'
+
+DBG_COLORS = (0xeeeeee, 0xee0000, 0x00ee00, 0x0000ee, 0xeeee00, 0x00eeee, 0xee00ee)
+
+DOUBLE_SIDED = 'doubleSided'
+

+ 86 - 0
utils/exporters/blender/addons/io_three/exporter/__init__.py

@@ -0,0 +1,86 @@
+import os
+import sys
+import traceback
+from .. import constants
+from . import (
+    scene, 
+    geometry, 
+    api, 
+    exceptions, 
+    logger, 
+    base_classes
+)
+
+
+def _error_handler(func):
+    
+    def inner(filepath, options, *args, **kwargs):
+        level = options.get(constants.LOGGING, constants.DEBUG)
+        logger.init(level=level)
+        api.init()
+        
+        try:
+            func(filepath, options, *args, **kwargs)
+        except:
+            info = sys.exc_info()
+            trace = traceback.format_exception(
+                info[0], info[1], info[2].tb_next)
+            trace = ''.join(trace)
+            logger.error(trace)
+            
+            print('Error recorded to %s' % logger.LOG_FILE)
+
+            raise
+        else:
+            print('Log: %s' % logger.LOG_FILE)
+
+    return inner
+
+
+@_error_handler
+def export_scene(filepath, options):
+    selected = []
+
+    # during scene exports unselect everything. this is needed for
+    # applying modifiers, if it is necessary
+    # record the selected nodes so that selection is restored later
+    for obj in api.selected_objects():
+        api.object.unselect(obj)
+        selected.append(obj)
+    active = api.active_object()
+
+    try:
+        scene_ = scene.Scene(filepath, options=options)
+        scene_.parse()
+        scene_.write()
+    except:
+        _restore_selection(selected, active)
+        raise
+        
+    _restore_selection(selected, active)
+
+
+@_error_handler
+def export_geometry(filepath, options, node=None):
+    if node is None:
+        node = api.active_object()
+        if node is None:
+            raise exceptions.SelectionError('Nothing selected')
+        if node.type != 'MESH':
+            raise exceptions.GeometryError('Not a valid mesh object')
+    
+    mesh = api.object.mesh(node, options)
+    parent = base_classes.BaseScene(filepath, options)
+    geo = geometry.Geometry(mesh, parent)
+    geo.parse()
+    geo.write()
+    
+    if not options.get(constants.EMBED_ANIMATION, True):
+        geo.write_animation(os.path.dirname(filepath))
+
+
+def _restore_selection(objects, active):
+    for obj in objects:
+        api.object.select(obj)
+
+    api.set_active_object(active)

+ 216 - 0
utils/exporters/blender/addons/io_three/exporter/_json.py

@@ -0,0 +1,216 @@
+import json
+from .. import constants
+
+ROUND = constants.DEFAULT_PRECISION
+
## THREE override function
def _json_floatstr(o):
    """Format a float for JSON output honouring the ROUND precision.

    Falls back to ``str(o)`` when rounding is disabled or the value
    already has few enough decimals; otherwise formats to ROUND digits
    and strips trailing zeros (keeping at least one digit after the
    decimal point so the value remains valid JSON).
    """
    text = str(o)

    if ROUND is None:
        return text

    # no decimal point, or short enough already: leave untouched
    if '.' not in text or len(text) - text.index('.') <= ROUND - 1:
        return text

    text = '%.{0}f'.format(ROUND) % o
    while text.endswith('0') and '.' in text:
        text = text[:-1]
    if text.endswith('.'):
        # avoid emitting '0.' which is not parseable on load
        text += '0'
    return text
+
+
def _make_iterencode(markers, _default, _encoder, _indent, _floatstr,
        _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
        ## HACK: hand-optimized bytecode; turn globals into locals
        ValueError=ValueError,
        dict=dict,
        float=float,
        id=id,
        int=int,
        isinstance=isinstance,
        list=list,
        str=str,
        tuple=tuple,
    ):
    '''
    Overwrite json.encoder for Python 2.7 and above to not
    assign each index of a list or tuple to its own row as
    this is completely asinine behaviour

    Adapted from the standard library's json.encoder._make_iterencode:
    dicts are still pretty-printed with indentation, but list/tuple
    items are emitted on a single line, and floats go through
    _json_floatstr so they honour the exporter's precision setting.
    Returns the _iterencode closure used by json.JSONEncoder.
    '''

    ## @THREE
    # Override the function
    _floatstr = _json_floatstr

    if _indent is not None and not isinstance(_indent, str):
        _indent = ' ' * _indent

    def _iterencode_list(lst, _current_indent_level):
        # Yields JSON chunks for a list/tuple; all items stay on one line.
        if not lst:
            yield '[]'
            return
        if markers is not None:
            markerid = id(lst)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = lst
        buf = '['
        ## @THREEJS
        # -  block the moronic functionality that puts each
        #    index on its own line causing insane row counts
        #if _indent is not None:
        #    _current_indent_level += 1
        #    newline_indent = '\n' + _indent * _current_indent_level
        #    separator = _item_separator + newline_indent
        #    buf += newline_indent
        #else:
        newline_indent = None
        separator = _item_separator
        first = True
        for value in lst:
            if first:
                first = False
            else:
                # after the first item, the prefix becomes the separator
                buf = separator
            if isinstance(value, str):
                yield buf + _encoder(value)
            elif value is None:
                yield buf + 'null'
            elif value is True:
                yield buf + 'true'
            elif value is False:
                yield buf + 'false'
            elif isinstance(value, int):
                yield buf + str(value)
            elif isinstance(value, float):
                yield buf + _floatstr(value)
            else:
                yield buf
                if isinstance(value, (list, tuple)):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif isinstance(value, dict):
                    chunks = _iterencode_dict(value, _current_indent_level)
                else:
                    chunks = _iterencode(value, _current_indent_level)
                for chunk in chunks:
                    yield chunk
        # newline_indent is always None here (indent path disabled above),
        # so this block never runs; kept to mirror the stdlib original
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + _indent * _current_indent_level
        yield ']'
        if markers is not None:
            del markers[markerid]

    def _iterencode_dict(dct, _current_indent_level):
        # Yields JSON chunks for a dict; retains normal stdlib indentation.
        if not dct:
            yield '{}'
            return
        if markers is not None:
            markerid = id(dct)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = dct
        yield '{'
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = '\n' + _indent * _current_indent_level
            item_separator = _item_separator + newline_indent
            yield newline_indent
        else:
            newline_indent = None
            item_separator = _item_separator
        first = True
        if _sort_keys:
            items = sorted(dct.items(), key=lambda kv: kv[0])
        else:
            items = dct.items()
        for key, value in items:
            if isinstance(key, str):
                pass
            # JavaScript is weakly typed for these, so it makes sense to
            # also allow them.  Many encoders seem to do something like this.
            elif isinstance(key, float):
                key = _floatstr(key)
            elif key is True:
                key = 'true'
            elif key is False:
                key = 'false'
            elif key is None:
                key = 'null'
            elif isinstance(key, int):
                key = str(key)
            elif _skipkeys:
                continue
            else:
                raise TypeError("key " + repr(key) + " is not a string")
            if first:
                first = False
            else:
                yield item_separator
            yield _encoder(key)
            yield _key_separator
            if isinstance(value, str):
                yield _encoder(value)
            elif value is None:
                yield 'null'
            elif value is True:
                yield 'true'
            elif value is False:
                yield 'false'
            elif isinstance(value, int):
                yield str(value)
            elif isinstance(value, float):
                yield _floatstr(value)
            else:
                if isinstance(value, (list, tuple)):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif isinstance(value, dict):
                    chunks = _iterencode_dict(value, _current_indent_level)
                else:
                    chunks = _iterencode(value, _current_indent_level)
                for chunk in chunks:
                    yield chunk
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + _indent * _current_indent_level
        yield '}'
        if markers is not None:
            del markers[markerid]

    def _iterencode(o, _current_indent_level):
        # Top-level dispatcher: routes by type, falling back to _default
        # for objects json does not natively understand.
        if isinstance(o, str):
            yield _encoder(o)
        elif o is None:
            yield 'null'
        elif o is True:
            yield 'true'
        elif o is False:
            yield 'false'
        elif isinstance(o, int):
            yield str(o)
        elif isinstance(o, float):
            yield _floatstr(o)
        elif isinstance(o, (list, tuple)):
            for chunk in _iterencode_list(o, _current_indent_level):
                yield chunk
        elif isinstance(o, dict):
            for chunk in _iterencode_dict(o, _current_indent_level):
                yield chunk
        else:
            if markers is not None:
                markerid = id(o)
                if markerid in markers:
                    raise ValueError("Circular reference detected")
                markers[markerid] = o
            o = _default(o)
            for chunk in _iterencode(o, _current_indent_level):
                yield chunk
            if markers is not None:
                del markers[markerid]
    return _iterencode


# override the encoder so every json.dumps in this addon uses the
# single-line list behaviour above (module-wide, process-global patch)
json.encoder._make_iterencode = _make_iterencode
+
+

+ 31 - 0
utils/exporters/blender/addons/io_three/exporter/api/__init__.py

@@ -0,0 +1,31 @@
+import os
+import bpy
+from . import object, mesh, material, camera, light
+from .. import logger
+
+
def active_object():
    """Return the active object of the current scene."""
    return bpy.context.scene.objects.active


def init():
    """Reset API-level state that persists between exports."""
    logger.debug('Initializing API')
    object._MESH_MAP.clear()


def selected_objects(valid_types=None):
    """Yield names of the selected objects, optionally filtered by type."""
    logger.debug('api.selected_objects(%s)', valid_types)
    for node in bpy.context.selected_objects:
        # no filter, or the node's type is in the allowed set
        if valid_types is None or node.type in valid_types:
            yield node.name


def set_active_object(obj):
    """Make ``obj`` the scene's active object."""
    bpy.context.scene.objects.active = obj


def scene_name():
    """Return the file name of the currently open blend file."""
    return os.path.basename(bpy.data.filepath)
+

+ 66 - 0
utils/exporters/blender/addons/io_three/exporter/api/camera.py

@@ -0,0 +1,66 @@
+import math
+from bpy import data, types, context
+from .. import constants, logger
+
+
def _camera(func):
    """Decorator: resolve the first argument to a camera data-block.

    Accepts either a ``bpy.types.Camera`` instance or the name of one
    in ``bpy.data.cameras``.
    """
    def inner(name, *args, **kwargs):
        cam = name if isinstance(name, types.Camera) else data.cameras[name]
        return func(cam, *args, **kwargs)

    return inner
+
+
@_camera
def aspect(camera):
    """Render aspect ratio (resolution x over y) of the current scene."""
    logger.debug('camera.aspect(%s)', camera)
    render = context.scene.render
    return render.resolution_x/render.resolution_y


@_camera
def bottom(camera):
    """Bottom plane of the orthographic view volume."""
    logger.debug('camera.bottom(%s)', camera)
    return -(camera.angle_y * camera.ortho_scale)


@_camera
def far(camera):
    """Far clipping distance."""
    logger.debug('camera.far(%s)', camera)
    return camera.clip_end


@_camera
def fov(camera):
    """Camera lens value.

    NOTE(review): this returns Blender's lens setting, not an angle —
    confirm that consumers expect this rather than a field of view.
    """
    logger.debug('camera.fov(%s)', camera)
    return camera.lens


@_camera
def left(camera):
    """Left plane of the orthographic view volume."""
    logger.debug('camera.left(%s)', camera)
    return -(camera.angle_x * camera.ortho_scale)


@_camera
def near(camera):
    """Near clipping distance."""
    logger.debug('camera.near(%s)', camera)
    return camera.clip_start


@_camera
def right(camera):
    """Right plane of the orthographic view volume."""
    logger.debug('camera.right(%s)', camera)
    return camera.angle_x * camera.ortho_scale


@_camera
def top(camera):
    """Top plane of the orthographic view volume."""
    logger.debug('camera.top(%s)', camera)
    return camera.angle_y * camera.ortho_scale

+ 29 - 0
utils/exporters/blender/addons/io_three/exporter/api/constants.py

@@ -0,0 +1,29 @@
# Blender object type identifiers
MESH = 'MESH'
LAMP = 'LAMP'
EMPTY = 'EMPTY'
ARMATURE = 'ARMATURE'

# Lamp (light) subtypes
SPOT = 'SPOT'
POINT = 'POINT'
SUN = 'SUN'
HEMI = 'HEMI'
AREA = 'AREA'

# presumably a shadow-method value meaning shadows are disabled — confirm
NO_SHADOW = 'NOSHADOW'

# camera object type and projection modes
CAMERA = 'CAMERA'
PERSP = 'PERSP'
ORTHO = 'ORTHO'

RENDER = 'RENDER'

# presumably the Euler rotation order used on export — confirm with callers
ZYX = 'ZYX'

# texture slot blend type checked by material.diffuse_map/light_map
MULTIPLY = 'MULTIPLY'

# material type and texture type identifiers
WIRE = 'WIRE'
IMAGE = 'IMAGE'

# default three.js texture filter and mapping names
MAG_FILTER = 'LinearFilter'
MIN_FILTER = 'LinearMipMapLinearFilter'
MAPPING = 'UVMapping'

+ 28 - 0
utils/exporters/blender/addons/io_three/exporter/api/image.py

@@ -0,0 +1,28 @@
+import os
+from bpy import data, types
+from .. import logger
+
+
def _image(func):
    """Decorator: resolve the first argument to an image data-block.

    Accepts either a ``bpy.types.Image`` instance or the name of one
    in ``bpy.data.images``.
    """
    def inner(name, *args, **kwargs):
        # renamed local (was misleadingly called `mesh`)
        img = name if isinstance(name, types.Image) else data.images[name]
        return func(img, *args, **kwargs)

    return inner
+
+
def file_name(image):
    """Base name of the image's source file.

    Undecorated on purpose: it delegates to the decorated ``file_path``,
    which resolves a name or an Image instance.
    """
    logger.debug('image.file_name(%s)', image)
    return os.path.basename(file_path(image))


@_image
def file_path(image):
    """Normalized path of the image's source file."""
    logger.debug('image.file_path(%s)', image)
    return os.path.normpath(image.filepath_from_user())

+ 41 - 0
utils/exporters/blender/addons/io_three/exporter/api/light.py

@@ -0,0 +1,41 @@
+from bpy import data, types, context
+from .. import constants, utilities, logger
+
+
def _lamp(func):
    """Decorator: resolve the first argument to a lamp data-block.

    Accepts either a ``bpy.types.Lamp`` instance or the name of one
    in ``bpy.data.lamps``.
    """
    def inner(name, *args, **kwargs):
        lamp = name if isinstance(name, types.Lamp) else data.lamps[name]
        return func(lamp, *args, **kwargs)

    return inner
+
+
@_lamp
def angle(lamp):
    """Spot size of the lamp."""
    logger.debug('light.angle(%s)', lamp)
    return lamp.spot_size


@_lamp
def color(lamp):
    """Lamp colour packed into a single integer."""
    logger.debug('light.color(%s)', lamp)
    return utilities.rgb2int((lamp.color.r, lamp.color.g, lamp.color.b))


@_lamp
def distance(lamp):
    """Falloff distance of the lamp."""
    logger.debug('light.distance(%s)', lamp)
    return lamp.distance


@_lamp
def intensity(lamp):
    """Lamp energy rounded to two decimals."""
    logger.debug('light.intensity(%s)', lamp)
    return round(lamp.energy, 2)

+ 221 - 0
utils/exporters/blender/addons/io_three/exporter/api/material.py

@@ -0,0 +1,221 @@
+from bpy import data, types
+from .. import constants, logger
+from .constants import MULTIPLY, WIRE, IMAGE
+
+
def _material(func):
    """Decorator: resolve the first argument to a material data-block.

    Accepts either a ``bpy.types.Material`` instance or the name of one
    in ``bpy.data.materials``.
    """
    def inner(name, *args, **kwargs):
        mat = name if isinstance(name, types.Material) else data.materials[name]
        return func(mat, *args, **kwargs)

    return inner
+
+
@_material
def ambient_color(material):
    """Diffuse colour scaled by the material's ambient factor."""
    logger.debug('material.ambient_color(%s)', material)
    diffuse = diffuse_color(material)
    factor = material.ambient
    return (factor * diffuse[0], factor * diffuse[1], factor * diffuse[2])


@_material
def blending(material):
    """Custom THREE blending mode, defaulting to normal blending."""
    logger.debug('material.blending(%s)', material)
    try:
        return material.THREE_blending_type
    except AttributeError:
        logger.debug('No THREE_blending_type attribute found')
        return constants.NORMAL_BLENDING


@_material
def bump_map(material):
    """First image texture slot used as a bump (non-normal) map."""
    logger.debug('material.bump_map(%s)', material)
    for slot in _valid_textures(material):
        if slot.use_map_normal and not slot.texture.use_normal_map:
            return slot.texture
    return None


@_material
def bump_scale(material):
    """Bump scale; shares the slot's normal factor (see normal_scale)."""
    return normal_scale(material)
+
+
@_material
def depth_test(material):
    """Custom THREE depth-test flag, defaulting to True."""
    logger.debug('material.depth_test(%s)', material)
    try:
        return material.THREE_depth_test
    except AttributeError:
        logger.debug('No THREE_depth_test attribute found')
        return True


@_material
def depth_write(material):
    """Custom THREE depth-write flag, defaulting to True."""
    logger.debug('material.depth_write(%s)', material)
    try:
        return material.THREE_depth_write
    except AttributeError:
        logger.debug('No THREE_depth_write attribute found')
        return True


@_material
def diffuse_color(material):
    """Diffuse colour scaled by the diffuse intensity."""
    logger.debug('material.diffuse_color(%s)', material)
    factor = material.diffuse_intensity
    return (factor * material.diffuse_color[0],
            factor * material.diffuse_color[1],
            factor * material.diffuse_color[2])


@_material
def diffuse_map(material):
    """First diffuse-mapped image texture slot that is not multiply-blended."""
    logger.debug('material.diffuse_map(%s)', material)
    for slot in _valid_textures(material):
        if slot.use_map_color_diffuse and slot.blend_type != MULTIPLY:
            return slot.texture
    return None


@_material
def emissive_color(material):
    """Diffuse colour scaled by the emit factor."""
    logger.debug('material.emissive_color(%s)', material)
    diffuse = diffuse_color(material)
    factor = material.emit
    return (factor * diffuse[0], factor * diffuse[1], factor * diffuse[2])
+
+
@_material
def light_map(material):
    """First diffuse-mapped image texture slot with multiply blending."""
    logger.debug('material.light_map(%s)', material)
    for slot in _valid_textures(material):
        if slot.use_map_color_diffuse and slot.blend_type == MULTIPLY:
            return slot.texture
    return None


@_material
def normal_scale(material):
    """Normal factor of the first normal-mapped texture slot."""
    logger.debug('material.normal_scale(%s)', material)
    for slot in _valid_textures(material):
        if slot.use_map_normal:
            return slot.normal_factor
    return None


@_material
def normal_map(material):
    """First image texture slot used as a true normal map."""
    logger.debug('material.normal_map(%s)', material)
    for slot in _valid_textures(material):
        if slot.use_map_normal and slot.texture.use_normal_map:
            return slot.texture
    return None
+ 
+
@_material
def opacity(material):
    """Material opacity, taken directly from Blender's alpha.

    The previous ``round(material.alpha - 1.0, 2)`` produced values in
    [-1, 0], which is not a usable opacity; opacity maps 1:1 to alpha.
    (Also dropped the stray trailing semicolon.)
    """
    logger.debug('material.opacity(%s)', material)
    return round(material.alpha, 2)
+
+
@_material
def shading(material):
    """PHONG when any specular intensity is present, otherwise LAMBERT."""
    logger.debug('material.shading(%s)', material)
    if material.specular_intensity > 0.0:
        return constants.PHONG
    return constants.LAMBERT


@_material
def specular_coef(material):
    """Specular hardness used as the specular coefficient."""
    logger.debug('material.specular_coef(%s)', material)
    return material.specular_hardness


@_material
def specular_color(material):
    """Specular colour scaled by the specular intensity."""
    logger.debug('material.specular_color(%s)', material)
    factor = material.specular_intensity
    return (factor * material.specular_color[0],
            factor * material.specular_color[1],
            factor * material.specular_color[2])


@_material
def specular_map(material):
    """First image texture slot mapped to specular."""
    logger.debug('material.specular_map(%s)', material)
    for slot in _valid_textures(material):
        if slot.use_map_specular:
            return slot.texture
    return None
+
+
@_material
def transparent(material):
    """True when the material has transparency enabled."""
    logger.debug('material.transparent(%s)', material)
    return material.use_transparency


@_material
def type(material):
    """Classify the material as BASIC, PHONG or LAMBERT.

    NOTE: intentionally shadows the builtin ``type``; the name is part
    of this module's public API.
    """
    logger.debug('material.type(%s)', material)
    if material.diffuse_shader != 'LAMBERT':
        return constants.BASIC
    if material.specular_intensity > 0:
        return constants.PHONG
    return constants.LAMBERT


@_material
def use_vertex_colors(material):
    """True when vertex colour paint is enabled on the material."""
    logger.debug('material.use_vertex_colors(%s)', material)
    return material.use_vertex_color_paint
+
+
def used_materials():
    """Yield the names of materials that have at least one user."""
    logger.debug('material.used_materials()')
    for mat in data.materials:
        if mat.users > 0:
            yield mat.name


@_material
def visible(material):
    """Custom THREE visibility flag, defaulting to True."""
    logger.debug('material.visible(%s)', material)
    try:
        return material.THREE_visible
    except AttributeError:
        logger.debug('No THREE_visible attribute found')
        return True


@_material
def wireframe(material):
    """True when the material's type is wireframe."""
    logger.debug('material.wireframe(%s)', material)
    return material.type == WIRE


def _valid_textures(material):
    """Yield texture slots that hold image textures."""
    for slot in material.texture_slots:
        if not slot:
            continue
        if slot.texture.type != IMAGE:
            continue
        logger.debug('Valid texture found %s', slot)
        yield slot

+ 900 - 0
utils/exporters/blender/addons/io_three/exporter/api/mesh.py

@@ -0,0 +1,900 @@
import operator

import mathutils
from bpy import data, types, context

from . import material, texture
from . import object as object_
from .. import constants, exceptions, utilities, logger
+
+
def _mesh(func):
    """Decorator: resolve the first argument to a mesh data-block.

    Accepts either a ``bpy.types.Mesh`` instance or the name of one
    in ``bpy.data.meshes``.
    """
    def inner(name, *args, **kwargs):
        node = name if isinstance(name, types.Mesh) else data.meshes[name]
        return func(node, *args, **kwargs)

    return inner
+
+
@_mesh
def animation(mesh, options):
    """Skeletal animation data for the mesh's armature, if it has one."""
    logger.debug('mesh.animation(%s, %s)', mesh, options)
    armature = _armature(mesh)
    if not armature or not armature.animation_data:
        return None
    return _skeletal_animations(armature, options)
+
+
@_mesh
def bones(mesh):
    """Bone list for the mesh's armature in the exporter's format.

    Returns None when the mesh has no armature. Positions are swizzled
    (x, z, -y) before being written out.
    """
    logger.debug('mesh.bones(%s)', mesh)
    armature = _armature(mesh)
    if not armature:
        return None

    # name -> index lookup; replaces the original O(n^2) scan per bone
    bone_indices = {}
    for index, bone in enumerate(armature.data.bones):
        bone_indices[bone.name] = index

    bones = []
    for bone in armature.data.bones:
        logger.info('Parsing bone %s', bone.name)

        if bone.parent is None:
            bone_pos = bone.head_local
            bone_index = -1
        else:
            bone_pos = bone.head_local - bone.parent.head_local
            # original behaviour: fall back to 0 when the parent is missing
            bone_index = bone_indices.get(bone.parent.name, 0)

        bone_world_pos = armature.matrix_world * bone_pos
        x_axis = bone_world_pos.x
        y_axis = bone_world_pos.z
        z_axis = -bone_world_pos.y

        bones.append({
            constants.PARENT: bone_index,
            constants.NAME: bone.name,
            constants.POS: (x_axis, y_axis, z_axis),
            constants.ROTQ: (0, 0, 0, 1)
        })

    return bones
+
+
@_mesh
def buffer_normal(mesh, options):
    """Flattened per-face-vertex normals for BufferGeometry export.

    :raises exceptions.BufferGeometryError: on a non-triangulated face
    """
    logger.debug('mesh.buffer_normal(%s, %s)', mesh, options)
    normals_ = []
    round_off, round_val = utilities.rounding(options)

    for face in mesh.tessfaces:
        # was `is not 3`: identity comparison on ints is unreliable
        if len(face.vertices) != 3:
            msg = 'Non-triangulated face detected'
            raise exceptions.BufferGeometryError(msg)

        for vertex_index in face.vertices:
            normal = mesh.vertices[vertex_index].normal
            vector = (normal.x, normal.y, normal.z)
            if round_off:
                vector = utilities.round_off(vector, round_val)

            normals_.extend(vector)

    return normals_
+
+
+
+
@_mesh
def buffer_position(mesh, options):
    """Flattened per-face-vertex positions for BufferGeometry export.

    :raises exceptions.BufferGeometryError: on a non-triangulated face
    """
    logger.debug('mesh.buffer_position(%s, %s)', mesh, options)
    position = []
    round_off, round_val = utilities.rounding(options)

    for face in mesh.tessfaces:
        # was `is not 3`: identity comparison on ints is unreliable
        if len(face.vertices) != 3:
            msg = 'Non-triangulated face detected'
            raise exceptions.BufferGeometryError(msg)

        for vertex_index in face.vertices:
            vertex = mesh.vertices[vertex_index]
            vector = (vertex.co.x, vertex.co.y, vertex.co.z)
            if round_off:
                vector = utilities.round_off(vector, round_val)

            position.extend(vector)

    return position
+
+
@_mesh
def buffer_uv(mesh, options):
    """Flattened UVs of the first UV layer for BufferGeometry export.

    Returns None when the mesh has no UV layers. Only the first layer
    is used; a warning is logged when more exist.
    """
    logger.debug('mesh.buffer_uv(%s, %s)', mesh, options)
    # was `is 0`: identity comparison on ints is unreliable
    if len(mesh.uv_layers) == 0:
        return None
    elif len(mesh.uv_layers) > 1:
        # if memory serves me correctly buffer geometry
        # only uses one UV layer
        logger.warning('%s has more than 1 UV layer', mesh.name)

    round_off, round_val = utilities.rounding(options)
    uvs_ = []
    for uv_data in mesh.uv_layers[0].data:
        uv = (uv_data.uv[0], uv_data.uv[1])
        if round_off:
            uv = utilities.round_off(uv, round_val)
        # BUG FIX: the original computed `uv` but never stored it,
        # so this function always returned an empty list
        uvs_.extend(uv)

    return uvs_
+
@_mesh
def faces(mesh, options):
    """Serialize tessfaces into the flat three.js face array.

    Each face contributes a bit mask followed by its vertex indices and,
    depending on the mask, a material index, UV indices, normal indices
    and vertex colour indices.

    :raises exceptions.NGonError: when a face is not a tri or a quad
    """
    logger.debug('mesh.faces(%s, %s)', mesh, options)
    vertex_uv = len(mesh.uv_textures) > 0
    has_colors = len(mesh.vertex_colors) > 0
    logger.info('Has UVs = %s', vertex_uv)
    logger.info('Has vertex colours = %s', has_colors)

    round_off, round_val = utilities.rounding(options)
    if round_off:
        logger.debug('Rounding off of vectors set to %s', round_off)

    opt_colours = options[constants.COLORS] and has_colors
    opt_uvs = options[constants.UVS] and vertex_uv
    opt_materials = options.get(constants.FACE_MATERIALS)
    opt_normals = options[constants.NORMALS]
    logger.debug('Vertex colours enabled = %s', opt_colours)
    logger.debug('UVS enabled = %s', opt_uvs)
    logger.debug('Materials enabled = %s', opt_materials)
    logger.debug('Normals enabled = %s', opt_normals)

    uv_layers = _uvs(mesh, options) if opt_uvs else None
    vertex_normals = _normals(mesh, options) if opt_normals else None
    vertex_colours = vertex_colors(mesh) if opt_colours else None

    # (removed unused local `MASK = constants.MASK`)
    face_data = []

    logger.info('Parsing %d faces', len(mesh.tessfaces))
    for face in mesh.tessfaces:
        vert_count = len(face.vertices)

        if vert_count not in (3, 4):
            logger.error('%d vertices for face %d detected',
                vert_count, face.index)
            raise exceptions.NGonError('ngons are not supported')

        materials = face.material_index is not None and opt_materials
        mask = {
            # was `vert_count is 4`: identity on ints is unreliable
            constants.QUAD: vert_count == 4,
            constants.MATERIALS: materials,
            constants.UVS: opt_uvs,
            constants.NORMALS: opt_normals,
            constants.COLORS: opt_colours
        }

        face_data.append(utilities.bit_mask(mask))

        face_data.extend(face.vertices)

        if mask[constants.MATERIALS]:
            face_data.append(face.material_index)

        if mask[constants.UVS] and uv_layers:
            # indices into the per-layer unique UV tables built by _uvs
            for index, uv_layer in enumerate(uv_layers):
                layer = mesh.tessface_uv_textures[index]

                for uv_data in layer.data[face.index].uv:
                    uv = (uv_data[0], uv_data[1])
                    if round_off:
                        uv = utilities.round_off(uv, round_val)
                    face_data.append(uv_layer.index(uv))

        if mask[constants.NORMALS] and vertex_normals:
            for vertex in face.vertices:
                normal = mesh.vertices[vertex].normal
                normal = (normal.x, normal.y, normal.z)
                if round_off:
                    normal = utilities.round_off(normal, round_val)
                face_data.append(vertex_normals.index(normal))

        if mask[constants.COLORS]:
            colours = mesh.tessface_vertex_colors.active.data[face.index]

            for each in (colours.color1, colours.color2, colours.color3):
                each = utilities.rgb2int(each)
                face_data.append(vertex_colours.index(each))

            if mask[constants.QUAD]:
                colour = utilities.rgb2int(colours.color4)
                face_data.append(vertex_colours.index(colour))

    return face_data
+ 
+
@_mesh
def morph_targets(mesh, options):
    """Morph target data sampled across the scene's frame range.

    Samples mesh vertices every FRAME_STEP frames, restores the original
    frame, and returns None when the sampled vertex data never changes.
    """
    logger.debug('mesh.morph_targets(%s, %s)', mesh, options)
    #@TODO: consider an attribute for the meshes for determining
    #       morphs, which would save on so much overhead
    obj = object_.objects_using_mesh(mesh)[0]
    original_frame = context.scene.frame_current
    frame_step = options.get(constants.FRAME_STEP, 1)
    scene_frames = range(context.scene.frame_start,
        context.scene.frame_end+1, frame_step)

    morphs = []
    for frame in scene_frames:
        logger.info('Processing data at frame %d', frame)
        context.scene.frame_set(frame, 0.0)
        morphs.append([])
        vertices = object_.extract_mesh(obj, options).vertices[:]

        for vertex in vertices:
            vectors = [round(vertex.co.x, 6), round(vertex.co.y, 6),
                round(vertex.co.z, 6)]
            morphs[-1].extend(vectors)

    context.scene.frame_set(original_frame, 0.0)
    morphs_detected = False
    for index, each in enumerate(morphs):
        # was `index is 0`: identity comparison on ints is unreliable
        if index == 0:
            continue
        morphs_detected = morphs[index-1] != each
        if morphs_detected:
            logger.info('Valid morph target data detected')
            break
    else:
        # loop completed without detecting any change between frames
        logger.info('No valid morph data detected')
        return None

    manifest = []
    for index, morph in enumerate(morphs):
        manifest.append({
            constants.NAME: 'animation_%06d' % index,
            constants.VERTICES: morph
        })

    return manifest
+
+
@_mesh
def materials(mesh, options):
    """Compile attribute dictionaries for every material used by the mesh.

    Only materials referenced by at least one tessface are included.
    Texture map attributes are added when the MAPS option is enabled.
    """
    logger.debug('mesh.materials(%s, %s)', mesh, options)
    # material indices actually referenced by the faces
    indices = set([face.material_index for face in mesh.tessfaces])
    material_sets = [(mesh.materials[index], index) for index in indices]
    materials = []

    maps = options.get(constants.MAPS)

    mix = options.get(constants.MIX_COLORS)
    use_colors = options.get(constants.COLORS)
    logger.info('Colour mix is set to %s', mix)
    logger.info('Vertex colours set to %s', use_colors)

    for mat, index in material_sets:
        try:
            dbg_color = constants.DBG_COLORS[index]
        except IndexError:
            # more materials than debug colours: reuse the first one
            dbg_color = constants.DBG_COLORS[0]

        logger.info('Compiling attributes for %s', mat.name)
        attributes = {
            constants.COLOR_AMBIENT: material.ambient_color(mat),
            constants.COLOR_EMISSIVE: material.emissive_color(mat),
            constants.SHADING: material.shading(mat),
            constants.OPACITY: material.opacity(mat),
            constants.TRANSPARENT: material.transparent(mat),
            constants.VISIBLE: material.visible(mat),
            constants.WIREFRAME: material.wireframe(mat),
            constants.BLENDING: material.blending(mat),
            constants.DEPTH_TEST: material.depth_test(mat),
            constants.DEPTH_WRITE: material.depth_write(mat),
            constants.DBG_NAME: mat.name,
            constants.DBG_COLOR: dbg_color,
            constants.DBG_INDEX: index
        }

        if use_colors:
            colors = material.use_vertex_colors(mat)
            attributes[constants.VERTEX_COLORS] = colors

        # diffuse colour is written when mixing with vertex colours,
        # or when vertex colours are not used at all
        if (use_colors and mix) or (not use_colors):
            colors = material.diffuse_color(mat)
            attributes[constants.COLOR_DIFFUSE] = colors

        if attributes[constants.SHADING] == constants.PHONG:
            logger.info('Adding specular attributes')
            attributes.update({
                constants.SPECULAR_COEF: material.specular_coef(mat),
                constants.COLOR_SPECULAR: material.specular_color(mat)
            })

        if mesh.show_double_sided:
            logger.info('Double sided is on')
            attributes[constants.DOUBLE_SIDED] = True

        # NOTE: appended before the map attributes below on purpose —
        # the list holds a reference, so later dict updates still apply
        materials.append(attributes)

        if not maps: continue

        diffuse = _diffuse_map(mat)
        if diffuse:
            logger.info('Diffuse map found')
            attributes.update(diffuse)

        light = _light_map(mat)
        if light:
            logger.info('Light map found')
            attributes.update(light)

        specular = _specular_map(mat)
        if specular:
            logger.info('Specular map found')
            attributes.update(specular)

        # normal/bump maps only apply to Phong-shaded materials
        if attributes[constants.SHADING] == constants.PHONG:
            normal = _normal_map(mat)
            if normal:
                logger.info('Normal map found')
                attributes.update(normal)

            bump = _bump_map(mat)
            if bump:
                logger.info('Bump map found')
                attributes.update(bump)

    return materials
+
+
@_mesh
def normals(mesh, options):
    """Flattened list of the mesh's unique vertex normals."""
    logger.debug('mesh.normals(%s, %s)', mesh, options)
    flat = []
    for vector in _normals(mesh, options):
        flat.extend(vector)
    return flat
+
+
@_mesh
def skin_weights(mesh):
    """Skinning data at array index 1 — presumably bone weights; see
    _skinning_data."""
    logger.debug('mesh.skin_weights(%s)', mesh)
    return _skinning_data(mesh, 1)


@_mesh
def skin_indices(mesh):
    """Skinning data at array index 0 — presumably bone indices; see
    _skinning_data."""
    logger.debug('mesh.skin_indices(%s)', mesh)
    return _skinning_data(mesh, 0)
+
+
@_mesh
def texture_registration(mesh):
    """Map texture file hashes to the material map slots that use them.

    Returns ``{hash: {'file_path', 'file_name', 'maps': [...]}}`` for
    every texture referenced by any of the mesh's materials.
    """
    logger.debug('mesh.texture_registration(%s)', mesh)
    materials = mesh.materials or []
    registration = {}

    slot_functions = (
        (constants.MAP_DIFFUSE, material.diffuse_map),
        (constants.SPECULAR_MAP, material.specular_map),
        (constants.LIGHT_MAP, material.light_map),
        (constants.BUMP_MAP, material.bump_map),
        (constants.NORMAL_MAP, material.normal_map)
    )

    logger.info('found %d materials', len(materials))
    for mat in materials:
        for map_key, func in slot_functions:
            tex = func(mat)
            if tex is None:
                continue

            logger.info('%s has texture %s', map_key, tex.name)
            path = texture.file_path(tex)
            hash_ = utilities.hash(path)

            if hash_ not in registration:
                registration[hash_] = {
                    'file_path': path,
                    'file_name': texture.file_name(tex),
                    'maps': []
                }
            registration[hash_]['maps'].append(map_key)

    return registration
+
+
@_mesh
def uvs(mesh, options):
    """Per-layer flattened UV coordinates."""
    logger.debug('mesh.uvs(%s, %s)', mesh, options)
    layers = []
    for layer in _uvs(mesh, options):
        layers.append([])
        logger.info('Parsing UV layer %d', len(layers))
        for pair in layer:
            layers[-1].extend(pair)
    return layers
+
+
@_mesh
def vertex_colors(mesh):
    """Unique vertex colours (packed ints) in order of first appearance.

    Returns None when the mesh has no active vertex colour layer.
    """
    logger.debug('mesh.vertex_colors(%s)', mesh)
    try:
        colour_data = mesh.tessface_vertex_colors.active.data
    except AttributeError:
        logger.info('No vertex colours found')
        return None

    vertex_colours = []
    for face in mesh.tessfaces:
        face_colours = colour_data[face.index]

        for colour in (face_colours.color1, face_colours.color2,
                       face_colours.color3, face_colours.color4):
            value = utilities.rgb2int((colour.r, colour.g, colour.b))

            if value not in vertex_colours:
                vertex_colours.append(value)

    return vertex_colours
+
+
@_mesh
def vertices(mesh, options):
    """Return the mesh vertex coordinates as a flat list of floats,
    optionally rounded per the exporter options.
    """
    logger.debug('mesh.vertices(%s, %s)', mesh, options)
    round_off, round_val = utilities.rounding(options)

    coords = []
    for vertex in mesh.vertices:
        point = (vertex.co.x, vertex.co.y, vertex.co.z)
        if round_off:
            point = utilities.round_off(point, round_val)
        coords.extend(point)

    return coords
+
+
def _normal_map(mat):
    """Return the normal-map attributes for the material, or None when
    no normal texture is assigned.
    """
    tex = material.normal_map(mat)
    if tex is None:
        return

    logger.info('Found normal texture map %s', tex.name)
    return {
        constants.MAP_NORMAL: texture.file_name(tex),
        constants.MAP_NORMAL_FACTOR: material.normal_scale(mat),
        constants.MAP_NORMAL_ANISOTROPY: texture.anisotropy(tex),
        constants.MAP_NORMAL_WRAP: texture.wrap(tex),
        constants.MAP_NORMAL_REPEAT: texture.repeat(tex)
    }
+
+
def _bump_map(mat):
    """Return the bump-map attributes for the material, or None when
    no bump texture is assigned.
    """
    tex = material.bump_map(mat)
    if tex is None:
        return

    logger.info('Found bump texture map %s', tex.name)
    return {
        constants.MAP_BUMP: texture.file_name(tex),
        constants.MAP_BUMP_ANISOTROPY: texture.anisotropy(tex),
        constants.MAP_BUMP_WRAP: texture.wrap(tex),
        constants.MAP_BUMP_REPEAT: texture.repeat(tex),
        constants.MAP_BUMP_SCALE: material.bump_scale(mat),
    }
+
+
def _specular_map(mat):
    """Return the specular-map attributes for the material, or None
    when no specular texture is assigned.
    """
    tex = material.specular_map(mat)
    if tex is None:
        return

    logger.info('Found specular texture map %s', tex.name)
    return {
        constants.MAP_SPECULAR: texture.file_name(tex),
        constants.MAP_SPECULAR_ANISOTROPY: texture.anisotropy(tex),
        constants.MAP_SPECULAR_WRAP: texture.wrap(tex),
        constants.MAP_SPECULAR_REPEAT: texture.repeat(tex)
    }
+
+
def _light_map(mat):
    """Return the light-map attributes for the material, or None when
    no light texture is assigned.
    """
    tex = material.light_map(mat)
    if tex is None:
        return

    logger.info('Found light texture map %s', tex.name)
    return {
        constants.MAP_LIGHT: texture.file_name(tex),
        constants.MAP_LIGHT_ANISOTROPY: texture.anisotropy(tex),
        constants.MAP_LIGHT_WRAP: texture.wrap(tex),
        constants.MAP_LIGHT_REPEAT: texture.repeat(tex)
    }
+
+
def _diffuse_map(mat):
    """Return the diffuse-map attributes for the material, or None when
    no diffuse texture is assigned.
    """
    tex = material.diffuse_map(mat)
    if tex is None:
        return

    logger.info('Found diffuse texture map %s', tex.name)
    return {
        constants.MAP_DIFFUSE: texture.file_name(tex),
        constants.MAP_DIFFUSE_ANISOTROPY: texture.anisotropy(tex),
        constants.MAP_DIFFUSE_WRAP: texture.wrap(tex),
        constants.MAP_DIFFUSE_REPEAT: texture.repeat(tex)
    }
+
+
def _normals(mesh, options):
    """Return the unique vertex normals referenced by the mesh's
    tessellated faces, optionally rounded.

    Duplicates are filtered, so this acts as a normal palette.
    """
    round_off, round_val = utilities.rounding(options)

    unique = []
    for face in mesh.tessfaces:
        for vertex_index in face.vertices:
            normal = mesh.vertices[vertex_index].normal
            vector = (normal.x, normal.y, normal.z)
            if round_off:
                vector = utilities.round_off(vector, round_val)
            if vector not in unique:
                unique.append(vector)

    return unique
+
+
def _uvs(mesh, options):
    """Return one list per UV layer containing the layer's unique
    (u, v) pairs, optionally rounded.
    """
    round_off, round_val = utilities.rounding(options)

    uv_layers = []
    for layer in mesh.uv_layers:
        unique_pairs = []
        for datum in layer.data:
            pair = (datum.uv[0], datum.uv[1])
            if round_off:
                pair = utilities.round_off(pair, round_val)
            if pair not in unique_pairs:
                unique_pairs.append(pair)
        uv_layers.append(unique_pairs)

    return uv_layers
+
+
def _armature(mesh):
    """Return the armature deforming the first object that uses the
    mesh, or None when the object has no armature.
    """
    # NOTE(review): only the first instancing object is consulted;
    # assumes all instances share the same armature — verify
    obj = object_.objects_using_mesh(mesh)[0]
    armature = obj.find_armature()
    if armature:
        logger.info('Found armature %s for %s', armature.name, obj.name)
    else:
        logger.info('Found no armature for %s', obj.name)
    return armature
+
+
def _skinning_data(mesh, array_index):
    """Return flattened skinning data for the mesh, two influences per
    vertex.

    :param mesh: mesh node that may be deformed by an armature
    :param array_index: 0 to collect bone indices, any other value to
                        collect bone weights
    :returns: flat list with two entries per vertex, or None when the
              mesh has no armature
    """
    armature = _armature(mesh)
    if not armature:
        return

    obj = object_.objects_using_mesh(mesh)[0]
    logger.debug('Skinned object found %s', obj.name)

    manifest = []
    for vertex in mesh.vertices:
        # sort the vertex groups by descending weight so the two
        # strongest influences are exported
        bone_array = [(group.group, group.weight) for group in vertex.groups]
        bone_array.sort(key=operator.itemgetter(1), reverse=True)

        for index in range(2):
            # pad with zero when the vertex has fewer than 2 influences
            if index >= len(bone_array):
                manifest.append(0)
                continue

            for bone_index, bone in enumerate(armature.data.bones):
                if bone.name != obj.vertex_groups[bone_array[index][0]].name:
                    continue
                # use value equality; `is 0` relies on CPython int
                # interning and emits a SyntaxWarning on modern Python
                if array_index == 0:
                    entry = bone_index
                else:
                    entry = bone_array[index][1]

                manifest.append(entry)
                break
            else:
                # no matching bone found for this vertex group
                manifest.append(0)

    return manifest
+
+
def _skeletal_animations(armature, options):
    """Parse the armature's active action into a three.js animation
    dictionary: a per-bone keyframe hierarchy plus length, fps and name.

    :param armature: armature object with animation_data
    :param options: exporter options; FRAME_STEP is honoured
    :returns: dict with 'hierarchy', 'length', 'fps' and 'name' keys
    """
    action = armature.animation_data.action
    end_frame = action.frame_range[1]
    start_frame = action.frame_range[0]
    frame_length = end_frame - start_frame
    # only the rotation component of the armature's world matrix is
    # applied to bone rotations; positions use the full matrix
    l,r,s = armature.matrix_world.decompose()
    rotation_matrix = r.to_matrix()
    hierarchy = []
    # NOTE(review): parent_index increments by one per bone, which
    # assumes bones are ordered parent-before-child in a single chain —
    # verify against rigs with branching hierarchies
    parent_index = -1
    frame_step = options.get(constants.FRAME_STEP, 1)
    fps = context.scene.render.fps

    start = int(start_frame)
    end = int(end_frame / frame_step) + 1

    #@TODO need key constants
    for bone in armature.data.bones:
        logger.info('Parsing animation data for bone %s', bone.name)

        keys = []
        for frame in range(start, end):
            computed_frame = frame * frame_step
            pos, pchange = _position(bone, computed_frame, 
                action, armature.matrix_world)
            rot, rchange = _rotation(bone, computed_frame, 
                action, rotation_matrix)

            # flip y and z (Blender Z-up -> three.js Y-up)
            px, py, pz = pos.x, pos.z, -pos.y
            rx, ry, rz, rw = rot.x, rot.z, -rot.y, rot.w

            # START-FRAME: needs pos, rot and scl attributes
            # (required frame)
            if frame == start_frame:

                time = (frame * frame_step - start_frame) / fps
                keyframe = {
                    'time': time,
                    'pos': [px, py, pz],
                    'rot': [rx, ry, rz, rw],
                    'scl': [1, 1, 1]
                }
                keys.append(keyframe)

            # END-FRAME: needs pos, rot and scl attributes 
            # with animation length (required frame)
            # NOTE(review): float equality against end_frame/frame_step
            # may never match for some frame_step values — confirm

            elif frame == end_frame / frame_step:

                time = frame_length / fps
                keyframe = {
                    'time': time,
                    'pos': [px, py, pz],
                    'rot': [rx, ry, rz, rw],
                    'scl': [1, 1, 1]
                }
                keys.append(keyframe)

            # MIDDLE-FRAME: needs only one of the attributes, 
            # can be an empty frame (optional frame)

            elif pchange == True or rchange == True:

                time = (frame * frame_step - start_frame) / fps

                if pchange == True and rchange == True:
                    keyframe = {
                        'time': time, 
                        'pos': [px, py, pz],
                        'rot': [rx, ry, rz, rw]
                    }
                elif pchange == True:
                    keyframe = {
                        'time': time, 
                        'pos': [px, py, pz]
                    }
                elif rchange == True:
                    keyframe = {
                        'time': time, 
                        'rot': [rx, ry, rz, rw]
                    }

                keys.append(keyframe)

        hierarchy.append({'keys': keys, 'parent': parent_index})
        parent_index += 1

    #@TODO key constants
    animation = {
        'hierarchy': hierarchy, 
        'length':frame_length / fps,
        'fps': fps,
        'name': action.name
    }

    return animation
+
+
def _position(bone, frame, action, armature_matrix):
    """Evaluate a bone's location at the given frame.

    Searches the action's grouped channels (or raw fcurves when the
    action has no groups) for location curves on the bone, converts
    the sampled value into bone-local space and offsets it by the rest
    head position (relative to the parent bone when one exists).

    :returns: tuple of (position transformed by armature_matrix,
              True when a keyframe sits exactly on ``frame``)
    """

    position = mathutils.Vector((0,0,0))
    change = False

    ngroups = len(action.groups)

    # animation grouped by bones
    if ngroups > 0:

        index = 0

        # locate this bone's channel group by name
        for i in range(ngroups):
            if action.groups[i].name == bone.name:
                index = i

        for channel in action.groups[index].channels:
            if "location" in channel.data_path:
                has_changed = _handle_position_channel(
                    channel, frame, position)
                change = change or has_changed

    # animation in raw fcurves
    else:

        # bone names appear quoted inside fcurve data paths
        bone_label = '"%s"' % bone.name

        for channel in action.fcurves:
            data_path = channel.data_path
            if bone_label in data_path and \
            "location" in data_path:
                has_changed = _handle_position_channel(
                    channel, frame, position)
                change = change or has_changed

    # convert from armature space into bone-local space
    position = position * bone.matrix_local.inverted()

    if bone.parent is None:

        # root bone: offset by its own rest head
        position.x += bone.head.x
        position.y += bone.head.y
        position.z += bone.head.z

    else:

        # child bone: offset relative to the parent's rest pose
        parent = bone.parent

        parent_matrix = parent.matrix_local.inverted()
        diff = parent.tail_local - parent.head_local

        position.x += (bone.head * parent_matrix).x + diff.x
        position.y += (bone.head * parent_matrix).y + diff.y
        position.z += (bone.head * parent_matrix).z + diff.z

    return armature_matrix*position, change
+
+
def _rotation(bone, frame, action, armature_matrix):
    """Evaluate a bone's quaternion rotation at the given frame.

    Searches the action's grouped channels (or raw fcurves when the
    action has no groups) for quaternion curves on the bone and
    transforms the vector part of the result into armature space.

    :returns: tuple of (4d vector with x/y/z/w components,
              True when a keyframe sits exactly on ``frame``)
    """

    # TODO: calculate rotation also from rotation_euler channels

    # stored as a 4d vector: xyz is the vector part, w the scalar
    rotation = mathutils.Vector((0,0,0,1))

    change = False

    ngroups = len(action.groups)

    # animation grouped by bones

    if ngroups > 0:

        index = -1

        # locate this bone's channel group by name
        for i in range(ngroups):
            if action.groups[i].name == bone.name:
                index = i

        if index > -1:
            for channel in action.groups[index].channels:
                if "quaternion" in channel.data_path:
                    has_changed = _handle_rotation_channel(
                        channel, frame, rotation)
                    change = change or has_changed

    # animation in raw fcurves

    else:

        # bone names appear quoted inside fcurve data paths
        bone_label = '"%s"' % bone.name

        for channel in action.fcurves:
            data_path = channel.data_path
            if bone_label in data_path and \
            "quaternion" in data_path:
                has_changed = _handle_rotation_channel(
                    channel, frame, rotation)
                change = change or has_changed

    # transform only the vector part: first into bone-local space,
    # then into armature space; w is left untouched
    rot3 = rotation.to_3d()
    rotation.xyz = rot3 * bone.matrix_local.inverted()
    rotation.xyz = armature_matrix * rotation.xyz

    return rotation, change
+
+
+def _handle_rotation_channel(channel, frame, rotation):
+
+    change = False
+
+    if channel.array_index in [0, 1, 2, 3]:
+
+        for keyframe in channel.keyframe_points:
+            if keyframe.co[0] == frame:
+                change = True
+
+        value = channel.evaluate(frame)
+
+        if channel.array_index == 1:
+            rotation.x = value
+
+        elif channel.array_index == 2:
+            rotation.y = value
+
+        elif channel.array_index == 3:
+            rotation.z = value
+
+        elif channel.array_index == 0:
+            rotation.w = value
+
+    return change
+
+
+def _handle_position_channel(channel, frame, position):
+
+    change = False
+
+    if channel.array_index in [0, 1, 2]:
+        for keyframe in channel.keyframe_points:
+            if keyframe.co[0] == frame:
+                change = True
+
+        value = channel.evaluate(frame)
+
+        if channel.array_index == 0:
+            position.x = value
+
+        if channel.array_index == 1:
+            position.y = value
+
+        if channel.array_index == 2:
+            position.z = value
+
+    return change

+ 407 - 0
utils/exporters/blender/addons/io_three/exporter/api/object.py

@@ -0,0 +1,407 @@
import math

import mathutils
import bpy
from bpy import data, context, types

from .. import constants, exceptions, logger, utilities
from .constants import (
    MESH,
    EMPTY,
    ARMATURE,
    LAMP,
    SPOT,
    SUN,
    POINT,
    HEMI,
    AREA,
    CAMERA,
    PERSP,
    ORTHO,
    RENDER,
    ZYX,
    NO_SHADOW
)
+
+ROTATE_X_PI2 = mathutils.Quaternion((1.0, 0.0, 0.0), 
+    math.radians(-90.0)).to_matrix().to_4x4()
+
+
+# Blender doesn't seem to have a good way to link a mesh back to the
+# objects that are instancing it, or it is bloody obvious and I haven't
+# discovered yet. This manifest serves as a way for me to map a mesh
+# node to the object nodes that are using it.
+_MESH_MAP = {}
+
+
def _object(func):
    """Decorator resolving the first argument to a Blender object,
    accepting either the object itself or its name.
    """
    def inner(name, *args, **kwargs):
        obj = name if isinstance(name, types.Object) else data.objects[name]
        return func(obj, *args, **kwargs)

    return inner
+
+
def assemblies(valid_types):
    """Yield the names of root-level nodes to export.

    A node qualifies when it has no parent and is of a valid type, or
    when its only ancestor is a root-level armature.
    """
    logger.debug('object.assemblies(%s)', valid_types)
    # the original `nodes = []` local was never used; removed
    for obj in data.objects:
        if not obj.parent and obj.type in valid_types:
            yield obj.name
        elif obj.parent and not obj.parent.parent \
        and obj.parent.type == ARMATURE:
            logger.info('Has armature parent %s', obj.name)
            yield obj.name
+
+
@_object
def cast_shadow(obj):
    """Determine whether the object casts shadows.

    SPOT and SUN lamps report from their shadow method; other lamp
    types return None. Meshes report their material's cast-shadow
    flag (False when no material is assigned).
    """
    logger.debug('object.cast_shadow(%s)', obj)
    if obj.type == LAMP:
        if obj.data.type not in (SPOT, SUN):
            logger.info('%s is a lamp but this lamp type does not '\
                'have supported shadows in ThreeJS', obj.name)
            return None
        return obj.data.shadow_method != NO_SHADOW
    if obj.type == MESH:
        mat = material(obj)
        return data.materials[mat].use_cast_shadows if mat else False
+
+
@_object
def children(obj, valid_types):
    """Yield the names of the object's direct children whose type is
    one of valid_types.
    """
    logger.debug('object.children(%s, %s)', obj, valid_types)
    matches = (child for child in obj.children if child.type in valid_types)
    for child in matches:
        yield child.name
+
+
@_object
def material(obj):
    """Return the name of the object's first material slot, or None
    when the object has no materials.
    """
    logger.debug('object.material(%s)', obj)
    if obj.material_slots:
        return obj.material_slots[0].name
+
+
@_object
def mesh(obj, options):
    """Return the name of the extracted mesh mapped to the object,
    extracting and registering it on first use.

    :returns: mesh name, or None when the object is not a mesh or the
              extracted mesh has no faces
    """
    logger.debug('object.mesh(%s, %s)', obj, options)
    if obj.type != MESH:
        return

    # reuse a mesh already extracted for another instance of the data
    for name, objects in _MESH_MAP.items():
        if obj in objects:
            return name

    logger.debug('Could not map object, updating manifest')
    extracted = extract_mesh(obj, options)
    # compare by value; `is not 0` relies on CPython int interning and
    # emits a SyntaxWarning on modern Python
    if len(extracted.tessfaces) != 0:
        _MESH_MAP.setdefault(extracted.name, []).append(obj)
        return extracted.name

    # possibly just being used as a controller
    logger.info('Object %s has no faces', obj.name)
    return None
+
+
@_object
def name(obj):
    """Return the name of the resolved Blender object."""
    return obj.name
+
+
@_object
def node_type(obj):
    """Return the ThreeJS node type string for the object.

    Mesh and empty nodes map directly; lamps and cameras are
    dispatched on their data type.

    :raises exceptions.UnsupportedObjectType: for unmapped types
    """
    logger.debug('object.node_type(%s)', obj)
    # standard transformation nodes are inferred
    if obj.type == MESH: 
        return constants.MESH.title()
    elif obj.type == EMPTY:
        return constants.OBJECT.title()

    dispatch = {
        LAMP: {
            POINT: constants.POINT_LIGHT,
            SUN: constants.DIRECTIONAL_LIGHT,
            SPOT: constants.SPOT_LIGHT,
            HEMI: constants.HEMISPHERE_LIGHT,
            AREA: constants.AREA_LIGHT,
        },
        CAMERA: {
            PERSP: constants.PERSPECTIVE_CAMERA,
            ORTHO: constants.ORTHOGRAPHIC_CAMERA
        }
    }
    try:
        return dispatch[obj.type][obj.data.type]
    except (AttributeError, KeyError):
        # an unmapped object type raises KeyError, an object without
        # typed data raises AttributeError; both are unsupported here.
        # `exceptions` was previously referenced without being imported
        # (NameError); the import is now added at module level.
        msg = 'Invalid type: %s' % obj.type
        raise exceptions.UnsupportedObjectType(msg)
+ 
+
def nodes(valid_types, options):
    """Yield the names of all exportable nodes in the scene.

    Skips objects on hidden layers, objects flagged as non-exportable
    (THREE_export) and mesh objects whose geometry could not be
    resolved or has no faces.
    """
    visible_layers = _visible_scene_layers()
    for obj in data.objects:
        # skip objects that are not on visible layers
        if not _on_visible_layer(obj, visible_layers): 
            continue
        try:
            export = obj.THREE_export
        except AttributeError:
            # objects without the custom property default to exportable
            export = True

        mesh_node = mesh(obj, options)
        is_mesh = obj.type == MESH

        # skip objects that a mesh could not be resolved
        if is_mesh and not mesh_node:
            continue

        # secondary test; if a mesh node was resolved but no
        # faces are detected then bow out
        if is_mesh:
            mesh_node = data.meshes[mesh_node]
            # compare by value; `is 0` relies on CPython int interning
            if len(mesh_node.tessfaces) == 0:
                continue

        if obj.type in valid_types and export:
            yield obj.name
+
+
@_object
def position(obj, options):
    """Return the object's world position as an (x, y, z) tuple,
    optionally rounded per the exporter options.
    """
    logger.debug('object.position(%s)', obj)
    translation = _matrix(obj)[0]
    vector = (translation.x, translation.y, translation.z)

    round_off, round_val = utilities.rounding(options)
    return utilities.round_off(vector, round_val) if round_off else vector
+
+
@_object
def receive_shadow(obj):
    """Report whether a mesh object's material receives shadows
    (False when no material is assigned; None for non-mesh objects).
    """
    if obj.type == MESH:
        mat = material(obj)
        return data.materials[mat].use_shadows if mat else False
+
+
@_object
def rotation(obj, options):
    """Return the object's world rotation as ZYX euler angles,
    optionally rounded per the exporter options.
    """
    logger.debug('object.rotation(%s)', obj)
    euler = _matrix(obj)[1].to_euler(ZYX)
    vector = (euler.x, euler.y, euler.z)

    round_off, round_val = utilities.rounding(options)
    return utilities.round_off(vector, round_val) if round_off else vector
+
+
@_object
def scale(obj, options):
    """Return the object's world scale as an (x, y, z) tuple,
    optionally rounded per the exporter options.
    """
    logger.debug('object.scale(%s)', obj)
    scaling = _matrix(obj)[2]
    vector = (scaling.x, scaling.y, scaling.z)

    round_off, round_val = utilities.rounding(options)
    return utilities.round_off(vector, round_val) if round_off else vector
+
+
@_object
def select(obj):
    """Select the object in the scene."""
    obj.select = True
+
+
@_object
def unselect(obj):
    """Deselect the object in the scene."""
    obj.select = False
+
+
@_object
def visible(obj):
    """Return True when the object is visible in the current scene."""
    logger.debug('object.visible(%s)', obj)
    return obj.is_visible(context.scene)
+
+
def extract_mesh(obj, options, recalculate=False):
    """Create an evaluated (modifier-applied) copy of the object's
    mesh, prepared for export.

    The copy is triangulated when buffer geometry is requested,
    optionally has its normals recalculated, is rotated into the
    ThreeJS coordinate system (unless exporting a full scene), given a
    unique name, tessellated and scaled per the exporter options.

    :param obj: Blender object to extract from
    :param options: exporter options
    :param recalculate: recalculate normals to face outwards
                        (default False)
    :returns: the new mesh datablock
    """
    logger.debug('object.extract_mesh(%s, %s)', obj, options)
    mesh = obj.to_mesh(context.scene, True, RENDER)

    # transfer the geometry type to the extracted mesh
    mesh.THREE_geometry_type = obj.data.THREE_geometry_type

    # now determine whether or not to export using the geometry type
    # set globally from the exporter's options or to use the local
    # override on the mesh node itself
    opt_buffer = options.get(constants.GEOMETRY_TYPE)
    opt_buffer = opt_buffer == constants.BUFFER_GEOMETRY
    prop_buffer = mesh.THREE_geometry_type == constants.BUFFER_GEOMETRY

    # if doing buffer geometry it is imperative to triangulate the mesh
    if opt_buffer or prop_buffer:
        # temporarily swap the extracted mesh onto the object so the
        # triangulate modifier operates on the copy, not the original
        original_mesh = obj.data
        obj.data = mesh
        logger.debug('swapped %s for %s', original_mesh.name, mesh.name)

        obj.select = True
        bpy.context.scene.objects.active = obj
        logger.info('Applying triangulation to %s', obj.data.name)
        bpy.ops.object.modifier_add(type='TRIANGULATE')
        bpy.ops.object.modifier_apply(apply_as='DATA', 
            modifier='Triangulate')
        obj.data = original_mesh
        obj.select = False

    # recalculate the normals to face outwards, this is usually
    # best after applying a modifiers, especialy for something 
    # like the mirror
    if recalculate:
        logger.info('Recalculating normals')
        original_mesh = obj.data
        obj.data = mesh

        bpy.context.scene.objects.active = obj
        bpy.ops.object.mode_set(mode='EDIT')
        bpy.ops.mesh.select_all(action='SELECT')
        bpy.ops.mesh.normals_make_consistent()
        bpy.ops.object.editmode_toggle()

        obj.data = original_mesh

    # bake the world transform plus a -90 degree X rotation (Z-up to
    # Y-up) into the mesh when not exporting a whole scene
    if not options.get(constants.SCENE):
        xrot = mathutils.Matrix.Rotation(-math.pi/2, 4, 'X')
        mesh.transform(xrot * obj.matrix_world)

    # now generate a unique name
    index = 0
    while True:
        # compare by value; `is 0` relies on CPython int interning and
        # emits a SyntaxWarning on modern Python
        if index == 0:
            name = '%sGeometry' % obj.data.name
        else:
            name = '%sGeometry.%d' % (obj.data.name, index)
        try:
            data.meshes[name]
            index += 1
        except KeyError:
            break
    mesh.name = name

    mesh.update(calc_tessface=True)
    mesh.calc_normals()
    mesh.calc_tessface()
    scale = options.get(constants.SCALE, 1)
    mesh.transform(mathutils.Matrix.Scale(scale, 4))

    return mesh
+
+
def objects_using_mesh(mesh):
    """Return the list of objects registered against the mesh node,
    or None (with a warning) when the mesh was never registered.
    """
    logger.debug('object.objects_using_mesh(%s)', mesh)
    try:
        # _MESH_MAP is keyed by mesh name; a direct lookup replaces the
        # previous linear scan with a misused for/else
        return _MESH_MAP[mesh.name]
    except KeyError:
        logger.warning('Could not find mesh mapping')
+
+
def prep_meshes(options):
    '''
    Prep the mesh nodes. Preperation includes identifying:
        - nodes that are on visible layers
        - nodes that have export disabled
        - nodes that have modifiers that need to be applied
    '''
    logger.debug('object.prep_meshes(%s)', options)
    mapping = {}

    visible_layers = _visible_scene_layers()

    for obj in data.objects:
        if obj.type != MESH: 
            continue

        # this is ideal for skipping controller or proxy nodes
        # that may apply to a Blender but not a 3js scene
        if not _on_visible_layer(obj, visible_layers): 
            logger.info('%s is not on a visible layer', obj.name)
            continue

        # if someone really insists on a visible node not being exportable
        if not obj.THREE_export: 
            logger.info('%s export is disabled', obj.name)
            continue

        # need to apply modifiers before moving on, and before
        # handling instancing. it is possible for 2 or more objects
        # instance the same mesh but to not all use the same modifiers
        # this logic identifies the object with modifiers and extracts
        # the mesh making the mesh unique to this particular object
        if len(obj.modifiers):
            # lazy logger args for consistency with the rest of the
            # module (was eager %-formatting)
            logger.info('%s has modifiers', obj.name)
            mesh = extract_mesh(obj, options, recalculate=True)
            _MESH_MAP[mesh.name] = [obj]
            continue

        logger.info('adding mesh %s.%s to prep', 
            obj.name, obj.data.name)
        manifest = mapping.setdefault(obj.data.name, [])
        manifest.append(obj)

    # now associate the extracted mesh node with all the objects
    # that are instancing it
    for objects in mapping.values():
        mesh = extract_mesh(objects[0], options)
        _MESH_MAP[mesh.name] = objects
+
+
def extracted_meshes():
    """Return the names of all meshes registered in the mesh map."""
    logger.debug('object.extracted_meshes()')
    # list() over a manual comprehension of .keys()
    return list(_MESH_MAP)
+
+
def _matrix(obj):
    """Decompose the object's world matrix, pre-rotated -90 degrees
    about X (Blender Z-up to ThreeJS Y-up), into a
    (translation, rotation, scale) tuple.
    """
    matrix = ROTATE_X_PI2 * obj.matrix_world
    return matrix.decompose()
+
+
def _on_visible_layer(obj, visible_layers):
    """Return True unless the object sits on a layer that is not in
    visible_layers.

    The original `obj_layers = []` local was never used; removed.
    """
    for index, layer in enumerate(obj.layers):
        if layer and index not in visible_layers:
            logger.info('%s is on a hidden layer', obj.name)
            return False
    return True
+
+
def _visible_scene_layers():
    """Return the indices of the scene layers that are currently
    visible.
    """
    return [index for index, layer
            in enumerate(context.scene.layers) if layer]

+ 102 - 0
utils/exporters/blender/addons/io_three/exporter/api/texture.py

@@ -0,0 +1,102 @@
+import bpy
+from bpy import data, types
+from .. import constants, logger
+from .constants import IMAGE, MAG_FILTER, MIN_FILTER, MAPPING
+from . import image
+
+
def _texture(func):
    """Decorator resolving the first argument to a texture datablock,
    accepting either the texture itself or its name.
    """
    def inner(name, *args, **kwargs):
        tex = name if isinstance(name, types.Texture) else data.textures[name]
        return func(tex, *args, **kwargs)

    return inner
+
+
@_texture
def anisotropy(texture):
    """Return the texture's filter size (exported as anisotropy)."""
    # log message previously said 'texture.file_path'; corrected to
    # name this function
    logger.debug('texture.anisotropy(%s)', texture)
    return texture.filter_size
+
+
@_texture
def file_name(texture):
    """Return the file name of the texture's image, or None when the
    texture has no image.
    """
    logger.debug('texture.file_name(%s)', texture)
    return image.file_name(texture.image) if texture.image else None
+
+
@_texture
def file_path(texture):
    """Return the file path of the texture's image, or None when the
    texture has no image.
    """
    logger.debug('texture.file_path(%s)', texture)
    return image.file_path(texture.image) if texture.image else None
+
+
@_texture
def image_node(texture):
    """Return the image datablock assigned to the texture."""
    logger.debug('texture.image_node(%s)', texture)
    return texture.image
+
+
@_texture
def mag_filter(texture):
    """Return the texture's THREE_mag_filter override, falling back to
    the default MAG_FILTER.
    """
    logger.debug('texture.mag_filter(%s)', texture)
    try:
        return texture.THREE_mag_filter
    except AttributeError:
        logger.debug('No THREE_mag_filter attribute found')
        return MAG_FILTER
+
+
@_texture
def mapping(texture):
    """Return the texture's THREE_mapping override, falling back to
    the default MAPPING.
    """
    logger.debug('texture.mapping(%s)', texture)
    try:
        return texture.THREE_mapping
    except AttributeError:
        logger.debug('No THREE_mapping attribute found')
        return MAPPING
@_texture
def min_filter(texture):
    """Return the texture's THREE_min_filter override, falling back to
    the default MIN_FILTER.
    """
    logger.debug('texture.min_filter(%s)', texture)
    try:
        return texture.THREE_min_filter
    except AttributeError:
        logger.debug('No THREE_min_filter attribute found')
        return MIN_FILTER
+
+
@_texture
def repeat(texture):
    """Return the (x, y) repeat values of the texture."""
    logger.debug('texture.repeat(%s)', texture)
    return (texture.repeat_x, texture.repeat_y)
+
+
@_texture
def wrap(texture):
    """Return the (x, y) wrapping modes: MIRROR for an axis with
    mirroring enabled, otherwise REPEAT.
    """
    logger.debug('texture.wrap(%s)', texture)

    def mode(mirrored):
        # translate a Blender mirror flag into a ThreeJS wrap constant
        if mirrored:
            return constants.WRAPPING.MIRROR
        return constants.WRAPPING.REPEAT

    return (mode(texture.use_mirror_x), mode(texture.use_mirror_y))
+
+
def textures():
    """Yield the names of all image textures in the blend data."""
    logger.debug('texture.textures()')
    image_textures = (tex for tex in data.textures if tex.type == IMAGE)
    for tex in image_textures:
        yield tex.name

+ 100 - 0
utils/exporters/blender/addons/io_three/exporter/base_classes.py

@@ -0,0 +1,100 @@
+import uuid
+from .. import constants 
+from .exceptions import ThreeValueError
+
+
class BaseClass(constants.BASE_DICT):
    """Base dictionary type for all exporter data structures.

    Stores export data as key/value pairs while tracking an optional
    parent node and a type string.
    """
    # per-subclass default key/value pairs, copied in on init
    _defaults = {}

    def __init__(self, parent=None, type=None):
        """
        :param parent: parent node (default None)
        :param type: node type string (default None)
        """
        constants.BASE_DICT.__init__(self)

        self.__type = type

        self.__parent = parent

        # seed with a copy so instances never share the defaults dict
        constants.BASE_DICT.update(self, self._defaults.copy())

    def __setitem__(self, key, value):
        """Set a key, rejecting values of disallowed types.

        :raises ThreeValueError: when the value's type is not one of
                                 constants.VALID_DATA_TYPES
        """
        if not isinstance(value, constants.VALID_DATA_TYPES):
            msg = 'Value is an invalid data type: %s' % type(value)
            raise ThreeValueError(msg) 
        constants.BASE_DICT.__setitem__(self, key, value)

    @property
    def count(self):
        # number of keys currently stored
        return len(self.keys())

    @property
    def parent(self):
        # the parent node, or None for root nodes
        return self.__parent

    @property
    def type(self):
        # the node's type string
        return self.__type

    def copy(self):
        """Return the stored data as a plain dict.

        Strings and lists are sliced, tuples re-created and nested
        dicts copied recursively; any other values are shared with the
        original.
        """
        data = {}
        def _dict_copy(old, new):
            for key, value in old.items():
                if isinstance(value, (str, list)):
                    new[key] = value[:]
                elif isinstance(value, tuple):
                    new[key] = value+tuple()
                elif isinstance(value, dict):
                    new[key] = {}
                    _dict_copy(value, new[key])
                else:
                    new[key] = value

        _dict_copy(self, data)

        return data
+
+
class BaseNode(BaseClass):
    """Base class for scene-attached nodes (geometry, materials,
    objects, textures, ...).
    """

    def __init__(self, node, parent, type):
        """
        :param node: name of the wrapped Blender datablock, or None
        :param parent: parent node; a BaseScene or another node
        :param type: node type string
        """
        BaseClass.__init__(self, parent=parent, type=type)
        self.__node = node
        if node is not None:
            self[constants.NAME] = node

        # resolve the owning scene: the parent either is the scene or
        # knows its scene; root nodes without a parent get None
        if isinstance(parent, BaseScene):
            scene = parent
        elif parent is not None:
            scene = parent.scene
        else:
            scene = None

        self.__scene = scene

        # every node carries a unique upper-cased UUID
        self[constants.UUID] = str(uuid.uuid4()).upper()

    @property
    def node(self):
        # name of the wrapped Blender datablock (may be None)
        return self.__node

    @property
    def scene(self):
        # the owning scene node, or None
        return self.__scene

    @property
    def options(self):
        # exporter options, delegated to the owning scene
        return self.scene.options
+
+
class BaseScene(BaseClass):
    """Root node representing an export scene, bound to an output file
    path and a private copy of the exporter options.
    """

    def __init__(self, filepath, options):
        """
        :param filepath: destination path for the exported file
        :param options: exporter options; copied so later mutation by
                        the caller cannot affect the scene
        """
        BaseClass.__init__(self, type=constants.SCENE)

        self.__filepath = filepath

        self.__options = options.copy()

    @property
    def filepath(self):
        # destination path for the exported file
        return self.__filepath

    @property
    def options(self):
        # the scene's private copy of the exporter options
        return self.__options

+ 8 - 0
utils/exporters/blender/addons/io_three/exporter/exceptions.py

@@ -0,0 +1,8 @@
class ThreeError(Exception):
    """Base class for all io_three exporter errors."""


class UnimplementedFeatureError(ThreeError):
    """Raised when an unimplemented exporter feature is requested."""


class ThreeValueError(ThreeError):
    """Raised when a value of an invalid type is assigned to a node."""


class UnsupportedObjectType(ThreeError):
    """Raised for Blender object types the exporter cannot handle."""


class GeometryError(ThreeError):
    """Raised for errors while parsing geometry."""


class SelectionError(ThreeError):
    """Raised for errors relating to the scene selection."""


class NGonError(ThreeError):
    """Raised when unsupported n-gon faces are encountered."""


class BufferGeometryError(ThreeError):
    """Raised for errors specific to buffer geometry export."""

+ 353 - 0
utils/exporters/blender/addons/io_three/exporter/geometry.py

@@ -0,0 +1,353 @@
+import os
+from .. import constants
+from . import base_classes, io, logger, api
+
+
+FORMAT_VERSION = 3
+
+
+class Geometry(base_classes.BaseNode):
+    def __init__(self, node, parent=None):
+        logger.debug('Geometry().__init__(%s)', node)
+        
+        #@TODO: maybe better to have `three` constants for
+        #       strings that are specific to `three` properties
+        geo_type = constants.GEOMETRY.title()
+        if parent.options.get(constants.GEOMETRY_TYPE):
+            opt_type = parent.options[constants.GEOMETRY_TYPE]
+            if opt_type == constants.BUFFER_GEOMETRY:
+                geo_type = constants.BUFFER_GEOMETRY
+            elif opt_type != constants.GEOMETRY:
+                logger.error('Unknown geometry type %s', opt_type)
+
+        logger.info('Setting %s to "%s"', node, geo_type)
+
+        self._defaults[constants.TYPE] = geo_type
+        base_classes.BaseNode.__init__(self, node, parent=parent,
+            type=geo_type)
+
+    @property
+    def animation_filename(self):
+        compression = self.options.get(constants.COMPRESSION)
+        if compression in (None, constants.NONE):
+            ext = constants.JSON
+        elif compression == constants.MSGPACK:
+            ext = constants.PACK
+
+        for key in (constants.MORPH_TARGETS, constants.ANIMATION):
+            try:
+                self[key]
+                break
+            except KeyError:
+                pass
+        else:
+            logger.info('%s has no animation data', self.node)
+            return
+
+        return '%s.%s.%s' % (self.node, key, ext)
+
+    @property
+    def face_count(self):
+        try:
+            faces = self[constants.FACES]
+        except KeyError:
+            logger.debug('No parsed faces found')
+            return 0
+
+        length = len(faces)
+        offset = 0
+        bitset = lambda x,y: x & ( 1 << y )
+        face_count = 0
+
+        while offset < length:
+            bit = faces[offset]
+            offset += 1
+            face_count += 1
+            is_quad = bitset(bit, constants.MASK[constants.QUAD])
+            has_material = bitset(bit, constants.MASK[constants.MATERIALS])
+            has_uv = bitset(bit, constants.MASK[constants.UVS])
+            has_normal = bitset(bit, constants.MASK[constants.NORMALS])
+            has_color = bitset(bit, constants.MASK[constants.COLORS])
+
+            vector = 4 if is_quad else 3
+            offset += vector
+
+            if has_material:
+                offset += 1
+            if has_uv:
+                offset += vector
+            if has_normal:
+                offset += vector
+            if has_color:
+                offset += vector
+
+        return face_count
+
+    @property
+    def metadata(self):
+        metadata = {
+            constants.GENERATOR: constants.THREE,
+            constants.VERSION: FORMAT_VERSION
+        }
+
+        if self[constants.TYPE] == constants.GEOMETRY.title():
+            self.__geometry_metadata(metadata)
+        else:
+            self.__buffer_geometry_metadata(metadata)
+
+        return metadata
+
+    def copy(self, scene=True):
+        logger.debug('Geometry().copy(scene=%s)', scene)
+        dispatch = {
+            True: self._scene_format,
+            False: self._geometry_format
+        }
+        data = dispatch[scene]()
+
+        try:
+            data[constants.MATERIALS] = self[constants.MATERIALS].copy()
+        except KeyError:
+            logger.debug('No materials to copy')
+            pass
+
+        return data
+
+    def copy_textures(self):
+        logger.debug('Geometry().copy_textures()')
+        if self.options.get(constants.COPY_TEXTURES):
+            texture_registration = self.register_textures()
+            if texture_registration:
+                logger.info('%s has registered textures', self.node)
+                io.copy_registered_textures(
+                    os.path.dirname(self.scene.filepath),
+                    texture_registration)
+
+    def parse(self):
+        logger.debug('Geometry().parse()')
+        if self[constants.TYPE] == constants.GEOMETRY.title():
+            logger.info('Parsing Geometry format')
+            self.__parse_geometry()
+        else:
+            logger.info('Parsing BufferGeometry format')
+            self.__parse_buffer_geometry()
+
+    def register_textures(self):
+        logger.debug('Geometry().register_textures()')
+        return api.mesh.texture_registration(self.node) 
+
+    def write(self, filepath=None):
+        logger.debug('Geometry().write(filepath=%s)', filepath)
+
+        filepath = filepath or self.scene.filepath
+
+        io.dump(filepath, self.copy(scene=False), 
+            options=self.scene.options) 
+
+        if self.options.get(constants.MAPS):
+            logger.info('Copying textures for %s', self.node)
+            self.copy_textures()
+
+    def write_animation(self, filepath):
+        logger.debug('Geometry().write_animation(%s)', filepath)
+
+        for key in (constants.MORPH_TARGETS, constants.ANIMATION):
+            try:
+                data = self[key]
+                break
+            except KeyError:
+                pass
+        else:
+            logger.info('%s has no animation data', self.node)
+            return
+
+        filepath = os.path.join(filepath, self.animation_filename)
+        if filepath:
+            logger.info('Dumping animation data to %s', filepath)
+            io.dump(filepath, data, options=self.scene.options)
+            return filepath
+        else:
+            logger.warning('Could not determine a filepath for '\
+                'animation data. Nothing written to disk.')
+
+    def _component_data(self):
+        logger.debug('Geometry()._component_data()')
+        
+        if self[constants.TYPE] != constants.GEOMETRY.title():
+            return self[constants.ATTRIBUTES]
+
+        components = [constants.VERTICES, constants.FACES, 
+            constants.UVS, constants.COLORS, constants.NORMALS,
+            constants.BONES, constants.SKIN_WEIGHTS, 
+            constants.SKIN_INDICES, constants.NAME]
+
+        data = {}
+        anim_components = [constants.MORPH_TARGETS, constants.ANIMATION]
+        if self.options.get(constants.EMBED_ANIMATION):
+            components.extend(anim_components)
+        else:
+            for component in anim_components:
+                try:
+                    self[component]
+                except KeyError:
+                    pass
+                else:
+                    data[component] = os.path.basename(
+                        self.animation_filename) 
+            else:
+                logger.info('No animation data found for %s', self.node)
+
+        for component in components:
+            try:
+                data[component] = self[component]
+            except KeyError:
+                logger.debug('Component %s not found', component)
+                pass
+
+        return data
+
+    def _geometry_format(self):
+        data = self._component_data()
+
+        if self[constants.TYPE] != constants.GEOMETRY.title():
+            data = {constants.ATTRIBUTES: data}
+
+        data[constants.METADATA] = {
+            constants.TYPE: self[constants.TYPE]
+        }
+
+        data[constants.METADATA].update(self.metadata)
+
+        return data
+
+    def __buffer_geometry_metadata(self, metadata):
+        for key, value in self[constants.ATTRIBUTES].items():
+            size = value[constants.ITEM_SIZE]
+            array = value[constants.ARRAY]
+            metadata[key] = len(array)/size
+        
+    def __geometry_metadata(self, metadata): 
+        skip = (constants.TYPE, constants.FACES, constants.UUID,
+            constants.ANIMATION, constants.SKIN_INDICES,
+            constants.SKIN_WEIGHTS, constants.NAME)
+        vectors = (constants.VERTICES, constants.NORMALS)
+
+        for key in self.keys():
+            if key in vectors:
+                try:
+                    metadata[key] = int(len(self[key])/3)
+                except KeyError:
+                    pass
+                continue
+
+            if key in skip: continue
+
+            metadata[key] = len(self[key])
+
+        faces = self.face_count
+        if faces > 0:
+            metadata[constants.FACES] = faces
+
+    def _scene_format(self):
+        data = {
+            constants.UUID: self[constants.UUID],
+            constants.TYPE: self[constants.TYPE]
+        }
+
+        component_data = self._component_data()
+        if self[constants.TYPE] == constants.GEOMETRY.title():
+            data[constants.DATA] = component_data
+            data[constants.DATA].update({
+                constants.METADATA: self.metadata
+            })
+        else:
+            if self.options[constants.EMBED_GEOMETRY]:
+                data[constants.DATA] = {
+                    constants.ATTRIBUTES: component_data
+                }
+            else:
+                data[constants.ATTRIBUTES] = component_data
+            data[constants.METADATA] = self.metadata 
+            data[constants.NAME] = self[constants.NAME]
+
+        return data 
+
+    def __parse_buffer_geometry(self):
+        self[constants.ATTRIBUTES] = {}
+
+        options_vertices = self.options.get(constants.VERTICES)
+        option_normals = self.options.get(constants.NORMALS)
+        option_uvs = self.options.get(constants.UVS)
+
+        dispatch = (
+            (constants.POSITION, options_vertices, 
+                api.mesh.buffer_position, 3), 
+            (constants.UV, option_uvs, api.mesh.buffer_uv, 2), 
+            (constants.NORMAL, option_normals, 
+                api.mesh.buffer_normal, 3)
+        )
+
+        for key, option, func, size in dispatch: 
+
+            if not option:
+                continue
+
+            array = func(self.node, self.options)
+            if not array: 
+                logger.warning('No array could be made for %s', key)
+                continue
+
+            self[constants.ATTRIBUTES][key] = {
+                constants.ITEM_SIZE: size,
+                constants.TYPE: constants.FLOAT_32,
+                constants.ARRAY: array
+            }
+
+    def __parse_geometry(self):
+        if self.options.get(constants.VERTICES):
+            logger.info('Parsing %s', constants.VERTICES)
+            self[constants.VERTICES] = api.mesh.vertices(
+                self.node, self.options)
+
+        if self.options.get(constants.FACES):
+            logger.info('Parsing %s', constants.FACES)
+            self[constants.FACES] = api.mesh.faces(
+                self.node, self.options)
+
+        if self.options.get(constants.NORMALS):
+            logger.info('Parsing %s', constants.NORMALS)
+            self[constants.NORMALS] = api.mesh.normals(
+                self.node, self.options)
+
+        if self.options.get(constants.COLORS):
+            logger.info('Parsing %s', constants.COLORS)
+            self[constants.COLORS] = api.mesh.vertex_colors(self.node)
+        
+        if self.options.get(constants.FACE_MATERIALS):
+            logger.info('Parsing %s', constants.FACE_MATERIALS)
+            self[constants.MATERIALS] = api.mesh.materials(
+                self.node, self.options)
+
+        if self.options.get(constants.UVS):
+            logger.info('Parsing %s', constants.UVS)
+            self[constants.UVS] = api.mesh.uvs(self.node, self.options)
+
+        if self.options.get(constants.ANIMATION):
+            logger.info('Parsing %s', constants.ANIMATION)
+            self[constants.ANIMATION] = api.mesh.animation(
+                self.node, self.options)
+
+        if self.options.get(constants.BONES):
+            logger.info('Parsing %s', constants.BONES)
+            self[constants.BONES] = api.mesh.bones(self.node) 
+
+        if self.options.get(constants.SKINNING):
+            logger.info('Parsing %s', constants.SKINNING)
+            self[constants.SKIN_INDICES] = api.mesh.skin_indices(self.node)
+            self[constants.SKIN_WEIGHTS] = api.mesh.skin_weights(self.node)
+
+        if self.options.get(constants.MORPH_TARGETS):
+            logger.info('Parsing %s', constants.MORPH_TARGETS)
+            self[constants.MORPH_TARGETS] = api.mesh.morph_targets(
+                self.node, self.options)
+

+ 25 - 0
utils/exporters/blender/addons/io_three/exporter/image.py

@@ -0,0 +1,25 @@
+import os
+from .. import constants
+from . import base_classes, io, api, logger
+
+
+class Image(base_classes.BaseNode):
+    # Export node wrapping a Blender image datablock.
+    def __init__(self, node, parent):
+        logger.debug('Image().__init__(%s)', node)
+        base_classes.BaseNode.__init__(self, node, parent, constants.IMAGE)
+
+        # URL is the bare file name; the image is copied next to the
+        # exported scene file
+        self[constants.URL] = api.image.file_name(self.node)
+
+    @property
+    def destination(self):
+        # full path the image will be copied to (scene dir + URL)
+        dirname = os.path.dirname(self.scene.filepath)
+        return os.path.join(dirname, self[constants.URL])
+
+    @property
+    def filepath(self):
+        # source path of the image on disk, as reported by Blender
+        return api.image.file_path(self.node)
+
+    def copy_texture(self, func=io.copy):
+        # Copy the source image to its destination; `func` is
+        # injectable and defaults to io.copy. Returns the destination.
+        logger.debug('Image().copy_texture()')
+        func(self.filepath, self.destination)
+        return self.destination

+ 67 - 0
utils/exporters/blender/addons/io_three/exporter/io.py

@@ -0,0 +1,67 @@
+import shutil
+from .. import constants
+from . import _json, logger, exceptions 
+
+
+def copy_registered_textures(dest, registration):
+    # Copy every registered texture's source file into dest.
+    # `registration` values are dicts carrying a 'file_path' key.
+    logger.debug('io.copy_registered_textures(%s, %s)', dest, registration)
+    for value in registration.values():
+        copy(value['file_path'], dest)
+
+
+def copy(src, dst):
+    logger.debug('io.copy(%s, %s)' % (src, dst))
+    shutil.copy(src, dst)
+
+
+def dump(filepath, data, options=None):
+    # Serialize `data` to filepath, as msgpack or JSON depending on
+    # the COMPRESSION option. Re-raises ImportError when msgpack is
+    # requested but not installed.
+    options = options or {}
+    logger.debug('io.dump(%s, data, options=%s)', filepath, options)
+
+    compress = options.get(constants.COMPRESSION, constants.NONE)
+    if compress == constants.MSGPACK:
+        try:
+            import msgpack
+        except ImportError:
+            logger.error('msgpack module not found')
+            raise
+
+        logger.info('Dumping to msgpack')
+        func = lambda x,y: msgpack.dump(x, y)
+        mode = 'wb'
+    else:
+        # configure the rounding used by the custom JSON encoder
+        round_off = options.get(constants.ENABLE_PRECISION)
+        if round_off:
+            _json.ROUND = options[constants.PRECISION]
+        else:
+            _json.ROUND = None
+
+        logger.info('Dumping to JSON')
+        func = lambda x,y: _json.json.dump(x, y, indent=4)
+        mode = 'w'
+
+    logger.info('Writing to %s', filepath)
+    with open(filepath, mode=mode) as stream:
+        func(data, stream)
+
+
+def load(filepath, options):
+    # Deserialize filepath as msgpack or JSON depending on the
+    # COMPRESSION option. Re-raises ImportError when msgpack is
+    # requested but not installed.
+    logger.debug('io.load(%s, %s)', filepath, options)
+    compress = options.get(constants.COMPRESSION, constants.NONE)
+    if compress == constants.MSGPACK:
+        try:
+            import msgpack
+        except ImportError:
+            logger.error('msgpack module not found')
+            raise
+        module = msgpack
+        mode = 'rb'
+    else:
+        logger.info('Loading JSON')
+        module = _json.json
+        mode = 'r'
+
+    with open(filepath, mode=mode) as stream:
+        data = module.load(stream)
+
+    return data

+ 58 - 0
utils/exporters/blender/addons/io_three/exporter/logger.py

@@ -0,0 +1,58 @@
+import os
+import logging
+import tempfile
+
+from .. import constants
+
+LOG_FILE = None
+LOGGER = None
+FILE_NAME = 'io_three.export.log'
+
+LEVELS = {
+    constants.DEBUG: logging.DEBUG,
+    constants.INFO: logging.INFO,
+    constants.WARNING: logging.WARNING,
+    constants.ERROR: logging.ERROR,
+    constants.CRITICAL: logging.CRITICAL
+}
+
+def init(level=constants.DEBUG):
+    global LOG_FILE
+    LOG_FILE = os.path.join(tempfile.gettempdir(), FILE_NAME)
+    with open(LOG_FILE, 'w'):
+        pass
+
+    global LOGGER
+    LOGGER = logging.getLogger('Three.Export')
+    LOGGER.setLevel(LEVELS[level])
+
+    stream = logging.StreamHandler()
+    stream.setLevel(LEVELS[level])
+
+    format_ = '%(asctime)s - %(name)s - %(levelname)s: %(message)s'
+    formatter = logging.Formatter(format_)
+
+    stream.setFormatter(formatter)
+
+    file_handler = logging.FileHandler(LOG_FILE)
+    file_handler.setLevel(LEVELS[level])
+    file_handler.setFormatter(formatter)
+
+    LOGGER.addHandler(stream)
+    LOGGER.addHandler(file_handler)
+
+
+# Convenience wrappers delegating to the shared module LOGGER.
+# init() must be called before any of these are used.
+def info(*args):
+    LOGGER.info(*args)
+
+def debug(*args):
+    LOGGER.debug(*args)
+
+def warning(*args):
+    LOGGER.warning(*args)
+
+def error(*args):
+    LOGGER.error(*args)
+
+def critical(*args):
+    LOGGER.critical(*args)

+ 83 - 0
utils/exporters/blender/addons/io_three/exporter/material.py

@@ -0,0 +1,83 @@
+from .. import constants
+from . import base_classes, utilities, logger, api
+
+
+class Material(base_classes.BaseNode):
+    # Export node for a Blender material, serialized as one of the
+    # three.js material types (Basic/Lambert/Phong).
+    def __init__(self, node, parent):
+        logger.debug('Material().__init__(%s)', node)
+        base_classes.BaseNode.__init__(self, node, parent, 
+            constants.MATERIAL)
+        
+        self.__common_attributes()
+        if self[constants.TYPE] == constants.THREE_PHONG:
+            self.__phong_attributes()
+
+        textures = self.parent.options.get(constants.MAPS)
+        if textures:
+            self.__update_maps()
+
+    def __common_attributes(self):
+        # attributes shared by all material types
+        logger.debug('Material().__common_attributes()')
+        dispatch = {
+            constants.PHONG: constants.THREE_PHONG,
+            constants.LAMBERT: constants.THREE_LAMBERT,
+            constants.BASIC: constants.THREE_BASIC
+        }
+        shader_type = api.material.type(self.node)
+        self[constants.TYPE] = dispatch[shader_type]
+
+        # colors are serialized as packed integers via rgb2int
+        ambient = api.material.ambient_color(self.node)
+        self[constants.AMBIENT] = utilities.rgb2int(ambient)
+
+        diffuse = api.material.diffuse_color(self.node)
+        self[constants.COLOR] = utilities.rgb2int(diffuse)
+    
+        emissive = api.material.emissive_color(self.node)
+        self[constants.EMISSIVE] = utilities.rgb2int(emissive)
+
+        vertex_color = api.material.use_vertex_colors(self.node)
+        self[constants.VERTEX_COLORS] = vertex_color
+
+        self[constants.BLENDING] = api.material.blending(self.node)
+
+        self[constants.DEPTH_TEST] = api.material.depth_test(self.node)
+
+        self[constants.DEPTH_WRITE] = api.material.depth_write(self.node)
+
+    def __phong_attributes(self):
+        # attributes only meaningful for Phong shading
+        logger.debug('Material().__phong_attributes()')
+        specular = api.material.specular_color(self.node)
+        self[constants.SPECULAR] = utilities.rgb2int(specular)
+        self[constants.SHININESS] = api.material.specular_coef(self.node)
+
+    def __update_maps(self):
+        # link texture maps by the UUID of the parsed Texture nodes
+        logger.debug('Material().__update_maps()')
+
+        mapping = (
+            (api.material.diffuse_map, constants.MAP),
+            (api.material.specular_map, constants.SPECULAR_MAP),
+            (api.material.light_map, constants.LIGHT_MAP)
+        )
+
+        for func,key in mapping:
+            map_node = func(self.node)
+            if map_node:
+                logger.info('Found map node %s for %s', map_node, key)
+                tex_inst = self.scene.texture(map_node.name)
+                self[key] = tex_inst[constants.UUID] 
+
+        # bump/normal maps (with scale factors) apply to Phong only
+        if self[constants.TYPE] ==  constants.THREE_PHONG:
+            mapping = (
+                (api.material.bump_map, constants.BUMP_MAP,
+                 constants.BUMP_SCALE, api.material.bump_scale),
+                (api.material.normal_map, constants.NORMAL_MAP,
+                 constants.NORMAL_SCALE, api.material.normal_scale)
+            )
+
+            for func, map_key, scale_key, scale_func in mapping:
+                map_node = func(self.node)
+                if not map_node: continue
+                logger.info('Found map node %s for %s', map_node, map_key)
+                tex_inst = self.scene.texture(map_node.name)
+                self[map_key] = tex_inst[constants.UUID] 
+                self[scale_key] = scale_func(self.node)

+ 116 - 0
utils/exporters/blender/addons/io_three/exporter/object.py

@@ -0,0 +1,116 @@
+from .. import constants
+from . import base_classes, logger, api
+
+
+class Object(base_classes.BaseNode):
+    # Export node for a scene object (mesh, camera or light). The
+    # root scene object is created by passing node=None.
+
+    def __init__(self, node, parent=None, type=None):
+        logger.debug('Object().__init__(%s)', node)
+        base_classes.BaseNode.__init__(self, node, parent=parent, type=type)
+
+        if self.node:
+            self.__node_setup()
+        else:
+            # node is None: this is the scene root
+            self.__root_setup()
+
+    def __init_camera(self):
+        # camera attributes: clipping planes plus either perspective
+        # or orthographic projection parameters
+        logger.debug('Object().__init_camera()')
+        self[constants.FAR] = api.camera.far(self.node)
+        self[constants.NEAR] = api.camera.near(self.node)
+
+        if self[constants.TYPE] == constants.PERSPECTIVE_CAMERA:
+            self[constants.ASPECT] = api.camera.aspect(self.node)
+            self[constants.FOV] = api.camera.fov(self.node)
+        elif self[constants.TYPE] == constants.ORTHOGRAPHIC_CAMERA:
+            self[constants.LEFT] = api.camera.left(self.node)
+            self[constants.RIGHT] = api.camera.right(self.node)
+            self[constants.TOP] = api.camera.top(self.node)
+            self[constants.BOTTOM] = api.camera.bottom(self.node)
+
+    #@TODO: need more light attributes. Some may have to come from
+    #       custom blender attributes.
+    def __init_light(self):
+        # light attributes; distance/angle only where applicable
+        logger.debug('Object().__init_light()')
+        self[constants.COLOR] = api.light.color(self.node)
+        self[constants.INTENSITY] = api.light.intensity(self.node)
+
+        if self[constants.TYPE] != constants.DIRECTIONAL_LIGHT:
+            self[constants.DISTANCE] = api.light.distance(self.node)
+    
+        if self[constants.TYPE] == constants.SPOT_LIGHT:
+            self[constants.ANGLE] = api.light.angle(self.node)
+
+    def __init_mesh(self):
+        # link this object to its parsed Geometry node by UUID
+        logger.debug('Object().__init_mesh()')
+        mesh = api.object.mesh(self.node, self.options)
+        node = self.scene.geometry(mesh)
+        if node:
+            self[constants.GEOMETRY] = node[constants.UUID]
+        else:
+            msg = 'Could not find Geometry() node for %s'
+            logger.error(msg, self.node)
+
+    def __node_setup(self):
+        # common transform/visibility attributes, material linkage,
+        # shadow flags, then type-specific initialization
+        logger.debug('Object().__node_setup()')
+        self[constants.NAME] = api.object.name(self.node)
+
+        self[constants.POSITION] = api.object.position(
+            self.node, self.options)
+
+        self[constants.ROTATION] = api.object.rotation(
+            self.node, self.options)
+
+        self[constants.SCALE] = api.object.scale(
+            self.node, self.options)
+
+        self[constants.VISIBLE] = api.object.visible(self.node)
+
+        self[constants.TYPE] = api.object.node_type(self.node)
+
+        if self.options.get(constants.MATERIALS):
+            logger.info('Parsing materials for %s', self.node)
+            material_name = api.object.material(self.node)
+            if material_name:
+                logger.info('Material found %s', material_name)
+                material_inst = self.scene.material(material_name)
+                self[constants.MATERIAL] = material_inst[constants.UUID]
+            else:
+                logger.info('%s has no materials', self.node)
+
+        # only these node types query the cast-shadow flag
+        casts_shadow = (constants.MESH, 
+            constants.DIRECTIONAL_LIGHT,
+            constants.SPOT_LIGHT)
+
+        if self[constants.TYPE] in casts_shadow:
+            logger.info('Querying shadow casting for %s', self.node)
+            self[constants.CAST_SHADOW] = \
+                api.object.cast_shadow(self.node)
+        
+        if self[constants.TYPE] == constants.MESH:
+            logger.info('Querying shadow receive for %s', self.node)
+            self[constants.RECEIVE_SHADOW] = \
+                api.object.receive_shadow(self.node)
+
+        camera = (constants.PERSPECTIVE_CAMERA,
+            constants.ORTHOGRAPHIC_CAMERA)
+
+        lights = (constants.AMBIENT_LIGHT, constants.DIRECTIONAL_LIGHT,
+            constants.AREA_LIGHT, constants.POINT_LIGHT, 
+            constants.SPOT_LIGHT, constants.HEMISPHERE_LIGHT)
+
+        if self[constants.TYPE] == constants.MESH:
+            self.__init_mesh()
+        elif self[constants.TYPE] in camera:
+            self.__init_camera()
+        elif self[constants.TYPE] in lights:
+            self.__init_light()
+
+        #for child in api.object.children(self.node, self.scene.valid_types):
+        #    if not self.get(constants.CHILDREN):
+        #        self[constants.CHILDREN] = [Object(child, parent=self)]
+        #    else:
+        #        self[constants.CHILDREN].append(Object(child, parent=self))
+
+    def __root_setup(self):
+        # root scene object carries only an identity matrix
+        logger.debug('Object().__root_setup()')
+        self[constants.MATRIX] = [1,0,0,0,0,1,0,0,0,0,1,0,0,0,0,1]

+ 196 - 0
utils/exporters/blender/addons/io_three/exporter/scene.py

@@ -0,0 +1,196 @@
+import os
+from .. import constants
+from . import (
+    base_classes,
+    image,
+    texture,
+    material,
+    geometry, 
+    exceptions,
+    object,
+    logger,
+    io,
+    api
+)
+
+
+class Scene(base_classes.BaseScene):
+    _defaults = {
+        constants.METADATA: constants.DEFAULT_METADATA.copy(),
+        constants.GEOMETRIES: [],
+        constants.MATERIALS: [],
+        constants.IMAGES: [],
+        constants.TEXTURES: []
+    }
+
+    def __init__(self, filepath, options=None):
+        logger.debug('Scene().__init__(%s, %s)', filepath, options)
+        base_classes.BaseScene.__init__(self, filepath, options or {})
+
+        source_file = api.scene_name()
+        if source_file:
+            self[constants.METADATA][constants.SOURCE_FILE] = source_file
+
+    @property
+    def valid_types(self):
+        valid_types = [api.constants.MESH]
+
+        if self.options.get(constants.CAMERAS):
+            logger.info('Adding cameras to valid object types') 
+            valid_types.append(api.constants.CAMERA)
+
+        if self.options.get(constants.LIGHTS):
+            logger.info('Adding lights to valid object types') 
+            valid_types.append(api.constants.LAMP)
+
+        return valid_types
+
+    def geometry(self, arg):
+        logger.debug('Scene().geometry(%s)', arg)
+        return self._find_node(arg, self[constants.GEOMETRIES])
+
+    def image(self, arg):
+        logger.debug('Scene().image%s)', arg)
+        return self._find_node(arg, self[constants.IMAGES])
+
+    def material(self, arg):
+        logger.debug('Scene().material(%s)', arg)
+        return self._find_node(arg, self[constants.MATERIALS])
+
+    def parse(self):
+        logger.debug('Scene().parse()')
+        if self.options.get(constants.MAPS):
+            self.__parse_textures()
+
+        if self.options.get(constants.MATERIALS):
+            self.__parse_materials()
+
+        self.__parse_geometries()
+        self.__parse_objects()
+
+    def texture(self, arg):
+        logger.debug('Scene().texture(%s)', arg)
+        return self._find_node(arg, self[constants.TEXTURES])
+
+    def write(self):
+        logger.debug('Scene().write()')
+        data = {}
+        
+        embed_anim = self.options.get(constants.EMBED_ANIMATION, True)
+        embed = self.options[constants.EMBED_GEOMETRY]
+
+        compression = self.options.get(constants.COMPRESSION)
+        extension = constants.EXTENSIONS.get(compression, 
+            constants.EXTENSIONS[constants.JSON])
+
+        #@TODO: test this new logic
+        export_dir = os.path.dirname(self.filepath)
+        for key, value in self.items():
+            
+            if key == constants.GEOMETRIES:
+                geometries = []
+                for geometry in value:
+
+                    if not embed_anim:
+                        geometry.write_animation(export_dir)
+
+                    if embed:
+                        for each in value:
+                            geometries.append(each.copy())
+                        continue
+
+                    geom_data = geometry.copy()
+
+                    geo_type = geom_data[constants.TYPE].lower()
+                    if geo_type == constants.GEOMETRY.lower():
+                        geom_data.pop(constants.DATA)
+                    elif geo_type == constants.BUFFER_GEOMETRY.lower():
+                        geom_data.pop(constants.ATTRIBUTES)
+                        geom_data.pop(constants.METADATA)
+
+                    url = 'geometry.%s%s' % (geometry.node, extension)
+                    geometry_file = os.path.join(export_dir, url)
+
+                    geometry.write(filepath=geometry_file)
+                    geom_data[constants.URL] = os.path.basename(url)
+
+                    geometries.append(geom_data)
+
+                data[key] = geometries
+            elif isinstance(value, list):
+                data[key] = []
+                for each in value:
+                    data[key].append(each.copy())
+            elif isinstance(value, dict):
+                data[key] = value.copy()
+
+        io.dump(self.filepath, data, options=self.options)
+
+        if self.options.get(constants.COPY_TEXTURES):
+            for geo in self[constants.GEOMETRIES]:
+                logger.info('Copying textures from %s', geo.node)
+                geo.copy_textures()
+
+    def _find_node(self, arg, manifest):
+        for index in manifest:
+            uuid = index.get(constants.UUID) == arg
+            name = index.node == arg
+            if uuid or name:
+                return index
+        else:
+            logger.debug('No matching node for %s', arg)
+
+    def __parse_geometries(self):
+        logger.debug('Scene().__parse_geometries()')
+
+        # this is an important step. please refer to the doc string
+        # on the function for more information
+        api.object.prep_meshes(self.options)
+        geometries = []
+
+        # now iterate over all the extracted mesh nodes and parse each one
+        for mesh in api.object.extracted_meshes():
+            logger.info('Parsing geometry %s', mesh)
+            geo = geometry.Geometry(mesh, self)
+            geo.parse()
+            geometries.append(geo)
+
+        logger.info('Added %d geometry nodes', len(geometries))
+        self[constants.GEOMETRIES] = geometries
+
+    def __parse_materials(self):
+        logger.debug('Scene().__parse_materials()')
+        materials = []
+
+        for material_name in api.material.used_materials():
+            logger.info('Parsing material %s', material_name)
+            materials.append(material.Material(material_name, parent=self)) 
+
+        logger.info('Added %d material nodes', len(materials))
+        self[constants.MATERIALS] = materials
+
+    def __parse_objects(self): 
+        logger.debug('Scene().__parse_objects()')
+        self[constants.OBJECT] = object.Object(None, parent=self)
+        self[constants.OBJECT][constants.TYPE] = constants.SCENE.title()
+
+        objects = [] 
+        for node in api.object.nodes(self.valid_types, self.options):
+            logger.info('Parsing object %s', node)
+            obj = object.Object(node, parent=self[constants.OBJECT])
+            objects.append(obj)
+
+        logger.info('Added %d object nodes', len(objects))
+        self[constants.OBJECT][constants.CHILDREN] = objects
+
+    def __parse_textures(self):
+        logger.debug('Scene().__parse_textures()')
+        textures = []
+
+        for texture_name in api.texture.textures():
+            logger.info('Parsing texture %s', texture_name)
+            tex_inst = texture.Texture(texture_name, self)
+            textures.append(tex_inst)
+
+        logger.info('Added %d texture nodes', len(textures))
+        self[constants.TEXTURES] = textures

+ 32 - 0
utils/exporters/blender/addons/io_three/exporter/texture.py

@@ -0,0 +1,32 @@
+from .. import constants
+from . import base_classes, image, api, logger
+
+
+class Texture(base_classes.BaseNode):  # serializable texture node; references its Image node by UUID
+    def __init__(self, node, parent):
+        logger.debug('Texture().__init__(%s)', node)
+        base_classes.BaseNode.__init__(self, node, parent, constants.TEXTURE)
+
+        img_inst = self.scene.image(api.texture.file_name(self.node))  # reuse an Image already registered for this file
+
+        if not img_inst:
+            image_node = api.texture.image_node(self.node)
+            img_inst = image.Image(image_node.name, self.scene)
+            self.scene[constants.IMAGES].append(img_inst)  # first use: register the new Image with the scene
+
+        self[constants.IMAGE] = img_inst[constants.UUID]  # store only the UUID reference, not the Image itself
+
+        self[constants.WRAP] = api.texture.wrap(self.node)
+
+        if constants.WRAPPING.REPEAT in self[constants.WRAP]:
+            self[constants.REPEAT] = api.texture.repeat(self.node)  # repeat counts only apply when wrapping repeats
+
+        self[constants.ANISOTROPY] = api.texture.anisotropy(self.node)
+        self[constants.MAG_FILTER] = api.texture.mag_filter(self.node)
+        self[constants.MIN_FILTER] = api.texture.min_filter(self.node)
+        self[constants.MAPPING] = api.texture.mapping(self.node)
+
+
+    @property
+    def image(self):  # resolve the stored UUID back to the scene's Image node
+        return self.scene.image(self[constants.IMAGE])

+ 64 - 0
utils/exporters/blender/addons/io_three/exporter/utilities.py

@@ -0,0 +1,64 @@
+import uuid
+import hashlib
+
+from .. import constants
+
+
+ROUND = constants.DEFAULT_PRECISION
+
+
+def bit_mask(flags):  # fold a {name: bool} dict into an int, bit positions taken from constants.MASK
+    bit = 0
+    true = lambda x,y: (x | (1 << y))   # set bit y of x
+    false = lambda x,y: (x & (~(1 << y)))  # clear bit y of x
+
+    for mask, position in constants.MASK.items():
+        func = true if flags.get(mask) else false
+        bit = func(bit, position)
+
+    return bit
+
+
+def hash(value):  # md5 hex digest of repr(value); NOTE(review): shadows the builtin hash()
+    hash_ = hashlib.md5()
+    hash_.update(repr(value).encode('utf8'))
+    return hash_.hexdigest()
+
+
+def id():  # random uppercase UUID4 string; NOTE(review): shadows the builtin id()
+    return str(uuid.uuid4()).upper()
+
+
+def rgb2int(rgb):  # pack three 0..1 float channels into a single 0xRRGGBB integer
+    is_tuple = isinstance(rgb, tuple)
+    rgb = list(rgb) if is_tuple else rgb   # tuple-to-list conversion; indexing below works either way
+
+    colour = (int(rgb[0]*255) << 16) + (int(rgb[1]*255) << 8) + int(rgb[2]*255)
+    return colour
+
+
+def round_off(value, ndigits=ROUND):  # round a scalar, tuple or list; return type mirrors the input's container type
+    is_tuple = isinstance(value, tuple)
+    is_list = isinstance(value, list)
+
+    value = list(value) if is_tuple else value
+    value = [value] if not is_list and not is_tuple else value  # promote a scalar to a one-element list
+
+    value = [round(val, ndigits) for val in value]
+
+    if is_tuple:
+        value = tuple(value)
+    elif not is_list:
+        value = value[0]  # scalar in, scalar out
+
+    return value
+
+
+def rounding(options):
+    round_off = options.get(constants.ENABLE_PRECISION)
+    if round_off:
+        round_val = options[constants.PRECISION]
+    else:
+        round_val = None
+
+    return (round_off, round_val)

+ 2 - 0
utils/exporters/blender/modules/README.md

@@ -0,0 +1,2 @@
+## msgpack
+https://github.com/msgpack/msgpack-python

+ 54 - 0
utils/exporters/blender/modules/msgpack/__init__.py

@@ -0,0 +1,54 @@
+# coding: utf-8
+from msgpack._version import version
+from msgpack.exceptions import *
+
+from collections import namedtuple
+
+
+class ExtType(namedtuple('ExtType', 'code data')):
+    """ExtType represents ext type in msgpack."""
+    def __new__(cls, code, data):
+        if not isinstance(code, int):
+            raise TypeError("code must be int")
+        if not isinstance(data, bytes):
+            raise TypeError("data must be bytes")
+        if not 0 <= code <= 127:
+            raise ValueError("code must be 0~127")
+        return super(ExtType, cls).__new__(cls, code, data)
+
+
+import os
+if os.environ.get('MSGPACK_PUREPYTHON'):
+    from msgpack.fallback import Packer, unpack, unpackb, Unpacker
+else:
+    try:
+        from msgpack._packer import Packer
+        from msgpack._unpacker import unpack, unpackb, Unpacker
+    except ImportError:
+        from msgpack.fallback import Packer, unpack, unpackb, Unpacker
+
+
+def pack(o, stream, **kwargs):
+    """
+    Pack object `o` and write it to `stream`
+
+    See :class:`Packer` for options.
+    """
+    packer = Packer(**kwargs)
+    stream.write(packer.pack(o))
+
+
+def packb(o, **kwargs):
+    """
+    Pack object `o` and return packed bytes
+
+    See :class:`Packer` for options.
+    """
+    return Packer(**kwargs).pack(o)
+
+# alias for compatibility to simplejson/marshal/pickle.
+load = unpack
+loads = unpackb
+
+dump = pack
+dumps = packb

BIN
utils/exporters/blender/modules/msgpack/__pycache__/__init__.cpython-34.pyc


BIN
utils/exporters/blender/modules/msgpack/__pycache__/_version.cpython-34.pyc


BIN
utils/exporters/blender/modules/msgpack/__pycache__/exceptions.cpython-34.pyc


BIN
utils/exporters/blender/modules/msgpack/__pycache__/fallback.cpython-34.pyc


+ 295 - 0
utils/exporters/blender/modules/msgpack/_packer.pyx

@@ -0,0 +1,295 @@
+# coding: utf-8
+#cython: embedsignature=True
+
+from cpython cimport *
+from libc.stdlib cimport *
+from libc.string cimport *
+from libc.limits cimport *
+from libc.stdint cimport int8_t
+
+from msgpack.exceptions import PackValueError
+from msgpack import ExtType
+
+
+cdef extern from "pack.h":
+    struct msgpack_packer:
+        char* buf
+        size_t length
+        size_t buf_size
+        bint use_bin_type
+
+    int msgpack_pack_int(msgpack_packer* pk, int d)
+    int msgpack_pack_nil(msgpack_packer* pk)
+    int msgpack_pack_true(msgpack_packer* pk)
+    int msgpack_pack_false(msgpack_packer* pk)
+    int msgpack_pack_long(msgpack_packer* pk, long d)
+    int msgpack_pack_long_long(msgpack_packer* pk, long long d)
+    int msgpack_pack_unsigned_long_long(msgpack_packer* pk, unsigned long long d)
+    int msgpack_pack_float(msgpack_packer* pk, float d)
+    int msgpack_pack_double(msgpack_packer* pk, double d)
+    int msgpack_pack_array(msgpack_packer* pk, size_t l)
+    int msgpack_pack_map(msgpack_packer* pk, size_t l)
+    int msgpack_pack_raw(msgpack_packer* pk, size_t l)
+    int msgpack_pack_bin(msgpack_packer* pk, size_t l)
+    int msgpack_pack_raw_body(msgpack_packer* pk, char* body, size_t l)
+    int msgpack_pack_ext(msgpack_packer* pk, int8_t typecode, size_t l)
+
+cdef int DEFAULT_RECURSE_LIMIT=511
+
+
+cdef class Packer(object):
+    """
+    MessagePack Packer
+
+    usage::
+
+        packer = Packer()
+        astream.write(packer.pack(a))
+        astream.write(packer.pack(b))
+
+    Packer's constructor has some keyword arguments:
+
+    :param callable default:
+        Convert user type to builtin type that Packer supports.
+        See also simplejson's document.
+    :param str encoding:
+        Convert unicode to bytes with this encoding. (default: 'utf-8')
+    :param str unicode_errors:
+        Error handler for encoding unicode. (default: 'strict')
+    :param bool use_single_float:
+        Use single precision float type for float. (default: False)
+    :param bool autoreset:
+        Reset buffer after each pack and return its content as `bytes`. (default: True).
+        If set this to false, use `bytes()` to get content and `.reset()` to clear buffer.
+    :param bool use_bin_type:
+        Use bin type introduced in msgpack spec 2.0 for bytes.
+        It also enables the str8 type for unicode.
+    """
+    cdef msgpack_packer pk
+    cdef object _default
+    cdef object _bencoding
+    cdef object _berrors
+    cdef char *encoding
+    cdef char *unicode_errors
+    cdef bool use_float
+    cdef bint autoreset
+
+    def __cinit__(self):
+        cdef int buf_size = 1024*1024
+        self.pk.buf = <char*> malloc(buf_size);
+        if self.pk.buf == NULL:
+            raise MemoryError("Unable to allocate internal buffer.")
+        self.pk.buf_size = buf_size
+        self.pk.length = 0
+
+    def __init__(self, default=None, encoding='utf-8', unicode_errors='strict',
+                 use_single_float=False, bint autoreset=1, bint use_bin_type=0):
+        """
+        """
+        self.use_float = use_single_float
+        self.autoreset = autoreset
+        self.pk.use_bin_type = use_bin_type
+        if default is not None:
+            if not PyCallable_Check(default):
+                raise TypeError("default must be a callable.")
+        self._default = default
+        if encoding is None:
+            self.encoding = NULL
+            self.unicode_errors = NULL
+        else:
+            if isinstance(encoding, unicode):
+                self._bencoding = encoding.encode('ascii')
+            else:
+                self._bencoding = encoding
+            self.encoding = PyBytes_AsString(self._bencoding)
+            if isinstance(unicode_errors, unicode):
+                self._berrors = unicode_errors.encode('ascii')
+            else:
+                self._berrors = unicode_errors
+            self.unicode_errors = PyBytes_AsString(self._berrors)
+
+    def __dealloc__(self):
+        free(self.pk.buf);
+
+    cdef int _pack(self, object o, int nest_limit=DEFAULT_RECURSE_LIMIT) except -1:
+        cdef long long llval
+        cdef unsigned long long ullval
+        cdef long longval
+        cdef float fval
+        cdef double dval
+        cdef char* rawval
+        cdef int ret
+        cdef dict d
+        cdef size_t L
+        cdef int default_used = 0
+
+        if nest_limit < 0:
+            raise PackValueError("recursion limit exceeded.")
+
+        while True:
+            if o is None:
+                ret = msgpack_pack_nil(&self.pk)
+            elif isinstance(o, bool):
+                if o:
+                    ret = msgpack_pack_true(&self.pk)
+                else:
+                    ret = msgpack_pack_false(&self.pk)
+            elif PyLong_Check(o):
+                # PyInt_Check(long) is True for Python 3.
+                # So we should test long before int.
+                if o > 0:
+                    ullval = o
+                    ret = msgpack_pack_unsigned_long_long(&self.pk, ullval)
+                else:
+                    llval = o
+                    ret = msgpack_pack_long_long(&self.pk, llval)
+            elif PyInt_Check(o):
+                longval = o
+                ret = msgpack_pack_long(&self.pk, longval)
+            elif PyFloat_Check(o):
+                if self.use_float:
+                   fval = o
+                   ret = msgpack_pack_float(&self.pk, fval)
+                else:
+                   dval = o
+                   ret = msgpack_pack_double(&self.pk, dval)
+            elif PyBytes_Check(o):
+                L = len(o)
+                if L > (2**32)-1:
+                    raise ValueError("bytes is too large")
+                rawval = o
+                ret = msgpack_pack_bin(&self.pk, L)
+                if ret == 0:
+                    ret = msgpack_pack_raw_body(&self.pk, rawval, L)
+            elif PyUnicode_Check(o):
+                if not self.encoding:
+                    raise TypeError("Can't encode unicode string: no encoding is specified")
+                o = PyUnicode_AsEncodedString(o, self.encoding, self.unicode_errors)
+                L = len(o)
+                if L > (2**32)-1:
+                    raise ValueError("dict is too large")
+                rawval = o
+                ret = msgpack_pack_raw(&self.pk, len(o))
+                if ret == 0:
+                    ret = msgpack_pack_raw_body(&self.pk, rawval, len(o))
+            elif PyDict_CheckExact(o):
+                d = <dict>o
+                L = len(d)
+                if L > (2**32)-1:
+                    raise ValueError("dict is too large")
+                ret = msgpack_pack_map(&self.pk, L)
+                if ret == 0:
+                    for k, v in d.iteritems():
+                        ret = self._pack(k, nest_limit-1)
+                        if ret != 0: break
+                        ret = self._pack(v, nest_limit-1)
+                        if ret != 0: break
+            elif PyDict_Check(o):
+                L = len(o)
+                if L > (2**32)-1:
+                    raise ValueError("dict is too large")
+                ret = msgpack_pack_map(&self.pk, L)
+                if ret == 0:
+                    for k, v in o.items():
+                        ret = self._pack(k, nest_limit-1)
+                        if ret != 0: break
+                        ret = self._pack(v, nest_limit-1)
+                        if ret != 0: break
+            elif isinstance(o, ExtType):
+                # This should be before Tuple because ExtType is namedtuple.
+                longval = o.code
+                rawval = o.data
+                L = len(o.data)
+                if L > (2**32)-1:
+                    raise ValueError("EXT data is too large")
+                ret = msgpack_pack_ext(&self.pk, longval, L)
+                ret = msgpack_pack_raw_body(&self.pk, rawval, L)
+            elif PyTuple_Check(o) or PyList_Check(o):
+                L = len(o)
+                if L > (2**32)-1:
+                    raise ValueError("list is too large")
+                ret = msgpack_pack_array(&self.pk, L)
+                if ret == 0:
+                    for v in o:
+                        ret = self._pack(v, nest_limit-1)
+                        if ret != 0: break
+            elif not default_used and self._default:
+                o = self._default(o)
+                default_used = 1
+                continue
+            else:
+                raise TypeError("can't serialize %r" % (o,))
+            return ret
+
+    cpdef pack(self, object obj):
+        cdef int ret
+        ret = self._pack(obj, DEFAULT_RECURSE_LIMIT)
+        if ret == -1:
+            raise MemoryError
+        elif ret:  # should not happen.
+            raise TypeError
+        if self.autoreset:
+            buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length)
+            self.pk.length = 0
+            return buf
+
+    def pack_ext_type(self, typecode, data):
+        msgpack_pack_ext(&self.pk, typecode, len(data))
+        msgpack_pack_raw_body(&self.pk, data, len(data))
+
+    def pack_array_header(self, size_t size):
+        if size > (2**32-1):
+            raise ValueError
+        cdef int ret = msgpack_pack_array(&self.pk, size)
+        if ret == -1:
+            raise MemoryError
+        elif ret:  # should not happen
+            raise TypeError
+        if self.autoreset:
+            buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length)
+            self.pk.length = 0
+            return buf
+
+    def pack_map_header(self, size_t size):
+        if size > (2**32-1):
+            raise ValueError
+        cdef int ret = msgpack_pack_map(&self.pk, size)
+        if ret == -1:
+            raise MemoryError
+        elif ret:  # should not happen
+            raise TypeError
+        if self.autoreset:
+            buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length)
+            self.pk.length = 0
+            return buf
+
+    def pack_map_pairs(self, object pairs):
+        """
+        Pack *pairs* as msgpack map type.
+
+        *pairs* should be a sequence of pairs.
+        (`len(pairs)` and `for k, v in pairs:` should be supported.)
+        """
+        cdef int ret = msgpack_pack_map(&self.pk, len(pairs))
+        if ret == 0:
+            for k, v in pairs:
+                ret = self._pack(k)
+                if ret != 0: break
+                ret = self._pack(v)
+                if ret != 0: break
+        if ret == -1:
+            raise MemoryError
+        elif ret:  # should not happen
+            raise TypeError
+        if self.autoreset:
+            buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length)
+            self.pk.length = 0
+            return buf
+
+    def reset(self):
+        """Clear internal buffer."""
+        self.pk.length = 0
+
+    def bytes(self):
+        """Return buffer content."""
+        return PyBytes_FromStringAndSize(self.pk.buf, self.pk.length)

+ 426 - 0
utils/exporters/blender/modules/msgpack/_unpacker.pyx

@@ -0,0 +1,426 @@
+# coding: utf-8
+#cython: embedsignature=True
+
+from cpython cimport *
+cdef extern from "Python.h":
+    ctypedef struct PyObject
+    cdef int PyObject_AsReadBuffer(object o, const void** buff, Py_ssize_t* buf_len) except -1
+
+from libc.stdlib cimport *
+from libc.string cimport *
+from libc.limits cimport *
+
+from msgpack.exceptions import (
+        BufferFull,
+        OutOfData,
+        UnpackValueError,
+        ExtraData,
+        )
+from msgpack import ExtType
+
+
+cdef extern from "unpack.h":
+    ctypedef struct msgpack_user:
+        bint use_list
+        PyObject* object_hook
+        bint has_pairs_hook # call object_hook with k-v pairs
+        PyObject* list_hook
+        PyObject* ext_hook
+        char *encoding
+        char *unicode_errors
+
+    ctypedef struct unpack_context:
+        msgpack_user user
+        PyObject* obj
+        size_t count
+
+    ctypedef int (*execute_fn)(unpack_context* ctx, const char* data,
+                               size_t len, size_t* off) except? -1
+    execute_fn unpack_construct
+    execute_fn unpack_skip
+    execute_fn read_array_header
+    execute_fn read_map_header
+    void unpack_init(unpack_context* ctx)
+    object unpack_data(unpack_context* ctx)
+
+cdef inline init_ctx(unpack_context *ctx,
+                     object object_hook, object object_pairs_hook,
+                     object list_hook, object ext_hook,
+                     bint use_list, char* encoding, char* unicode_errors):
+    unpack_init(ctx)
+    ctx.user.use_list = use_list
+    ctx.user.object_hook = ctx.user.list_hook = <PyObject*>NULL
+
+    if object_hook is not None and object_pairs_hook is not None:
+        raise TypeError("object_pairs_hook and object_hook are mutually exclusive.")
+
+    if object_hook is not None:
+        if not PyCallable_Check(object_hook):
+            raise TypeError("object_hook must be a callable.")
+        ctx.user.object_hook = <PyObject*>object_hook
+
+    if object_pairs_hook is None:
+        ctx.user.has_pairs_hook = False
+    else:
+        if not PyCallable_Check(object_pairs_hook):
+            raise TypeError("object_pairs_hook must be a callable.")
+        ctx.user.object_hook = <PyObject*>object_pairs_hook
+        ctx.user.has_pairs_hook = True
+
+    if list_hook is not None:
+        if not PyCallable_Check(list_hook):
+            raise TypeError("list_hook must be a callable.")
+        ctx.user.list_hook = <PyObject*>list_hook
+
+    if ext_hook is not None:
+        if not PyCallable_Check(ext_hook):
+            raise TypeError("ext_hook must be a callable.")
+        ctx.user.ext_hook = <PyObject*>ext_hook
+
+    ctx.user.encoding = encoding
+    ctx.user.unicode_errors = unicode_errors
+
+def default_read_extended_type(typecode, data):
+    raise NotImplementedError("Cannot decode extended type with typecode=%d" % typecode)
+
+def unpackb(object packed, object object_hook=None, object list_hook=None,
+            bint use_list=1, encoding=None, unicode_errors="strict",
+            object_pairs_hook=None, ext_hook=ExtType):
+    """
+    Unpack packed_bytes to object. Returns an unpacked object.
+
+    Raises `ValueError` when `packed` contains extra bytes.
+
+    See :class:`Unpacker` for options.
+    """
+    cdef unpack_context ctx
+    cdef size_t off = 0
+    cdef int ret
+
+    cdef char* buf
+    cdef Py_ssize_t buf_len
+    cdef char* cenc = NULL
+    cdef char* cerr = NULL
+
+    PyObject_AsReadBuffer(packed, <const void**>&buf, &buf_len)
+
+    if encoding is not None:
+        if isinstance(encoding, unicode):
+            encoding = encoding.encode('ascii')
+        cenc = PyBytes_AsString(encoding)
+
+    if unicode_errors is not None:
+        if isinstance(unicode_errors, unicode):
+            unicode_errors = unicode_errors.encode('ascii')
+        cerr = PyBytes_AsString(unicode_errors)
+
+    init_ctx(&ctx, object_hook, object_pairs_hook, list_hook, ext_hook,
+             use_list, cenc, cerr)
+    ret = unpack_construct(&ctx, buf, buf_len, &off)
+    if ret == 1:
+        obj = unpack_data(&ctx)
+        if off < buf_len:
+            raise ExtraData(obj, PyBytes_FromStringAndSize(buf+off, buf_len-off))
+        return obj
+    else:
+        raise UnpackValueError("Unpack failed: error = %d" % (ret,))
+
+
+def unpack(object stream, object object_hook=None, object list_hook=None,
+           bint use_list=1, encoding=None, unicode_errors="strict",
+           object_pairs_hook=None,
+           ):
+    """
+    Unpack an object from `stream`.
+
+    Raises `ValueError` when `stream` has extra bytes.
+
+    See :class:`Unpacker` for options.
+    """
+    return unpackb(stream.read(), use_list=use_list,
+                   object_hook=object_hook, object_pairs_hook=object_pairs_hook, list_hook=list_hook,
+                   encoding=encoding, unicode_errors=unicode_errors,
+                   )
+
+
+cdef class Unpacker(object):
+    """
+    Streaming unpacker.
+
+    arguments:
+
+    :param file_like:
+        File-like object having `.read(n)` method.
+        If specified, unpacker reads serialized data from it and :meth:`feed()` is not usable.
+
+    :param int read_size:
+        Used as `file_like.read(read_size)`. (default: `min(1024**2, max_buffer_size)`)
+
+    :param bool use_list:
+        If true, unpack msgpack array to Python list.
+        Otherwise, unpack to Python tuple. (default: True)
+
+    :param callable object_hook:
+        When specified, it should be callable.
+        Unpacker calls it with a dict argument after unpacking msgpack map.
+        (See also simplejson)
+
+    :param callable object_pairs_hook:
+        When specified, it should be callable.
+        Unpacker calls it with a list of key-value pairs after unpacking msgpack map.
+        (See also simplejson)
+
+    :param str encoding:
+        Encoding used for decoding msgpack raw.
+        If it is None (default), msgpack raw is deserialized to Python bytes.
+
+    :param str unicode_errors:
+        Used for decoding msgpack raw with *encoding*.
+        (default: `'strict'`)
+
+    :param int max_buffer_size:
+        Limits size of data waiting unpacked.  0 means system's INT_MAX (default).
+        Raises `BufferFull` exception when it is insufficient.
+        You should set this parameter when unpacking data from an untrusted source.
+
+    example of streaming deserialize from file-like object::
+
+        unpacker = Unpacker(file_like)
+        for o in unpacker:
+            process(o)
+
+    example of streaming deserialize from socket::
+
+        unpacker = Unpacker()
+        while True:
+            buf = sock.recv(1024**2)
+            if not buf:
+                break
+            unpacker.feed(buf)
+            for o in unpacker:
+                process(o)
+    """
+    cdef unpack_context ctx
+    cdef char* buf
+    cdef size_t buf_size, buf_head, buf_tail
+    cdef object file_like
+    cdef object file_like_read
+    cdef Py_ssize_t read_size
+    # To maintain refcnt.
+    cdef object object_hook, object_pairs_hook, list_hook, ext_hook
+    cdef object encoding, unicode_errors
+    cdef size_t max_buffer_size
+
+    def __cinit__(self):
+        self.buf = NULL
+
+    def __dealloc__(self):
+        free(self.buf)
+        self.buf = NULL
+
+    def __init__(self, file_like=None, Py_ssize_t read_size=0, bint use_list=1,
+                 object object_hook=None, object object_pairs_hook=None, object list_hook=None,
+                 str encoding=None, str unicode_errors='strict', int max_buffer_size=0,
+                 object ext_hook=ExtType):
+        cdef char *cenc=NULL,
+        cdef char *cerr=NULL
+
+        self.object_hook = object_hook
+        self.object_pairs_hook = object_pairs_hook
+        self.list_hook = list_hook
+        self.ext_hook = ext_hook
+
+        self.file_like = file_like
+        if file_like:
+            self.file_like_read = file_like.read
+            if not PyCallable_Check(self.file_like_read):
+                raise TypeError("`file_like.read` must be a callable.")
+        if not max_buffer_size:
+            max_buffer_size = INT_MAX
+        if read_size > max_buffer_size:
+            raise ValueError("read_size should be less or equal to max_buffer_size")
+        if not read_size:
+            read_size = min(max_buffer_size, 1024**2)
+        self.max_buffer_size = max_buffer_size
+        self.read_size = read_size
+        self.buf = <char*>malloc(read_size)
+        if self.buf == NULL:
+            raise MemoryError("Unable to allocate internal buffer.")
+        self.buf_size = read_size
+        self.buf_head = 0
+        self.buf_tail = 0
+
+        if encoding is not None:
+            if isinstance(encoding, unicode):
+                self.encoding = encoding.encode('ascii')
+            else:
+                self.encoding = encoding
+            cenc = PyBytes_AsString(self.encoding)
+
+        if unicode_errors is not None:
+            if isinstance(unicode_errors, unicode):
+                self.unicode_errors = unicode_errors.encode('ascii')
+            else:
+                self.unicode_errors = unicode_errors
+            cerr = PyBytes_AsString(self.unicode_errors)
+
+        init_ctx(&self.ctx, object_hook, object_pairs_hook, list_hook,
+                 ext_hook, use_list, cenc, cerr)
+
+    def feed(self, object next_bytes):
+        """Append `next_bytes` to internal buffer."""
+        cdef Py_buffer pybuff
+        if self.file_like is not None:
+            raise AssertionError(
+                    "unpacker.feed() is not be able to use with `file_like`.")
+        PyObject_GetBuffer(next_bytes, &pybuff, PyBUF_SIMPLE)
+        try:
+            self.append_buffer(<char*>pybuff.buf, pybuff.len)
+        finally:
+            PyBuffer_Release(&pybuff)
+
+    cdef append_buffer(self, void* _buf, Py_ssize_t _buf_len):
+        cdef:
+            char* buf = self.buf
+            char* new_buf
+            size_t head = self.buf_head
+            size_t tail = self.buf_tail
+            size_t buf_size = self.buf_size
+            size_t new_size
+
+        if tail + _buf_len > buf_size:
+            if ((tail - head) + _buf_len) <= buf_size:
+                # move to front.
+                memmove(buf, buf + head, tail - head)
+                tail -= head
+                head = 0
+            else:
+                # expand buffer.
+                new_size = (tail-head) + _buf_len
+                if new_size > self.max_buffer_size:
+                    raise BufferFull
+                new_size = min(new_size*2, self.max_buffer_size)
+                new_buf = <char*>malloc(new_size)
+                if new_buf == NULL:
+                    # self.buf still holds old buffer and will be freed during
+                    # obj destruction
+                    raise MemoryError("Unable to enlarge internal buffer.")
+                memcpy(new_buf, buf + head, tail - head)
+                free(buf)
+
+                buf = new_buf
+                buf_size = new_size
+                tail -= head
+                head = 0
+
+        memcpy(buf + tail, <char*>(_buf), _buf_len)
+        self.buf = buf
+        self.buf_head = head
+        self.buf_size = buf_size
+        self.buf_tail = tail + _buf_len
+
+    cdef read_from_file(self):
+        next_bytes = self.file_like_read(
+                min(self.read_size,
+                    self.max_buffer_size - (self.buf_tail - self.buf_head)
+                    ))
+        if next_bytes:
+            self.append_buffer(PyBytes_AsString(next_bytes), PyBytes_Size(next_bytes))
+        else:
+            self.file_like = None
+
+    cdef object _unpack(self, execute_fn execute, object write_bytes, bint iter=0):
+        cdef int ret
+        cdef object obj
+        cdef size_t prev_head
+
+        if self.buf_head >= self.buf_tail and self.file_like is not None:
+            self.read_from_file()
+
+        while 1:
+            prev_head = self.buf_head
+            if prev_head >= self.buf_tail:
+                if iter:
+                    raise StopIteration("No more data to unpack.")
+                else:
+                    raise OutOfData("No more data to unpack.")
+
+            ret = execute(&self.ctx, self.buf, self.buf_tail, &self.buf_head)
+            if write_bytes is not None:
+                write_bytes(PyBytes_FromStringAndSize(self.buf + prev_head, self.buf_head - prev_head))
+
+            if ret == 1:
+                obj = unpack_data(&self.ctx)
+                unpack_init(&self.ctx)
+                return obj
+            elif ret == 0:
+                if self.file_like is not None:
+                    self.read_from_file()
+                    continue
+                if iter:
+                    raise StopIteration("No more data to unpack.")
+                else:
+                    raise OutOfData("No more data to unpack.")
+            else:
+                raise ValueError("Unpack failed: error = %d" % (ret,))
+
+    def read_bytes(self, Py_ssize_t nbytes):
+        """read a specified number of raw bytes from the stream"""
+        cdef size_t nread
+        nread = min(self.buf_tail - self.buf_head, nbytes)
+        ret = PyBytes_FromStringAndSize(self.buf + self.buf_head, nread)
+        self.buf_head += nread
+        if len(ret) < nbytes and self.file_like is not None:
+            ret += self.file_like.read(nbytes - len(ret))
+        return ret
+
+    def unpack(self, object write_bytes=None):
+        """
+        unpack one object
+
+        If write_bytes is not None, it will be called with parts of the raw
+        message as it is unpacked.
+
+        Raises `OutOfData` when there are no more bytes to unpack.
+        """
+        return self._unpack(unpack_construct, write_bytes)
+
+    def skip(self, object write_bytes=None):
+        """
+        read and ignore one object, returning None
+
+        If write_bytes is not None, it will be called with parts of the raw
+        message as it is unpacked.
+
+        Raises `OutOfData` when there are no more bytes to unpack.
+        """
+        return self._unpack(unpack_skip, write_bytes)
+
+    def read_array_header(self, object write_bytes=None):
+        """assuming the next object is an array, return its size n, such that
+        the next n unpack() calls will iterate over its contents.
+
+        Raises `OutOfData` when there are no more bytes to unpack.
+        """
+        return self._unpack(read_array_header, write_bytes)
+
+    def read_map_header(self, object write_bytes=None):
+        """assuming the next object is a map, return its size n, such that the
+        next n * 2 unpack() calls will iterate over its key-value pairs.
+
+        Raises `OutOfData` when there are no more bytes to unpack.
+        """
+        return self._unpack(read_map_header, write_bytes)
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        return self._unpack(unpack_construct, None, 1)
+
+    # for debug.
+    #def _buf(self):
+    #    return PyString_FromStringAndSize(self.buf, self.buf_tail)
+
+    #def _off(self):
+    #    return self.buf_head

+ 1 - 0
utils/exporters/blender/modules/msgpack/_version.py

@@ -0,0 +1 @@
+version = (0, 4, 2)

+ 29 - 0
utils/exporters/blender/modules/msgpack/exceptions.py

@@ -0,0 +1,29 @@
+class UnpackException(Exception):
+    pass
+
+
+class BufferFull(UnpackException):
+    pass
+
+
+class OutOfData(UnpackException):
+    pass
+
+
+class UnpackValueError(UnpackException, ValueError):
+    pass
+
+
+class ExtraData(ValueError):
+    def __init__(self, unpacked, extra):
+        self.unpacked = unpacked
+        self.extra = extra
+
+    def __str__(self):
+        return "unpack(b) received extra data."
+
+class PackException(Exception):
+    pass
+
+class PackValueError(PackException, ValueError):
+    pass

+ 714 - 0
utils/exporters/blender/modules/msgpack/fallback.py

@@ -0,0 +1,714 @@
+"""Fallback pure Python implementation of msgpack"""
+
+import sys
+import array
+import struct
+
+if sys.version_info[0] == 3:
+    PY3 = True
+    int_types = int
+    Unicode = str
+    xrange = range
+    def dict_iteritems(d):
+        return d.items()
+else:
+    PY3 = False
+    int_types = (int, long)
+    Unicode = unicode
+    def dict_iteritems(d):
+        return d.iteritems()
+
+
+if hasattr(sys, 'pypy_version_info'):
+    # cStringIO is slow on PyPy, StringIO is faster.  However: PyPy's own
+    # StringBuilder is fastest.
+    from __pypy__ import newlist_hint
+    from __pypy__.builders import StringBuilder
+    USING_STRINGBUILDER = True
+    class StringIO(object):
+        def __init__(self, s=b''):
+            if s:
+                self.builder = StringBuilder(len(s))
+                self.builder.append(s)
+            else:
+                self.builder = StringBuilder()
+        def write(self, s):
+            self.builder.append(s)
+        def getvalue(self):
+            return self.builder.build()
+else:
+    USING_STRINGBUILDER = False
+    from io import BytesIO as StringIO
+    newlist_hint = lambda size: []
+
+from msgpack.exceptions import (
+    BufferFull,
+    OutOfData,
+    UnpackValueError,
+    PackValueError,
+    ExtraData)
+
+from msgpack import ExtType
+
+
+EX_SKIP                 = 0
+EX_CONSTRUCT            = 1
+EX_READ_ARRAY_HEADER    = 2
+EX_READ_MAP_HEADER      = 3
+
+TYPE_IMMEDIATE          = 0
+TYPE_ARRAY              = 1
+TYPE_MAP                = 2
+TYPE_RAW                = 3
+TYPE_BIN                = 4
+TYPE_EXT                = 5
+
+DEFAULT_RECURSE_LIMIT = 511
+
+
+def unpack(stream, **kwargs):
+    """
+    Unpack an object from `stream`.
+
+    Raises `ExtraData` when `packed` contains extra bytes.
+    See :class:`Unpacker` for options.
+    """
+    unpacker = Unpacker(stream, **kwargs)
+    ret = unpacker._fb_unpack()
+    if unpacker._fb_got_extradata():
+        raise ExtraData(ret, unpacker._fb_get_extradata())
+    return ret
+
+
+def unpackb(packed, **kwargs):
+    """
+    Unpack an object from `packed`.
+
+    Raises `ExtraData` when `packed` contains extra bytes.
+    See :class:`Unpacker` for options.
+    """
+    unpacker = Unpacker(None, **kwargs)
+    unpacker.feed(packed)
+    try:
+        ret = unpacker._fb_unpack()
+    except OutOfData:
+        raise UnpackValueError("Data is not enough.")
+    if unpacker._fb_got_extradata():
+        raise ExtraData(ret, unpacker._fb_get_extradata())
+    return ret
+
+
+class Unpacker(object):
+    """
+    Streaming unpacker.
+
+    `file_like` is a file-like object having a `.read(n)` method.
+    When `Unpacker` is initialized with a `file_like`, `.feed()` is not
+    usable.
+
+    `read_size` is used for `file_like.read(read_size)`.
+
+    If `use_list` is True (default), msgpack lists are deserialized to Python
+    lists.  Otherwise they are deserialized to tuples.
+
+    `object_hook` is the same as in simplejson.  If it is not None, it should
+    be callable and Unpacker calls it with a dict argument after deserializing
+    a map.
+
+    `object_pairs_hook` is the same as in simplejson.  If it is not None, it
+    should be callable and Unpacker calls it with a list of key-value pairs
+    after deserializing a map.
+
+    `ext_hook` is callback for ext (User defined) type. It called with two
+    arguments: (code, bytes). default: `msgpack.ExtType`
+
+    `encoding` is the encoding used for decoding msgpack bytes.  If it is
+    None (default), msgpack bytes are deserialized to Python bytes.
+
+    `unicode_errors` is used for decoding bytes.
+
+    `max_buffer_size` limits the buffer size.  0 means INT_MAX (default).
+
+    Raises `BufferFull` exception when it is unsufficient.
+
+    You should set this parameter when unpacking data from an untrustred source.
+
+    example of streaming deserialization from file-like object::
+
+        unpacker = Unpacker(file_like)
+        for o in unpacker:
+            do_something(o)
+
+    example of streaming deserialization from socket::
+
+        unpacker = Unpacker()
+        while 1:
+            buf = sock.recv(1024*2)
+            if not buf:
+                break
+            unpacker.feed(buf)
+            for o in unpacker:
+                do_something(o)
+    """
+
+    def __init__(self, file_like=None, read_size=0, use_list=True,
+                 object_hook=None, object_pairs_hook=None, list_hook=None,
+                 encoding=None, unicode_errors='strict', max_buffer_size=0,
+                 ext_hook=ExtType):
+        if file_like is None:
+            self._fb_feeding = True
+        else:
+            if not callable(file_like.read):
+                raise TypeError("`file_like.read` must be callable")
+            self.file_like = file_like
+            self._fb_feeding = False
+        self._fb_buffers = []
+        self._fb_buf_o = 0
+        self._fb_buf_i = 0
+        self._fb_buf_n = 0
+        self._max_buffer_size = max_buffer_size or 2**31-1
+        if read_size > self._max_buffer_size:
+            raise ValueError("read_size must be smaller than max_buffer_size")
+        self._read_size = read_size or min(self._max_buffer_size, 2048)
+        self._encoding = encoding
+        self._unicode_errors = unicode_errors
+        self._use_list = use_list
+        self._list_hook = list_hook
+        self._object_hook = object_hook
+        self._object_pairs_hook = object_pairs_hook
+        self._ext_hook = ext_hook
+
+        if list_hook is not None and not callable(list_hook):
+            raise TypeError('`list_hook` is not callable')
+        if object_hook is not None and not callable(object_hook):
+            raise TypeError('`object_hook` is not callable')
+        if object_pairs_hook is not None and not callable(object_pairs_hook):
+            raise TypeError('`object_pairs_hook` is not callable')
+        if object_hook is not None and object_pairs_hook is not None:
+            raise TypeError("object_pairs_hook and object_hook are mutually "
+                            "exclusive")
+        if not callable(ext_hook):
+            raise TypeError("`ext_hook` is not callable")
+
+    def feed(self, next_bytes):
+        if isinstance(next_bytes, array.array):
+            next_bytes = next_bytes.tostring()
+        elif isinstance(next_bytes, bytearray):
+            next_bytes = bytes(next_bytes)
+        assert self._fb_feeding
+        if self._fb_buf_n + len(next_bytes) > self._max_buffer_size:
+            raise BufferFull
+        self._fb_buf_n += len(next_bytes)
+        self._fb_buffers.append(next_bytes)
+
+    def _fb_consume(self):
+        self._fb_buffers = self._fb_buffers[self._fb_buf_i:]
+        if self._fb_buffers:
+            self._fb_buffers[0] = self._fb_buffers[0][self._fb_buf_o:]
+        self._fb_buf_o = 0
+        self._fb_buf_i = 0
+        self._fb_buf_n = sum(map(len, self._fb_buffers))
+
+    def _fb_got_extradata(self):
+        if self._fb_buf_i != len(self._fb_buffers):
+            return True
+        if self._fb_feeding:
+            return False
+        if not self.file_like:
+            return False
+        if self.file_like.read(1):
+            return True
+        return False
+
+    def __iter__(self):
+        return self
+
+    def read_bytes(self, n):
+        return self._fb_read(n)
+
+    def _fb_rollback(self):
+        self._fb_buf_i = 0
+        self._fb_buf_o = 0
+
+    def _fb_get_extradata(self):
+        bufs = self._fb_buffers[self._fb_buf_i:]
+        if bufs:
+            bufs[0] = bufs[0][self._fb_buf_o:]
+        return b''.join(bufs)
+
+    def _fb_read(self, n, write_bytes=None):
+        buffs = self._fb_buffers
+        if (write_bytes is None and self._fb_buf_i < len(buffs) and
+                self._fb_buf_o + n < len(buffs[self._fb_buf_i])):
+            self._fb_buf_o += n
+            return buffs[self._fb_buf_i][self._fb_buf_o - n:self._fb_buf_o]
+
+        ret = b''
+        while len(ret) != n:
+            if self._fb_buf_i == len(buffs):
+                if self._fb_feeding:
+                    break
+                tmp = self.file_like.read(self._read_size)
+                if not tmp:
+                    break
+                buffs.append(tmp)
+                continue
+            sliced = n - len(ret)
+            ret += buffs[self._fb_buf_i][self._fb_buf_o:self._fb_buf_o + sliced]
+            self._fb_buf_o += sliced
+            if self._fb_buf_o >= len(buffs[self._fb_buf_i]):
+                self._fb_buf_o = 0
+                self._fb_buf_i += 1
+        if len(ret) != n:
+            self._fb_rollback()
+            raise OutOfData
+        if write_bytes is not None:
+            write_bytes(ret)
+        return ret
+
+    def _read_header(self, execute=EX_CONSTRUCT, write_bytes=None):
+        typ = TYPE_IMMEDIATE
+        n = 0
+        obj = None
+        c = self._fb_read(1, write_bytes)
+        b = ord(c)
+        if   b & 0b10000000 == 0:
+            obj = b
+        elif b & 0b11100000 == 0b11100000:
+            obj = struct.unpack("b", c)[0]
+        elif b & 0b11100000 == 0b10100000:
+            n = b & 0b00011111
+            obj = self._fb_read(n, write_bytes)
+            typ = TYPE_RAW
+        elif b & 0b11110000 == 0b10010000:
+            n = b & 0b00001111
+            typ = TYPE_ARRAY
+        elif b & 0b11110000 == 0b10000000:
+            n = b & 0b00001111
+            typ = TYPE_MAP
+        elif b == 0xc0:
+            obj = None
+        elif b == 0xc2:
+            obj = False
+        elif b == 0xc3:
+            obj = True
+        elif b == 0xc4:
+            typ = TYPE_BIN
+            n = struct.unpack("B", self._fb_read(1, write_bytes))[0]
+            obj = self._fb_read(n, write_bytes)
+        elif b == 0xc5:
+            typ = TYPE_BIN
+            n = struct.unpack(">H", self._fb_read(2, write_bytes))[0]
+            obj = self._fb_read(n, write_bytes)
+        elif b == 0xc6:
+            typ = TYPE_BIN
+            n = struct.unpack(">I", self._fb_read(4, write_bytes))[0]
+            obj = self._fb_read(n, write_bytes)
+        elif b == 0xc7:  # ext 8
+            typ = TYPE_EXT
+            L, n = struct.unpack('Bb', self._fb_read(2, write_bytes))
+            obj = self._fb_read(L, write_bytes)
+        elif b == 0xc8:  # ext 16
+            typ = TYPE_EXT
+            L, n = struct.unpack('>Hb', self._fb_read(3, write_bytes))
+            obj = self._fb_read(L, write_bytes)
+        elif b == 0xc9:  # ext 32
+            typ = TYPE_EXT
+            L, n = struct.unpack('>Ib', self._fb_read(5, write_bytes))
+            obj = self._fb_read(L, write_bytes)
+        elif b == 0xca:
+            obj = struct.unpack(">f", self._fb_read(4, write_bytes))[0]
+        elif b == 0xcb:
+            obj = struct.unpack(">d", self._fb_read(8, write_bytes))[0]
+        elif b == 0xcc:
+            obj = struct.unpack("B", self._fb_read(1, write_bytes))[0]
+        elif b == 0xcd:
+            obj = struct.unpack(">H", self._fb_read(2, write_bytes))[0]
+        elif b == 0xce:
+            obj = struct.unpack(">I", self._fb_read(4, write_bytes))[0]
+        elif b == 0xcf:
+            obj = struct.unpack(">Q", self._fb_read(8, write_bytes))[0]
+        elif b == 0xd0:
+            obj = struct.unpack("b", self._fb_read(1, write_bytes))[0]
+        elif b == 0xd1:
+            obj = struct.unpack(">h", self._fb_read(2, write_bytes))[0]
+        elif b == 0xd2:
+            obj = struct.unpack(">i", self._fb_read(4, write_bytes))[0]
+        elif b == 0xd3:
+            obj = struct.unpack(">q", self._fb_read(8, write_bytes))[0]
+        elif b == 0xd4:  # fixext 1
+            typ = TYPE_EXT
+            n, obj = struct.unpack('b1s', self._fb_read(2, write_bytes))
+        elif b == 0xd5:  # fixext 2
+            typ = TYPE_EXT
+            n, obj = struct.unpack('b2s', self._fb_read(3, write_bytes))
+        elif b == 0xd6:  # fixext 4
+            typ = TYPE_EXT
+            n, obj = struct.unpack('b4s', self._fb_read(5, write_bytes))
+        elif b == 0xd7:  # fixext 8
+            typ = TYPE_EXT
+            n, obj = struct.unpack('b8s', self._fb_read(9, write_bytes))
+        elif b == 0xd8:  # fixext 16
+            typ = TYPE_EXT
+            n, obj = struct.unpack('b16s', self._fb_read(17, write_bytes))
+        elif b == 0xd9:
+            typ = TYPE_RAW
+            n = struct.unpack("B", self._fb_read(1, write_bytes))[0]
+            obj = self._fb_read(n, write_bytes)
+        elif b == 0xda:
+            typ = TYPE_RAW
+            n = struct.unpack(">H", self._fb_read(2, write_bytes))[0]
+            obj = self._fb_read(n, write_bytes)
+        elif b == 0xdb:
+            typ = TYPE_RAW
+            n = struct.unpack(">I", self._fb_read(4, write_bytes))[0]
+            obj = self._fb_read(n, write_bytes)
+        elif b == 0xdc:
+            n = struct.unpack(">H", self._fb_read(2, write_bytes))[0]
+            typ = TYPE_ARRAY
+        elif b == 0xdd:
+            n = struct.unpack(">I", self._fb_read(4, write_bytes))[0]
+            typ = TYPE_ARRAY
+        elif b == 0xde:
+            n = struct.unpack(">H", self._fb_read(2, write_bytes))[0]
+            typ = TYPE_MAP
+        elif b == 0xdf:
+            n = struct.unpack(">I", self._fb_read(4, write_bytes))[0]
+            typ = TYPE_MAP
+        else:
+            raise UnpackValueError("Unknown header: 0x%x" % b)
+        return typ, n, obj
+
+    def _fb_unpack(self, execute=EX_CONSTRUCT, write_bytes=None):
+        typ, n, obj = self._read_header(execute, write_bytes)
+
+        if execute == EX_READ_ARRAY_HEADER:
+            if typ != TYPE_ARRAY:
+                raise UnpackValueError("Expected array")
+            return n
+        if execute == EX_READ_MAP_HEADER:
+            if typ != TYPE_MAP:
+                raise UnpackValueError("Expected map")
+            return n
+        # TODO should we eliminate the recursion?
+        if typ == TYPE_ARRAY:
+            if execute == EX_SKIP:
+                for i in xrange(n):
+                    # TODO check whether we need to call `list_hook`
+                    self._fb_unpack(EX_SKIP, write_bytes)
+                return
+            ret = newlist_hint(n)
+            for i in xrange(n):
+                ret.append(self._fb_unpack(EX_CONSTRUCT, write_bytes))
+            if self._list_hook is not None:
+                ret = self._list_hook(ret)
+            # TODO is the interaction between `list_hook` and `use_list` ok?
+            return ret if self._use_list else tuple(ret)
+        if typ == TYPE_MAP:
+            if execute == EX_SKIP:
+                for i in xrange(n):
+                    # TODO check whether we need to call hooks
+                    self._fb_unpack(EX_SKIP, write_bytes)
+                    self._fb_unpack(EX_SKIP, write_bytes)
+                return
+            if self._object_pairs_hook is not None:
+                ret = self._object_pairs_hook(
+                    (self._fb_unpack(EX_CONSTRUCT, write_bytes),
+                     self._fb_unpack(EX_CONSTRUCT, write_bytes))
+                    for _ in xrange(n))
+            else:
+                ret = {}
+                for _ in xrange(n):
+                    key = self._fb_unpack(EX_CONSTRUCT, write_bytes)
+                    ret[key] = self._fb_unpack(EX_CONSTRUCT, write_bytes)
+                if self._object_hook is not None:
+                    ret = self._object_hook(ret)
+            return ret
+        if execute == EX_SKIP:
+            return
+        if typ == TYPE_RAW:
+            if self._encoding is not None:
+                obj = obj.decode(self._encoding, self._unicode_errors)
+            return obj
+        if typ == TYPE_EXT:
+            return self._ext_hook(n, obj)
+        if typ == TYPE_BIN:
+            return obj
+        assert typ == TYPE_IMMEDIATE
+        return obj
+
+    def next(self):
+        try:
+            ret = self._fb_unpack(EX_CONSTRUCT, None)
+            self._fb_consume()
+            return ret
+        except OutOfData:
+            raise StopIteration
+    __next__ = next
+
+    def skip(self, write_bytes=None):
+        self._fb_unpack(EX_SKIP, write_bytes)
+        self._fb_consume()
+
+    def unpack(self, write_bytes=None):
+        ret = self._fb_unpack(EX_CONSTRUCT, write_bytes)
+        self._fb_consume()
+        return ret
+
+    def read_array_header(self, write_bytes=None):
+        ret = self._fb_unpack(EX_READ_ARRAY_HEADER, write_bytes)
+        self._fb_consume()
+        return ret
+
+    def read_map_header(self, write_bytes=None):
+        ret = self._fb_unpack(EX_READ_MAP_HEADER, write_bytes)
+        self._fb_consume()
+        return ret
+
+
+class Packer(object):
+    """
+    MessagePack Packer
+
+    usage:
+
+        packer = Packer()
+        astream.write(packer.pack(a))
+        astream.write(packer.pack(b))
+
+    Packer's constructor has some keyword arguments:
+
+    :param callable default:
+        Convert user type to builtin type that Packer supports.
+        See also simplejson's document.
+    :param str encoding:
+            Convert unicode to bytes with this encoding. (default: 'utf-8')
+    :param str unicode_errors:
+        Error handler for encoding unicode. (default: 'strict')
+    :param bool use_single_float:
+        Use single precision float type for float. (default: False)
+    :param bool autoreset:
+        Reset buffer after each pack and return it's content as `bytes`. (default: True).
+        If set this to false, use `bytes()` to get content and `.reset()` to clear buffer.
+    :param bool use_bin_type:
+        Use bin type introduced in msgpack spec 2.0 for bytes.
+        It also enable str8 type for unicode.
+    """
+    def __init__(self, default=None, encoding='utf-8', unicode_errors='strict',
+                 use_single_float=False, autoreset=True, use_bin_type=False):
+        self._use_float = use_single_float
+        self._autoreset = autoreset
+        self._use_bin_type = use_bin_type
+        self._encoding = encoding
+        self._unicode_errors = unicode_errors
+        self._buffer = StringIO()
+        if default is not None:
+            if not callable(default):
+                raise TypeError("default must be callable")
+        self._default = default
+
+    def _pack(self, obj, nest_limit=DEFAULT_RECURSE_LIMIT, isinstance=isinstance):
+        default_used = False
+        while True:
+            if nest_limit < 0:
+                raise PackValueError("recursion limit exceeded")
+            if obj is None:
+                return self._buffer.write(b"\xc0")
+            if isinstance(obj, bool):
+                if obj:
+                    return self._buffer.write(b"\xc3")
+                return self._buffer.write(b"\xc2")
+            if isinstance(obj, int_types):
+                if 0 <= obj < 0x80:
+                    return self._buffer.write(struct.pack("B", obj))
+                if -0x20 <= obj < 0:
+                    return self._buffer.write(struct.pack("b", obj))
+                if 0x80 <= obj <= 0xff:
+                    return self._buffer.write(struct.pack("BB", 0xcc, obj))
+                if -0x80 <= obj < 0:
+                    return self._buffer.write(struct.pack(">Bb", 0xd0, obj))
+                if 0xff < obj <= 0xffff:
+                    return self._buffer.write(struct.pack(">BH", 0xcd, obj))
+                if -0x8000 <= obj < -0x80:
+                    return self._buffer.write(struct.pack(">Bh", 0xd1, obj))
+                if 0xffff < obj <= 0xffffffff:
+                    return self._buffer.write(struct.pack(">BI", 0xce, obj))
+                if -0x80000000 <= obj < -0x8000:
+                    return self._buffer.write(struct.pack(">Bi", 0xd2, obj))
+                if 0xffffffff < obj <= 0xffffffffffffffff:
+                    return self._buffer.write(struct.pack(">BQ", 0xcf, obj))
+                if -0x8000000000000000 <= obj < -0x80000000:
+                    return self._buffer.write(struct.pack(">Bq", 0xd3, obj))
+                raise PackValueError("Integer value out of range")
+            if self._use_bin_type and isinstance(obj, bytes):
+                n = len(obj)
+                if n <= 0xff:
+                    self._buffer.write(struct.pack('>BB', 0xc4, n))
+                elif n <= 0xffff:
+                    self._buffer.write(struct.pack(">BH", 0xc5, n))
+                elif n <= 0xffffffff:
+                    self._buffer.write(struct.pack(">BI", 0xc6, n))
+                else:
+                    raise PackValueError("Bytes is too large")
+                return self._buffer.write(obj)
+            if isinstance(obj, (Unicode, bytes)):
+                if isinstance(obj, Unicode):
+                    if self._encoding is None:
+                        raise TypeError(
+                            "Can't encode unicode string: "
+                            "no encoding is specified")
+                    obj = obj.encode(self._encoding, self._unicode_errors)
+                n = len(obj)
+                if n <= 0x1f:
+                    self._buffer.write(struct.pack('B', 0xa0 + n))
+                elif self._use_bin_type and n <= 0xff:
+                    self._buffer.write(struct.pack('>BB', 0xd9, n))
+                elif n <= 0xffff:
+                    self._buffer.write(struct.pack(">BH", 0xda, n))
+                elif n <= 0xffffffff:
+                    self._buffer.write(struct.pack(">BI", 0xdb, n))
+                else:
+                    raise PackValueError("String is too large")
+                return self._buffer.write(obj)
+            if isinstance(obj, float):
+                if self._use_float:
+                    return self._buffer.write(struct.pack(">Bf", 0xca, obj))
+                return self._buffer.write(struct.pack(">Bd", 0xcb, obj))
+            if isinstance(obj, ExtType):
+                code = obj.code
+                data = obj.data
+                assert isinstance(code, int)
+                assert isinstance(data, bytes)
+                L = len(data)
+                if L == 1:
+                    self._buffer.write(b'\xd4')
+                elif L == 2:
+                    self._buffer.write(b'\xd5')
+                elif L == 4:
+                    self._buffer.write(b'\xd6')
+                elif L == 8:
+                    self._buffer.write(b'\xd7')
+                elif L == 16:
+                    self._buffer.write(b'\xd8')
+                elif L <= 0xff:
+                    self._buffer.write(struct.pack(">BB", 0xc7, L))
+                elif L <= 0xffff:
+                    self._buffer.write(struct.pack(">BH", 0xc8, L))
+                else:
+                    self._buffer.write(struct.pack(">BI", 0xc9, L))
+                self._buffer.write(struct.pack("b", code))
+                self._buffer.write(data)
+                return
+            if isinstance(obj, (list, tuple)):
+                n = len(obj)
+                self._fb_pack_array_header(n)
+                for i in xrange(n):
+                    self._pack(obj[i], nest_limit - 1)
+                return
+            if isinstance(obj, dict):
+                return self._fb_pack_map_pairs(len(obj), dict_iteritems(obj),
+                                               nest_limit - 1)
+            if not default_used and self._default is not None:
+                obj = self._default(obj)
+                default_used = 1
+                continue
+            raise TypeError("Cannot serialize %r" % obj)
+
+    def pack(self, obj):
+        self._pack(obj)
+        ret = self._buffer.getvalue()
+        if self._autoreset:
+            self._buffer = StringIO()
+        elif USING_STRINGBUILDER:
+            self._buffer = StringIO(ret)
+        return ret
+
+    def pack_map_pairs(self, pairs):
+        self._fb_pack_map_pairs(len(pairs), pairs)
+        ret = self._buffer.getvalue()
+        if self._autoreset:
+            self._buffer = StringIO()
+        elif USING_STRINGBUILDER:
+            self._buffer = StringIO(ret)
+        return ret
+
+    def pack_array_header(self, n):
+        if n >= 2**32:
+            raise ValueError
+        self._fb_pack_array_header(n)
+        ret = self._buffer.getvalue()
+        if self._autoreset:
+            self._buffer = StringIO()
+        elif USING_STRINGBUILDER:
+            self._buffer = StringIO(ret)
+        return ret
+
+    def pack_map_header(self, n):
+        if n >= 2**32:
+            raise ValueError
+        self._fb_pack_map_header(n)
+        ret = self._buffer.getvalue()
+        if self._autoreset:
+            self._buffer = StringIO()
+        elif USING_STRINGBUILDER:
+            self._buffer = StringIO(ret)
+        return ret
+
+    def pack_ext_type(self, typecode, data):
+        if not isinstance(typecode, int):
+            raise TypeError("typecode must have int type.")
+        if not 0 <= typecode <= 127:
+            raise ValueError("typecode should be 0-127")
+        if not isinstance(data, bytes):
+            raise TypeError("data must have bytes type")
+        L = len(data)
+        if L > 0xffffffff:
+            raise ValueError("Too large data")
+        if L == 1:
+            self._buffer.write(b'\xd4')
+        elif L == 2:
+            self._buffer.write(b'\xd5')
+        elif L == 4:
+            self._buffer.write(b'\xd6')
+        elif L == 8:
+            self._buffer.write(b'\xd7')
+        elif L == 16:
+            self._buffer.write(b'\xd8')
+        elif L <= 0xff:
+            self._buffer.write(b'\xc7' + struct.pack('B', L))
+        elif L <= 0xffff:
+            self._buffer.write(b'\xc8' + struct.pack('>H', L))
+        else:
+            self._buffer.write(b'\xc9' + struct.pack('>I', L))
+        self._buffer.write(struct.pack('B', typecode))
+        self._buffer.write(data)
+
+    def _fb_pack_array_header(self, n):
+        if n <= 0x0f:
+            return self._buffer.write(struct.pack('B', 0x90 + n))
+        if n <= 0xffff:
+            return self._buffer.write(struct.pack(">BH", 0xdc, n))
+        if n <= 0xffffffff:
+            return self._buffer.write(struct.pack(">BI", 0xdd, n))
+        raise PackValueError("Array is too large")
+
+    def _fb_pack_map_header(self, n):
+        if n <= 0x0f:
+            return self._buffer.write(struct.pack('B', 0x80 + n))
+        if n <= 0xffff:
+            return self._buffer.write(struct.pack(">BH", 0xde, n))
+        if n <= 0xffffffff:
+            return self._buffer.write(struct.pack(">BI", 0xdf, n))
+        raise PackValueError("Dict is too large")
+
+    def _fb_pack_map_pairs(self, n, pairs, nest_limit=DEFAULT_RECURSE_LIMIT):
+        self._fb_pack_map_header(n)
+        for (k, v) in pairs:
+            self._pack(k, nest_limit - 1)
+            self._pack(v, nest_limit - 1)
+
+    def bytes(self):
+        return self._buffer.getvalue()
+
+    def reset(self):
+        self._buffer = StringIO()

+ 103 - 0
utils/exporters/blender/modules/msgpack/pack.h

@@ -0,0 +1,103 @@
+/*
+ * MessagePack for Python packing routine
+ *
+ * Copyright (C) 2009 Naoki INADA
+ *
+ *    Licensed under the Apache License, Version 2.0 (the "License");
+ *    you may not use this file except in compliance with the License.
+ *    You may obtain a copy of the License at
+ *
+ *        http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *    Unless required by applicable law or agreed to in writing, software
+ *    distributed under the License is distributed on an "AS IS" BASIS,
+ *    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *    See the License for the specific language governing permissions and
+ *    limitations under the License.
+ */
+
+#include <stddef.h>
+#include <stdlib.h>
+#include "sysdep.h"
+#include <limits.h>
+#include <string.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef _MSC_VER
+#define inline __inline
+#endif
+
+typedef struct msgpack_packer {
+    char *buf;
+    size_t length;
+    size_t buf_size;
+    bool use_bin_type;
+} msgpack_packer;
+
+typedef struct Packer Packer;
+
+static inline int msgpack_pack_int(msgpack_packer* pk, int d);
+static inline int msgpack_pack_long(msgpack_packer* pk, long d);
+static inline int msgpack_pack_long_long(msgpack_packer* pk, long long d);
+static inline int msgpack_pack_unsigned_short(msgpack_packer* pk, unsigned short d);
+static inline int msgpack_pack_unsigned_int(msgpack_packer* pk, unsigned int d);
+static inline int msgpack_pack_unsigned_long(msgpack_packer* pk, unsigned long d);
+//static inline int msgpack_pack_unsigned_long_long(msgpack_packer* pk, unsigned long long d);
+
+static inline int msgpack_pack_uint8(msgpack_packer* pk, uint8_t d);
+static inline int msgpack_pack_uint16(msgpack_packer* pk, uint16_t d);
+static inline int msgpack_pack_uint32(msgpack_packer* pk, uint32_t d);
+static inline int msgpack_pack_uint64(msgpack_packer* pk, uint64_t d);
+static inline int msgpack_pack_int8(msgpack_packer* pk, int8_t d);
+static inline int msgpack_pack_int16(msgpack_packer* pk, int16_t d);
+static inline int msgpack_pack_int32(msgpack_packer* pk, int32_t d);
+static inline int msgpack_pack_int64(msgpack_packer* pk, int64_t d);
+
+static inline int msgpack_pack_float(msgpack_packer* pk, float d);
+static inline int msgpack_pack_double(msgpack_packer* pk, double d);
+
+static inline int msgpack_pack_nil(msgpack_packer* pk);
+static inline int msgpack_pack_true(msgpack_packer* pk);
+static inline int msgpack_pack_false(msgpack_packer* pk);
+
+static inline int msgpack_pack_array(msgpack_packer* pk, unsigned int n);
+
+static inline int msgpack_pack_map(msgpack_packer* pk, unsigned int n);
+
+static inline int msgpack_pack_raw(msgpack_packer* pk, size_t l);
+static inline int msgpack_pack_bin(msgpack_packer* pk, size_t l);
+static inline int msgpack_pack_raw_body(msgpack_packer* pk, const void* b, size_t l);
+
+static inline int msgpack_pack_ext(msgpack_packer* pk, int8_t typecode, size_t l);
+
+static inline int msgpack_pack_write(msgpack_packer* pk, const char *data, size_t l)
+{
+    char* buf = pk->buf;
+    size_t bs = pk->buf_size;
+    size_t len = pk->length;
+
+    if (len + l > bs) {
+        bs = (len + l) * 2;
+        buf = (char*)realloc(buf, bs);
+        if (!buf) return -1;
+    }
+    memcpy(buf + len, data, l);
+    len += l;
+
+    pk->buf = buf;
+    pk->buf_size = bs;
+    pk->length = len;
+    return 0;
+}
+
+#define msgpack_pack_append_buffer(user, buf, len) \
+        return msgpack_pack_write(user, (const char*)buf, len)
+
+#include "pack_template.h"
+
+#ifdef __cplusplus
+}
+#endif

+ 785 - 0
utils/exporters/blender/modules/msgpack/pack_template.h

@@ -0,0 +1,785 @@
+/*
+ * MessagePack packing routine template
+ *
+ * Copyright (C) 2008-2010 FURUHASHI Sadayuki
+ *
+ *    Licensed under the Apache License, Version 2.0 (the "License");
+ *    you may not use this file except in compliance with the License.
+ *    You may obtain a copy of the License at
+ *
+ *        http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *    Unless required by applicable law or agreed to in writing, software
+ *    distributed under the License is distributed on an "AS IS" BASIS,
+ *    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *    See the License for the specific language governing permissions and
+ *    limitations under the License.
+ */
+
+/* TAKE8_N(d): lvalue for the least-significant byte of an N-bit integer
+ * in host memory.  Used to emit small values as a single fixnum byte;
+ * the index differs per byte order, hence the two branches. */
+#if defined(__LITTLE_ENDIAN__)
+#define TAKE8_8(d)  ((uint8_t*)&d)[0]
+#define TAKE8_16(d) ((uint8_t*)&d)[0]
+#define TAKE8_32(d) ((uint8_t*)&d)[0]
+#define TAKE8_64(d) ((uint8_t*)&d)[0]
+#elif defined(__BIG_ENDIAN__)
+#define TAKE8_8(d)  ((uint8_t*)&d)[0]
+#define TAKE8_16(d) ((uint8_t*)&d)[1]
+#define TAKE8_32(d) ((uint8_t*)&d)[3]
+#define TAKE8_64(d) ((uint8_t*)&d)[7]
+#endif
+
+/* The including file must define the output callback first (pack.h
+ * maps it onto msgpack_pack_write). */
+#ifndef msgpack_pack_append_buffer
+#error msgpack_pack_append_buffer callback is not defined
+#endif
+
+
+/*
+ * Integer
+ */
+
+/* Encode an unsigned 8-bit value using the smallest wire form:
+ * positive fixnum (0x00-0x7f) or uint8 (0xcc + byte). */
+#define msgpack_pack_real_uint8(x, d) \
+do { \
+    if(d < (1<<7)) { \
+        /* fixnum */ \
+        msgpack_pack_append_buffer(x, &TAKE8_8(d), 1); \
+    } else { \
+        /* unsigned 8 */ \
+        unsigned char buf[2] = {0xcc, TAKE8_8(d)}; \
+        msgpack_pack_append_buffer(x, buf, 2); \
+    } \
+} while(0)
+
+/* Encode an unsigned 16-bit value using the smallest wire form:
+ * fixnum, uint8 (0xcc) or big-endian uint16 (0xcd). */
+#define msgpack_pack_real_uint16(x, d) \
+do { \
+    if(d < (1<<7)) { \
+        /* fixnum */ \
+        msgpack_pack_append_buffer(x, &TAKE8_16(d), 1); \
+    } else if(d < (1<<8)) { \
+        /* unsigned 8 */ \
+        unsigned char buf[2] = {0xcc, TAKE8_16(d)}; \
+        msgpack_pack_append_buffer(x, buf, 2); \
+    } else { \
+        /* unsigned 16 */ \
+        unsigned char buf[3]; \
+        buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \
+        msgpack_pack_append_buffer(x, buf, 3); \
+    } \
+} while(0)
+
+/* Encode an unsigned 32-bit value using the smallest wire form:
+ * fixnum, uint8 (0xcc), uint16 (0xcd) or uint32 (0xce), with the
+ * comparisons nested as a small binary search on magnitude. */
+#define msgpack_pack_real_uint32(x, d) \
+do { \
+    if(d < (1<<8)) { \
+        if(d < (1<<7)) { \
+            /* fixnum */ \
+            msgpack_pack_append_buffer(x, &TAKE8_32(d), 1); \
+        } else { \
+            /* unsigned 8 */ \
+            unsigned char buf[2] = {0xcc, TAKE8_32(d)}; \
+            msgpack_pack_append_buffer(x, buf, 2); \
+        } \
+    } else { \
+        if(d < (1<<16)) { \
+            /* unsigned 16 */ \
+            unsigned char buf[3]; \
+            buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \
+            msgpack_pack_append_buffer(x, buf, 3); \
+        } else { \
+            /* unsigned 32 */ \
+            unsigned char buf[5]; \
+            buf[0] = 0xce; _msgpack_store32(&buf[1], (uint32_t)d); \
+            msgpack_pack_append_buffer(x, buf, 5); \
+        } \
+    } \
+} while(0)
+
+/* Encode an unsigned 64-bit value using the smallest wire form:
+ * fixnum, uint8 (0xcc), uint16 (0xcd), uint32 (0xce) or uint64 (0xcf).
+ * ULL literals keep the shift widths legal for 64-bit comparisons. */
+#define msgpack_pack_real_uint64(x, d) \
+do { \
+    if(d < (1ULL<<8)) { \
+        if(d < (1ULL<<7)) { \
+            /* fixnum */ \
+            msgpack_pack_append_buffer(x, &TAKE8_64(d), 1); \
+        } else { \
+            /* unsigned 8 */ \
+            unsigned char buf[2] = {0xcc, TAKE8_64(d)}; \
+            msgpack_pack_append_buffer(x, buf, 2); \
+        } \
+    } else { \
+        if(d < (1ULL<<16)) { \
+            /* unsigned 16 */ \
+            unsigned char buf[3]; \
+            buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \
+            msgpack_pack_append_buffer(x, buf, 3); \
+        } else if(d < (1ULL<<32)) { \
+            /* unsigned 32 */ \
+            unsigned char buf[5]; \
+            buf[0] = 0xce; _msgpack_store32(&buf[1], (uint32_t)d); \
+            msgpack_pack_append_buffer(x, buf, 5); \
+        } else { \
+            /* unsigned 64 */ \
+            unsigned char buf[9]; \
+            buf[0] = 0xcf; _msgpack_store64(&buf[1], d); \
+            msgpack_pack_append_buffer(x, buf, 9); \
+        } \
+    } \
+} while(0)
+
+/* Encode a signed 8-bit value: negative fixnum covers [-32, -1] and
+ * positive fixnum [0, 127], so only values below -32 need the int8
+ * (0xd0) form. */
+#define msgpack_pack_real_int8(x, d) \
+do { \
+    if(d < -(1<<5)) { \
+        /* signed 8 */ \
+        unsigned char buf[2] = {0xd0, TAKE8_8(d)}; \
+        msgpack_pack_append_buffer(x, buf, 2); \
+    } else { \
+        /* fixnum */ \
+        msgpack_pack_append_buffer(x, &TAKE8_8(d), 1); \
+    } \
+} while(0)
+
+/* Encode a signed 16-bit value with the smallest wire form.  Negative
+ * values pick int8/int16; non-negative values reuse the unsigned
+ * encodings (fixnum, uint8, uint16), which the format permits. */
+#define msgpack_pack_real_int16(x, d) \
+do { \
+    if(d < -(1<<5)) { \
+        if(d < -(1<<7)) { \
+            /* signed 16 */ \
+            unsigned char buf[3]; \
+            buf[0] = 0xd1; _msgpack_store16(&buf[1], (int16_t)d); \
+            msgpack_pack_append_buffer(x, buf, 3); \
+        } else { \
+            /* signed 8 */ \
+            unsigned char buf[2] = {0xd0, TAKE8_16(d)}; \
+            msgpack_pack_append_buffer(x, buf, 2); \
+        } \
+    } else if(d < (1<<7)) { \
+        /* fixnum */ \
+        msgpack_pack_append_buffer(x, &TAKE8_16(d), 1); \
+    } else { \
+        if(d < (1<<8)) { \
+            /* unsigned 8 */ \
+            unsigned char buf[2] = {0xcc, TAKE8_16(d)}; \
+            msgpack_pack_append_buffer(x, buf, 2); \
+        } else { \
+            /* unsigned 16 */ \
+            unsigned char buf[3]; \
+            buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \
+            msgpack_pack_append_buffer(x, buf, 3); \
+        } \
+    } \
+} while(0)
+
+/* Encode a signed 32-bit value with the smallest wire form; same
+ * strategy as real_int16 extended one level deeper (int32 0xd2 /
+ * uint32 0xce). */
+#define msgpack_pack_real_int32(x, d) \
+do { \
+    if(d < -(1<<5)) { \
+        if(d < -(1<<15)) { \
+            /* signed 32 */ \
+            unsigned char buf[5]; \
+            buf[0] = 0xd2; _msgpack_store32(&buf[1], (int32_t)d); \
+            msgpack_pack_append_buffer(x, buf, 5); \
+        } else if(d < -(1<<7)) { \
+            /* signed 16 */ \
+            unsigned char buf[3]; \
+            buf[0] = 0xd1; _msgpack_store16(&buf[1], (int16_t)d); \
+            msgpack_pack_append_buffer(x, buf, 3); \
+        } else { \
+            /* signed 8 */ \
+            unsigned char buf[2] = {0xd0, TAKE8_32(d)}; \
+            msgpack_pack_append_buffer(x, buf, 2); \
+        } \
+    } else if(d < (1<<7)) { \
+        /* fixnum */ \
+        msgpack_pack_append_buffer(x, &TAKE8_32(d), 1); \
+    } else { \
+        if(d < (1<<8)) { \
+            /* unsigned 8 */ \
+            unsigned char buf[2] = {0xcc, TAKE8_32(d)}; \
+            msgpack_pack_append_buffer(x, buf, 2); \
+        } else if(d < (1<<16)) { \
+            /* unsigned 16 */ \
+            unsigned char buf[3]; \
+            buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \
+            msgpack_pack_append_buffer(x, buf, 3); \
+        } else { \
+            /* unsigned 32 */ \
+            unsigned char buf[5]; \
+            buf[0] = 0xce; _msgpack_store32(&buf[1], (uint32_t)d); \
+            msgpack_pack_append_buffer(x, buf, 5); \
+        } \
+    } \
+} while(0)
+
+/* Encode a signed 64-bit value with the smallest wire form: the full
+ * ladder from negative fixnum / int8 / int16 / int32 / int64 down one
+ * side and fixnum / uint8 / uint16 / uint32 / uint64 down the other. */
+#define msgpack_pack_real_int64(x, d) \
+do { \
+    if(d < -(1LL<<5)) { \
+        if(d < -(1LL<<15)) { \
+            if(d < -(1LL<<31)) { \
+                /* signed 64 */ \
+                unsigned char buf[9]; \
+                buf[0] = 0xd3; _msgpack_store64(&buf[1], d); \
+                msgpack_pack_append_buffer(x, buf, 9); \
+            } else { \
+                /* signed 32 */ \
+                unsigned char buf[5]; \
+                buf[0] = 0xd2; _msgpack_store32(&buf[1], (int32_t)d); \
+                msgpack_pack_append_buffer(x, buf, 5); \
+            } \
+        } else { \
+            if(d < -(1<<7)) { \
+                /* signed 16 */ \
+                unsigned char buf[3]; \
+                buf[0] = 0xd1; _msgpack_store16(&buf[1], (int16_t)d); \
+                msgpack_pack_append_buffer(x, buf, 3); \
+            } else { \
+                /* signed 8 */ \
+                unsigned char buf[2] = {0xd0, TAKE8_64(d)}; \
+                msgpack_pack_append_buffer(x, buf, 2); \
+            } \
+        } \
+    } else if(d < (1<<7)) { \
+        /* fixnum */ \
+        msgpack_pack_append_buffer(x, &TAKE8_64(d), 1); \
+    } else { \
+        if(d < (1LL<<16)) { \
+            if(d < (1<<8)) { \
+                /* unsigned 8 */ \
+                unsigned char buf[2] = {0xcc, TAKE8_64(d)}; \
+                msgpack_pack_append_buffer(x, buf, 2); \
+            } else { \
+                /* unsigned 16 */ \
+                unsigned char buf[3]; \
+                buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \
+                msgpack_pack_append_buffer(x, buf, 3); \
+            } \
+        } else { \
+            if(d < (1LL<<32)) { \
+                /* unsigned 32 */ \
+                unsigned char buf[5]; \
+                buf[0] = 0xce; _msgpack_store32(&buf[1], (uint32_t)d); \
+                msgpack_pack_append_buffer(x, buf, 5); \
+            } else { \
+                /* unsigned 64 */ \
+                unsigned char buf[9]; \
+                buf[0] = 0xcf; _msgpack_store64(&buf[1], d); \
+                msgpack_pack_append_buffer(x, buf, 9); \
+            } \
+        } \
+    } \
+} while(0)
+
+
+/* Public fixed-width entry points.  Each msgpack_pack_real_* branch
+ * expands msgpack_pack_append_buffer, which is a `return` from the
+ * enclosing function -- so these bodies do return an int even though
+ * no return statement is visible. */
+static inline int msgpack_pack_uint8(msgpack_packer* x, uint8_t d)
+{
+    msgpack_pack_real_uint8(x, d);
+}
+
+static inline int msgpack_pack_uint16(msgpack_packer* x, uint16_t d)
+{
+    msgpack_pack_real_uint16(x, d);
+}
+
+static inline int msgpack_pack_uint32(msgpack_packer* x, uint32_t d)
+{
+    msgpack_pack_real_uint32(x, d);
+}
+
+static inline int msgpack_pack_uint64(msgpack_packer* x, uint64_t d)
+{
+    msgpack_pack_real_uint64(x, d);
+}
+
+static inline int msgpack_pack_int8(msgpack_packer* x, int8_t d)
+{
+    msgpack_pack_real_int8(x, d);
+}
+
+static inline int msgpack_pack_int16(msgpack_packer* x, int16_t d)
+{
+    msgpack_pack_real_int16(x, d);
+}
+
+static inline int msgpack_pack_int32(msgpack_packer* x, int32_t d)
+{
+    msgpack_pack_real_int32(x, d);
+}
+
+static inline int msgpack_pack_int64(msgpack_packer* x, int64_t d)
+{
+    msgpack_pack_real_int64(x, d);
+}
+
+
+//#ifdef msgpack_pack_inline_func_cint
+
+/* Width-dispatching wrappers for the native signed C integer types.
+ * The actual width is resolved at preprocessing time via the autoconf
+ * SIZEOF_* macros or the <limits.h> *_MAX constants; the final
+ * fallback is a runtime sizeof() test that the compiler can fold to a
+ * single branch. */
+static inline int msgpack_pack_short(msgpack_packer* x, short d)
+{
+#if defined(SIZEOF_SHORT)
+#if SIZEOF_SHORT == 2
+    msgpack_pack_real_int16(x, d);
+#elif SIZEOF_SHORT == 4
+    msgpack_pack_real_int32(x, d);
+#else
+    msgpack_pack_real_int64(x, d);
+#endif
+
+#elif defined(SHRT_MAX)
+#if SHRT_MAX == 0x7fff
+    msgpack_pack_real_int16(x, d);
+#elif SHRT_MAX == 0x7fffffff
+    msgpack_pack_real_int32(x, d);
+#else
+    msgpack_pack_real_int64(x, d);
+#endif
+
+#else
+if(sizeof(short) == 2) {
+    msgpack_pack_real_int16(x, d);
+} else if(sizeof(short) == 4) {
+    msgpack_pack_real_int32(x, d);
+} else {
+    msgpack_pack_real_int64(x, d);
+}
+#endif
+}
+
+static inline int msgpack_pack_int(msgpack_packer* x, int d)
+{
+#if defined(SIZEOF_INT)
+#if SIZEOF_INT == 2
+    msgpack_pack_real_int16(x, d);
+#elif SIZEOF_INT == 4
+    msgpack_pack_real_int32(x, d);
+#else
+    msgpack_pack_real_int64(x, d);
+#endif
+
+#elif defined(INT_MAX)
+#if INT_MAX == 0x7fff
+    msgpack_pack_real_int16(x, d);
+#elif INT_MAX == 0x7fffffff
+    msgpack_pack_real_int32(x, d);
+#else
+    msgpack_pack_real_int64(x, d);
+#endif
+
+#else
+if(sizeof(int) == 2) {
+    msgpack_pack_real_int16(x, d);
+} else if(sizeof(int) == 4) {
+    msgpack_pack_real_int32(x, d);
+} else {
+    msgpack_pack_real_int64(x, d);
+}
+#endif
+}
+
+static inline int msgpack_pack_long(msgpack_packer* x, long d)
+{
+#if defined(SIZEOF_LONG)
+#if SIZEOF_LONG == 2
+    msgpack_pack_real_int16(x, d);
+#elif SIZEOF_LONG == 4
+    msgpack_pack_real_int32(x, d);
+#else
+    msgpack_pack_real_int64(x, d);
+#endif
+
+#elif defined(LONG_MAX)
+#if LONG_MAX == 0x7fffL
+    msgpack_pack_real_int16(x, d);
+#elif LONG_MAX == 0x7fffffffL
+    msgpack_pack_real_int32(x, d);
+#else
+    msgpack_pack_real_int64(x, d);
+#endif
+
+#else
+if(sizeof(long) == 2) {
+    msgpack_pack_real_int16(x, d);
+} else if(sizeof(long) == 4) {
+    msgpack_pack_real_int32(x, d);
+} else {
+    msgpack_pack_real_int64(x, d);
+}
+#endif
+}
+
+static inline int msgpack_pack_long_long(msgpack_packer* x, long long d)
+{
+#if defined(SIZEOF_LONG_LONG)
+#if SIZEOF_LONG_LONG == 2
+    msgpack_pack_real_int16(x, d);
+#elif SIZEOF_LONG_LONG == 4
+    msgpack_pack_real_int32(x, d);
+#else
+    msgpack_pack_real_int64(x, d);
+#endif
+
+#elif defined(LLONG_MAX)
+/* NOTE(review): these compare LLONG_MAX against long-suffixed
+ * constants (0x7fffL / 0x7fffffffL); harmless in practice because the
+ * 64-bit case lands in the #else, but worth confirming upstream. */
+#if LLONG_MAX == 0x7fffL
+    msgpack_pack_real_int16(x, d);
+#elif LLONG_MAX == 0x7fffffffL
+    msgpack_pack_real_int32(x, d);
+#else
+    msgpack_pack_real_int64(x, d);
+#endif
+
+#else
+if(sizeof(long long) == 2) {
+    msgpack_pack_real_int16(x, d);
+} else if(sizeof(long long) == 4) {
+    msgpack_pack_real_int32(x, d);
+} else {
+    msgpack_pack_real_int64(x, d);
+}
+#endif
+}
+
+/* Width-dispatching wrappers for the native unsigned C integer types;
+ * mirror image of the signed wrappers above. */
+static inline int msgpack_pack_unsigned_short(msgpack_packer* x, unsigned short d)
+{
+#if defined(SIZEOF_SHORT)
+#if SIZEOF_SHORT == 2
+    msgpack_pack_real_uint16(x, d);
+#elif SIZEOF_SHORT == 4
+    msgpack_pack_real_uint32(x, d);
+#else
+    msgpack_pack_real_uint64(x, d);
+#endif
+
+#elif defined(USHRT_MAX)
+#if USHRT_MAX == 0xffffU
+    msgpack_pack_real_uint16(x, d);
+#elif USHRT_MAX == 0xffffffffU
+    msgpack_pack_real_uint32(x, d);
+#else
+    msgpack_pack_real_uint64(x, d);
+#endif
+
+#else
+if(sizeof(unsigned short) == 2) {
+    msgpack_pack_real_uint16(x, d);
+} else if(sizeof(unsigned short) == 4) {
+    msgpack_pack_real_uint32(x, d);
+} else {
+    msgpack_pack_real_uint64(x, d);
+}
+#endif
+}
+
+static inline int msgpack_pack_unsigned_int(msgpack_packer* x, unsigned int d)
+{
+#if defined(SIZEOF_INT)
+#if SIZEOF_INT == 2
+    msgpack_pack_real_uint16(x, d);
+#elif SIZEOF_INT == 4
+    msgpack_pack_real_uint32(x, d);
+#else
+    msgpack_pack_real_uint64(x, d);
+#endif
+
+#elif defined(UINT_MAX)
+#if UINT_MAX == 0xffffU
+    msgpack_pack_real_uint16(x, d);
+#elif UINT_MAX == 0xffffffffU
+    msgpack_pack_real_uint32(x, d);
+#else
+    msgpack_pack_real_uint64(x, d);
+#endif
+
+#else
+if(sizeof(unsigned int) == 2) {
+    msgpack_pack_real_uint16(x, d);
+} else if(sizeof(unsigned int) == 4) {
+    msgpack_pack_real_uint32(x, d);
+} else {
+    msgpack_pack_real_uint64(x, d);
+}
+#endif
+}
+
+static inline int msgpack_pack_unsigned_long(msgpack_packer* x, unsigned long d)
+{
+#if defined(SIZEOF_LONG)
+#if SIZEOF_LONG == 2
+    msgpack_pack_real_uint16(x, d);
+#elif SIZEOF_LONG == 4
+    msgpack_pack_real_uint32(x, d);
+#else
+    msgpack_pack_real_uint64(x, d);
+#endif
+
+#elif defined(ULONG_MAX)
+#if ULONG_MAX == 0xffffUL
+    msgpack_pack_real_uint16(x, d);
+#elif ULONG_MAX == 0xffffffffUL
+    msgpack_pack_real_uint32(x, d);
+#else
+    msgpack_pack_real_uint64(x, d);
+#endif
+
+#else
+if(sizeof(unsigned long) == 2) {
+    msgpack_pack_real_uint16(x, d);
+} else if(sizeof(unsigned long) == 4) {
+    msgpack_pack_real_uint32(x, d);
+} else {
+    msgpack_pack_real_uint64(x, d);
+}
+#endif
+}
+
+static inline int msgpack_pack_unsigned_long_long(msgpack_packer* x, unsigned long long d)
+{
+#if defined(SIZEOF_LONG_LONG)
+#if SIZEOF_LONG_LONG == 2
+    msgpack_pack_real_uint16(x, d);
+#elif SIZEOF_LONG_LONG == 4
+    msgpack_pack_real_uint32(x, d);
+#else
+    msgpack_pack_real_uint64(x, d);
+#endif
+
+#elif defined(ULLONG_MAX)
+/* NOTE(review): ULLONG_MAX compared against UL-suffixed constants,
+ * like the signed case above -- the 64-bit case still falls through to
+ * the #else, so behavior is correct on common platforms. */
+#if ULLONG_MAX == 0xffffUL
+    msgpack_pack_real_uint16(x, d);
+#elif ULLONG_MAX == 0xffffffffUL
+    msgpack_pack_real_uint32(x, d);
+#else
+    msgpack_pack_real_uint64(x, d);
+#endif
+
+#else
+if(sizeof(unsigned long long) == 2) {
+    msgpack_pack_real_uint16(x, d);
+} else if(sizeof(unsigned long long) == 4) {
+    msgpack_pack_real_uint32(x, d);
+} else {
+    msgpack_pack_real_uint64(x, d);
+}
+#endif
+}
+
+//#undef msgpack_pack_inline_func_cint
+//#endif
+
+
+
+/*
+ * Float
+ */
+
+/* Encode an IEEE-754 single as msgpack float32 (0xca + 4 big-endian
+ * bytes).  The union reinterprets the float's bit pattern without
+ * violating strict aliasing. */
+static inline int msgpack_pack_float(msgpack_packer* x, float d)
+{
+    union { float f; uint32_t i; } mem;
+    mem.f = d;
+    unsigned char buf[5];
+    buf[0] = 0xca; _msgpack_store32(&buf[1], mem.i);
+    msgpack_pack_append_buffer(x, buf, 5);
+}
+
+/* Encode an IEEE-754 double as msgpack float64 (0xcb + 8 big-endian
+ * bytes).  Old ARM OABI stores doubles with the two 32-bit words
+ * swapped, hence the extra word swap below. */
+static inline int msgpack_pack_double(msgpack_packer* x, double d)
+{
+    union { double f; uint64_t i; } mem;
+    mem.f = d;
+    unsigned char buf[9];
+    buf[0] = 0xcb;
+#if defined(__arm__) && !(__ARM_EABI__) // arm-oabi
+    // https://github.com/msgpack/msgpack-perl/pull/1
+    mem.i = (mem.i & 0xFFFFFFFFUL) << 32UL | (mem.i >> 32UL);
+#endif
+    _msgpack_store64(&buf[1], mem.i);
+    msgpack_pack_append_buffer(x, buf, 9);
+}
+
+
+/*
+ * Nil
+ */
+
+/* Single-byte constants: nil (0xc0), true (0xc3), false (0xc2). */
+static inline int msgpack_pack_nil(msgpack_packer* x)
+{
+    static const unsigned char d = 0xc0;
+    msgpack_pack_append_buffer(x, &d, 1);
+}
+
+
+/*
+ * Boolean
+ */
+
+static inline int msgpack_pack_true(msgpack_packer* x)
+{
+    static const unsigned char d = 0xc3;
+    msgpack_pack_append_buffer(x, &d, 1);
+}
+
+static inline int msgpack_pack_false(msgpack_packer* x)
+{
+    static const unsigned char d = 0xc2;
+    msgpack_pack_append_buffer(x, &d, 1);
+}
+
+
+/*
+ * Array
+ */
+
+/* Emit an array header for n elements (fixarray 0x90|n, array16 0xdc,
+ * or array32 0xdd).  Only the header is written; the caller must pack
+ * the n elements afterwards. */
+static inline int msgpack_pack_array(msgpack_packer* x, unsigned int n)
+{
+    if(n < 16) {
+        unsigned char d = 0x90 | n;
+        msgpack_pack_append_buffer(x, &d, 1);
+    } else if(n < 65536) {
+        unsigned char buf[3];
+        buf[0] = 0xdc; _msgpack_store16(&buf[1], (uint16_t)n);
+        msgpack_pack_append_buffer(x, buf, 3);
+    } else {
+        unsigned char buf[5];
+        buf[0] = 0xdd; _msgpack_store32(&buf[1], (uint32_t)n);
+        msgpack_pack_append_buffer(x, buf, 5);
+    }
+}
+
+
+/*
+ * Map
+ */
+
+/* Emit a map header for n key/value pairs (fixmap 0x80|n, map16 0xde,
+ * or map32 0xdf).  The caller must pack the 2*n objects afterwards. */
+static inline int msgpack_pack_map(msgpack_packer* x, unsigned int n)
+{
+    if(n < 16) {
+        unsigned char d = 0x80 | n;
+        msgpack_pack_append_buffer(x, &TAKE8_8(d), 1);
+    } else if(n < 65536) {
+        unsigned char buf[3];
+        buf[0] = 0xde; _msgpack_store16(&buf[1], (uint16_t)n);
+        msgpack_pack_append_buffer(x, buf, 3);
+    } else {
+        unsigned char buf[5];
+        buf[0] = 0xdf; _msgpack_store32(&buf[1], (uint32_t)n);
+        msgpack_pack_append_buffer(x, buf, 5);
+    }
+}
+
+
+/*
+ * Raw
+ */
+
+/* Emit a raw/str header for l payload bytes (fixstr 0xa0|l, str8 0xd9,
+ * str16 0xda, str32 0xdb).  str8 is a msgpack-2.0 addition, so it is
+ * only used when the packer opted into the new bin/str formats. */
+static inline int msgpack_pack_raw(msgpack_packer* x, size_t l)
+{
+    if (l < 32) {
+        unsigned char d = 0xa0 | (uint8_t)l;
+        msgpack_pack_append_buffer(x, &TAKE8_8(d), 1);
+    } else if (x->use_bin_type && l < 256) {  // str8 is new format introduced with bin.
+        unsigned char buf[2] = {0xd9, (uint8_t)l};
+        msgpack_pack_append_buffer(x, buf, 2);
+    } else if (l < 65536) {
+        unsigned char buf[3];
+        buf[0] = 0xda; _msgpack_store16(&buf[1], (uint16_t)l);
+        msgpack_pack_append_buffer(x, buf, 3);
+    } else {
+        unsigned char buf[5];
+        buf[0] = 0xdb; _msgpack_store32(&buf[1], (uint32_t)l);
+        msgpack_pack_append_buffer(x, buf, 5);
+    }
+}
+
+/*
+ * bin
+ */
+/* Emit a bin header for l payload bytes (bin8 0xc4, bin16 0xc5,
+ * bin32 0xc6).  When the new formats are disabled, falls back to the
+ * legacy raw encoding for compatibility with old decoders. */
+static inline int msgpack_pack_bin(msgpack_packer *x, size_t l)
+{
+    if (!x->use_bin_type) {
+        return msgpack_pack_raw(x, l);
+    }
+    if (l < 256) {
+        unsigned char buf[2] = {0xc4, (unsigned char)l};
+        msgpack_pack_append_buffer(x, buf, 2);
+    } else if (l < 65536) {
+        unsigned char buf[3] = {0xc5};
+        _msgpack_store16(&buf[1], (uint16_t)l);
+        msgpack_pack_append_buffer(x, buf, 3);
+    } else {
+        unsigned char buf[5] = {0xc6};
+        _msgpack_store32(&buf[1], (uint32_t)l);
+        msgpack_pack_append_buffer(x, buf, 5);
+    }
+}
+
+/* Write the payload bytes following a raw/bin/ext header.  The macro
+ * returns on the l > 0 path; the trailing `return 0` covers the empty
+ * payload case. */
+static inline int msgpack_pack_raw_body(msgpack_packer* x, const void* b, size_t l)
+{
+    if (l > 0) msgpack_pack_append_buffer(x, (const unsigned char*)b, l);
+    return 0;
+}
+
+/*
+ * Ext
+ */
+/* Emit an ext header: fixext1/2/4/8/16 (0xd4-0xd8) for the exact
+ * sizes, otherwise ext8/16/32 (0xc7-0xc9).  The header carries the
+ * application typecode; the l payload bytes follow via raw_body. */
+static inline int msgpack_pack_ext(msgpack_packer* x, int8_t typecode, size_t l)
+{
+    if (l == 1) {
+        unsigned char buf[2];
+        buf[0] = 0xd4;
+        buf[1] = (unsigned char)typecode;
+        msgpack_pack_append_buffer(x, buf, 2);
+    }
+    else if(l == 2) {
+        unsigned char buf[2];
+        buf[0] = 0xd5;
+        buf[1] = (unsigned char)typecode;
+        msgpack_pack_append_buffer(x, buf, 2);
+    }
+    else if(l == 4) {
+        unsigned char buf[2];
+        buf[0] = 0xd6;
+        buf[1] = (unsigned char)typecode;
+        msgpack_pack_append_buffer(x, buf, 2);
+    }
+    else if(l == 8) {
+        unsigned char buf[2];
+        buf[0] = 0xd7;
+        buf[1] = (unsigned char)typecode;
+        msgpack_pack_append_buffer(x, buf, 2);
+    }
+    else if(l == 16) {
+        unsigned char buf[2];
+        buf[0] = 0xd8;
+        buf[1] = (unsigned char)typecode;
+        msgpack_pack_append_buffer(x, buf, 2);
+    }
+    else if(l < 256) {
+        unsigned char buf[3];
+        buf[0] = 0xc7;
+        /* implicit size_t -> unsigned char narrowing; safe here since
+         * l < 256, but an explicit cast would silence warnings */
+        buf[1] = l;
+        buf[2] = (unsigned char)typecode;
+        msgpack_pack_append_buffer(x, buf, 3);
+    } else if(l < 65536) {
+        unsigned char buf[4];
+        buf[0] = 0xc8;
+        _msgpack_store16(&buf[1], (uint16_t)l);
+        buf[3] = (unsigned char)typecode;
+        msgpack_pack_append_buffer(x, buf, 4);
+    } else {
+        /* NOTE(review): l is truncated to 32 bits here; lengths above
+         * UINT32_MAX are not representable in msgpack ext32 */
+        unsigned char buf[6];
+        buf[0] = 0xc9;
+        _msgpack_store32(&buf[1], (uint32_t)l);
+        buf[5] = (unsigned char)typecode;
+        msgpack_pack_append_buffer(x, buf, 6);
+    }
+
+}
+
+
+
+#undef msgpack_pack_append_buffer
+
+#undef TAKE8_8
+#undef TAKE8_16
+#undef TAKE8_32
+#undef TAKE8_64
+
+#undef msgpack_pack_real_uint8
+#undef msgpack_pack_real_uint16
+#undef msgpack_pack_real_uint32
+#undef msgpack_pack_real_uint64
+#undef msgpack_pack_real_int8
+#undef msgpack_pack_real_int16
+#undef msgpack_pack_real_int32
+#undef msgpack_pack_real_int64

+ 194 - 0
utils/exporters/blender/modules/msgpack/sysdep.h

@@ -0,0 +1,194 @@
+/*
+ * MessagePack system dependencies
+ *
+ * Copyright (C) 2008-2010 FURUHASHI Sadayuki
+ *
+ *    Licensed under the Apache License, Version 2.0 (the "License");
+ *    you may not use this file except in compliance with the License.
+ *    You may obtain a copy of the License at
+ *
+ *        http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *    Unless required by applicable law or agreed to in writing, software
+ *    distributed under the License is distributed on an "AS IS" BASIS,
+ *    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *    See the License for the specific language governing permissions and
+ *    limitations under the License.
+ */
+#ifndef MSGPACK_SYSDEP_H__
+#define MSGPACK_SYSDEP_H__
+
+#include <stdlib.h>
+#include <stddef.h>
+/* MSVC before VS2010 (1600) ships no <stdint.h>; provide the
+ * fixed-width typedefs from the __intN builtins instead. */
+#if defined(_MSC_VER) && _MSC_VER < 1600
+typedef __int8 int8_t;
+typedef unsigned __int8 uint8_t;
+typedef __int16 int16_t;
+typedef unsigned __int16 uint16_t;
+typedef __int32 int32_t;
+typedef unsigned __int32 uint32_t;
+typedef __int64 int64_t;
+typedef unsigned __int64 uint64_t;
+#elif defined(_MSC_VER)  // && _MSC_VER >= 1600
+#include <stdint.h>
+#else
+#include <stdint.h>
+#include <stdbool.h>
+#endif
+
+/* Atomic reference-count helpers: Interlocked* on Windows, GCC __sync
+ * builtins elsewhere, with a compatibility header for GCC < 4.1. */
+#ifdef _WIN32
+#define _msgpack_atomic_counter_header <windows.h>
+typedef long _msgpack_atomic_counter_t;
+#define _msgpack_sync_decr_and_fetch(ptr) InterlockedDecrement(ptr)
+#define _msgpack_sync_incr_and_fetch(ptr) InterlockedIncrement(ptr)
+#elif defined(__GNUC__) && ((__GNUC__*10 + __GNUC_MINOR__) < 41)
+#define _msgpack_atomic_counter_header "gcc_atomic.h"
+#else
+typedef unsigned int _msgpack_atomic_counter_t;
+#define _msgpack_sync_decr_and_fetch(ptr) __sync_sub_and_fetch(ptr, 1)
+#define _msgpack_sync_incr_and_fetch(ptr) __sync_add_and_fetch(ptr, 1)
+#endif
+
+#ifdef _WIN32
+
+#ifdef __cplusplus
+/* numeric_limits<T>::min,max */
+#ifdef max
+#undef max
+#endif
+#ifdef min
+#undef min
+#endif
+#endif
+
+#else
+#include <arpa/inet.h>  /* __BYTE_ORDER */
+#endif
+
+/* Derive __LITTLE_ENDIAN__ / __BIG_ENDIAN__ from <arpa/inet.h>'s
+ * __BYTE_ORDER when neither is predefined; Windows is assumed to be
+ * little-endian. */
+#if !defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__)
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+#define __LITTLE_ENDIAN__
+#elif __BYTE_ORDER == __BIG_ENDIAN
+#define __BIG_ENDIAN__
+#elif _WIN32
+#define __LITTLE_ENDIAN__
+#endif
+#endif
+
+
+#ifdef __LITTLE_ENDIAN__
+
+/* Little-endian host: _msgpack_beN(x) byte-swaps to the big-endian
+ * (network) order msgpack uses on the wire, preferring ntohs/ntohl or
+ * compiler byteswap intrinsics over the portable shift/mask fallback. */
+#ifdef _WIN32
+#  if defined(ntohs)
+#    define _msgpack_be16(x) ntohs(x)
+#  elif defined(_byteswap_ushort) || (defined(_MSC_VER) && _MSC_VER >= 1400)
+#    define _msgpack_be16(x) ((uint16_t)_byteswap_ushort((unsigned short)x))
+#  else
+#    define _msgpack_be16(x) ( \
+        ((((uint16_t)x) <<  8) ) | \
+        ((((uint16_t)x) >>  8) ) )
+#  endif
+#else
+#  define _msgpack_be16(x) ntohs(x)
+#endif
+
+#ifdef _WIN32
+#  if defined(ntohl)
+#    define _msgpack_be32(x) ntohl(x)
+#  elif defined(_byteswap_ulong) || (defined(_MSC_VER) && _MSC_VER >= 1400)
+#    define _msgpack_be32(x) ((uint32_t)_byteswap_ulong((unsigned long)x))
+#  else
+#    define _msgpack_be32(x) \
+        ( ((((uint32_t)x) << 24)               ) | \
+          ((((uint32_t)x) <<  8) & 0x00ff0000U ) | \
+          ((((uint32_t)x) >>  8) & 0x0000ff00U ) | \
+          ((((uint32_t)x) >> 24)               ) )
+#  endif
+#else
+#  define _msgpack_be32(x) ntohl(x)
+#endif
+
+/* No ntoh for 64 bits: use MSVC / bswap_64 / Darwin intrinsics where
+ * available, otherwise the portable 8-way shift/mask swap. */
+#if defined(_byteswap_uint64) || (defined(_MSC_VER) && _MSC_VER >= 1400)
+#  define _msgpack_be64(x) (_byteswap_uint64(x))
+#elif defined(bswap_64)
+#  define _msgpack_be64(x) bswap_64(x)
+#elif defined(__DARWIN_OSSwapInt64)
+#  define _msgpack_be64(x) __DARWIN_OSSwapInt64(x)
+#else
+#define _msgpack_be64(x) \
+    ( ((((uint64_t)x) << 56)                         ) | \
+      ((((uint64_t)x) << 40) & 0x00ff000000000000ULL ) | \
+      ((((uint64_t)x) << 24) & 0x0000ff0000000000ULL ) | \
+      ((((uint64_t)x) <<  8) & 0x000000ff00000000ULL ) | \
+      ((((uint64_t)x) >>  8) & 0x00000000ff000000ULL ) | \
+      ((((uint64_t)x) >> 24) & 0x0000000000ff0000ULL ) | \
+      ((((uint64_t)x) >> 40) & 0x000000000000ff00ULL ) | \
+      ((((uint64_t)x) >> 56)                         ) )
+#endif
+
+/* _msgpack_loadN(cast, from): read an N-bit big-endian value from a
+ * possibly-unaligned byte pointer by assembling individual bytes
+ * (alignment-safe on all architectures). */
+#define _msgpack_load16(cast, from) ((cast)( \
+        (((uint16_t)((uint8_t*)(from))[0]) << 8) | \
+        (((uint16_t)((uint8_t*)(from))[1])     ) ))
+
+#define _msgpack_load32(cast, from) ((cast)( \
+        (((uint32_t)((uint8_t*)(from))[0]) << 24) | \
+        (((uint32_t)((uint8_t*)(from))[1]) << 16) | \
+        (((uint32_t)((uint8_t*)(from))[2]) <<  8) | \
+        (((uint32_t)((uint8_t*)(from))[3])      ) ))
+
+#define _msgpack_load64(cast, from) ((cast)( \
+        (((uint64_t)((uint8_t*)(from))[0]) << 56) | \
+        (((uint64_t)((uint8_t*)(from))[1]) << 48) | \
+        (((uint64_t)((uint8_t*)(from))[2]) << 40) | \
+        (((uint64_t)((uint8_t*)(from))[3]) << 32) | \
+        (((uint64_t)((uint8_t*)(from))[4]) << 24) | \
+        (((uint64_t)((uint8_t*)(from))[5]) << 16) | \
+        (((uint64_t)((uint8_t*)(from))[6]) << 8)  | \
+        (((uint64_t)((uint8_t*)(from))[7])     )  ))
+
+#else
+
+/* Big-endian host: already in wire order, so the byte-swaps are
+ * identity; the byte-wise loads remain for unaligned access safety. */
+#define _msgpack_be16(x) (x)
+#define _msgpack_be32(x) (x)
+#define _msgpack_be64(x) (x)
+
+#define _msgpack_load16(cast, from) ((cast)( \
+        (((uint16_t)((uint8_t*)from)[0]) << 8) | \
+        (((uint16_t)((uint8_t*)from)[1])     ) ))
+
+#define _msgpack_load32(cast, from) ((cast)( \
+        (((uint32_t)((uint8_t*)from)[0]) << 24) | \
+        (((uint32_t)((uint8_t*)from)[1]) << 16) | \
+        (((uint32_t)((uint8_t*)from)[2]) <<  8) | \
+        (((uint32_t)((uint8_t*)from)[3])      ) ))
+
+#define _msgpack_load64(cast, from) ((cast)( \
+        (((uint64_t)((uint8_t*)from)[0]) << 56) | \
+        (((uint64_t)((uint8_t*)from)[1]) << 48) | \
+        (((uint64_t)((uint8_t*)from)[2]) << 40) | \
+        (((uint64_t)((uint8_t*)from)[3]) << 32) | \
+        (((uint64_t)((uint8_t*)from)[4]) << 24) | \
+        (((uint64_t)((uint8_t*)from)[5]) << 16) | \
+        (((uint64_t)((uint8_t*)from)[6]) << 8)  | \
+        (((uint64_t)((uint8_t*)from)[7])     )  ))
+#endif
+
+
+/* _msgpack_storeN(to, num): write num in big-endian order to a
+ * possibly-unaligned destination; memcpy keeps the store
+ * alignment-safe. */
+#define _msgpack_store16(to, num) \
+    do { uint16_t val = _msgpack_be16(num); memcpy(to, &val, 2); } while(0)
+#define _msgpack_store32(to, num) \
+    do { uint32_t val = _msgpack_be32(num); memcpy(to, &val, 4); } while(0)
+#define _msgpack_store64(to, num) \
+    do { uint64_t val = _msgpack_be64(num); memcpy(to, &val, 8); } while(0)
+
+/*
+#define _msgpack_load16(cast, from) \
+    ({ cast val; memcpy(&val, (char*)from, 2); _msgpack_be16(val); })
+#define _msgpack_load32(cast, from) \
+    ({ cast val; memcpy(&val, (char*)from, 4); _msgpack_be32(val); })
+#define _msgpack_load64(cast, from) \
+    ({ cast val; memcpy(&val, (char*)from, 8); _msgpack_be64(val); })
+*/
+
+
+#endif /* msgpack/sysdep.h */

+ 263 - 0
utils/exporters/blender/modules/msgpack/unpack.h

@@ -0,0 +1,263 @@
+/*
+ * MessagePack for Python unpacking routine
+ *
+ * Copyright (C) 2009 Naoki INADA
+ *
+ *    Licensed under the Apache License, Version 2.0 (the "License");
+ *    you may not use this file except in compliance with the License.
+ *    You may obtain a copy of the License at
+ *
+ *        http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *    Unless required by applicable law or agreed to in writing, software
+ *    distributed under the License is distributed on an "AS IS" BASIS,
+ *    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *    See the License for the specific language governing permissions and
+ *    limitations under the License.
+ */
+
+#define MSGPACK_EMBED_STACK_SIZE  (1024)
+#include "unpack_define.h"
+
+/* Per-unpacker options and Python-level hooks consulted by the
+ * unpack_callback_* functions below. */
+typedef struct unpack_user {
+    /* nonzero: decode msgpack arrays as Python lists, else tuples */
+    int use_list;
+    /* optional callable applied to every finished map */
+    PyObject *object_hook;
+    /* nonzero: build maps as a list of (key, value) tuples instead of
+     * a dict, for object_pairs_hook-style processing */
+    bool has_pairs_hook;
+    /* optional callable applied to every finished array */
+    PyObject *list_hook;
+    /* required callable for ext types: ext_hook(typecode, data) */
+    PyObject *ext_hook;
+    /* if non-NULL, raw bytes are decoded to unicode with this codec */
+    const char *encoding;
+    const char *unicode_errors;
+} unpack_user;
+
+typedef PyObject* msgpack_unpack_object;
+struct unpack_context;
+typedef struct unpack_context unpack_context;
+typedef int (*execute_fn)(unpack_context *ctx, const char* data, size_t len, size_t* off);
+
+/* All callbacks below store a new Python reference into *o and return
+ * 0 on success, -1 on failure. */
+static inline msgpack_unpack_object unpack_callback_root(unpack_user* u)
+{
+    return NULL;
+}
+
+static inline int unpack_callback_uint16(unpack_user* u, uint16_t d, msgpack_unpack_object* o)
+{
+    PyObject *p = PyInt_FromLong((long)d);
+    if (!p)
+        return -1;
+    *o = p;
+    return 0;
+}
+/* uint8 always fits in a C long, so it shares the uint16 path. */
+static inline int unpack_callback_uint8(unpack_user* u, uint8_t d, msgpack_unpack_object* o)
+{
+    return unpack_callback_uint16(u, d, o);
+}
+
+
+/* uint32: on 32-bit platforms (where long is 32 bits) values above
+ * LONG_MAX must go through PyLong; otherwise a plain int suffices. */
+static inline int unpack_callback_uint32(unpack_user* u, uint32_t d, msgpack_unpack_object* o)
+{
+    PyObject *p;
+#if UINT32_MAX > LONG_MAX
+    if (d > LONG_MAX) {
+        p = PyLong_FromUnsignedLong((unsigned long)d);
+    } else
+#endif
+    {
+        p = PyInt_FromLong((long)d);
+    }
+    if (!p)
+        return -1;
+    *o = p;
+    return 0;
+}
+
+/* uint64: anything above LONG_MAX needs an unsigned PyLong. */
+static inline int unpack_callback_uint64(unpack_user* u, uint64_t d, msgpack_unpack_object* o)
+{
+    PyObject *p;
+    if (d > LONG_MAX) {
+        p = PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG)d);
+    } else {
+        p = PyInt_FromLong((long)d);
+    }
+    if (!p)
+        return -1;
+    *o = p;
+    return 0;
+}
+
+static inline int unpack_callback_int32(unpack_user* u, int32_t d, msgpack_unpack_object* o)
+{
+    PyObject *p = PyInt_FromLong(d);
+    if (!p)
+        return -1;
+    *o = p;
+    return 0;
+}
+
+static inline int unpack_callback_int16(unpack_user* u, int16_t d, msgpack_unpack_object* o)
+{
+    return unpack_callback_int32(u, d, o);
+}
+
+static inline int unpack_callback_int8(unpack_user* u, int8_t d, msgpack_unpack_object* o)
+{
+    return unpack_callback_int32(u, d, o);
+}
+
+static inline int unpack_callback_int64(unpack_user* u, int64_t d, msgpack_unpack_object* o)
+{
+    PyObject *p;
+    if (d > LONG_MAX || d < LONG_MIN) {
+        /* NOTE(review): casting a signed value to unsigned
+         * PY_LONG_LONG before PyLong_FromLongLong looks wrong --
+         * presumably it should be a signed cast; verify against
+         * upstream msgpack-python. */
+        p = PyLong_FromLongLong((unsigned PY_LONG_LONG)d);
+    } else {
+        p = PyInt_FromLong((long)d);
+    }
+    /* NOTE(review): unlike every other callback here, p is NOT checked
+     * for NULL before being stored -- an allocation failure would put
+     * NULL into *o instead of returning -1. */
+    *o = p;
+    return 0;
+}
+
+static inline int unpack_callback_double(unpack_user* u, double d, msgpack_unpack_object* o)
+{
+    PyObject *p = PyFloat_FromDouble(d);
+    if (!p)
+        return -1;
+    *o = p;
+    return 0;
+}
+
+/* float32 is widened to a Python float (double) on decode. */
+static inline int unpack_callback_float(unpack_user* u, float d, msgpack_unpack_object* o)
+{
+    return unpack_callback_double(u, d, o);
+}
+
+/* Singletons: return borrowed-global None/True/False with an added
+ * reference, as the caller will own *o. */
+static inline int unpack_callback_nil(unpack_user* u, msgpack_unpack_object* o)
+{ Py_INCREF(Py_None); *o = Py_None; return 0; }
+
+static inline int unpack_callback_true(unpack_user* u, msgpack_unpack_object* o)
+{ Py_INCREF(Py_True); *o = Py_True; return 0; }
+
+static inline int unpack_callback_false(unpack_user* u, msgpack_unpack_object* o)
+{ Py_INCREF(Py_False); *o = Py_False; return 0; }
+
+/* Allocate the container for an n-element msgpack array: a list when
+ * use_list is set, otherwise a tuple. */
+static inline int unpack_callback_array(unpack_user* u, unsigned int n, msgpack_unpack_object* o)
+{
+    PyObject *p = u->use_list ? PyList_New(n) : PyTuple_New(n);
+
+    if (!p)
+        return -1;
+    *o = p;
+    return 0;
+}
+
+/* Place item o at index `current`; SET_ITEM steals the reference, so
+ * ownership of o transfers to the container. */
+static inline int unpack_callback_array_item(unpack_user* u, unsigned int current, msgpack_unpack_object* c, msgpack_unpack_object o)
+{
+    if (u->use_list)
+        PyList_SET_ITEM(*c, current, o)
+    else
+        PyTuple_SET_ITEM(*c, current, o);
+    return 0;
+}
+
+/* When a list_hook is configured, replace the finished array with the
+ * hook's return value. */
+static inline int unpack_callback_array_end(unpack_user* u, msgpack_unpack_object* c)
+{
+    if (u->list_hook) {
+        PyObject *new_c = PyObject_CallFunctionObjArgs(u->list_hook, *c, NULL);
+        if (!new_c)
+            return -1;
+        Py_DECREF(*c);
+        *c = new_c;
+    }
+    return 0;
+}
+
+/* Allocate the container for an n-pair msgpack map: a list of pairs
+ * when a pairs hook is configured, otherwise a dict. */
+static inline int unpack_callback_map(unpack_user* u, unsigned int n, msgpack_unpack_object* o)
+{
+    PyObject *p;
+    if (u->has_pairs_hook) {
+        p = PyList_New(n); // Or use tuple?
+    }
+    else {
+        p = PyDict_New();
+    }
+    if (!p)
+        return -1;
+    *o = p;
+    return 0;
+}
+
+/* Insert one key/value pair.  On the successful paths k and v are
+ * released here (PyTuple_Pack and PyDict_SetItem each take their own
+ * references). */
+static inline int unpack_callback_map_item(unpack_user* u, unsigned int current, msgpack_unpack_object* c, msgpack_unpack_object k, msgpack_unpack_object v)
+{
+    if (u->has_pairs_hook) {
+        msgpack_unpack_object item = PyTuple_Pack(2, k, v);
+        if (!item)
+            return -1;
+        Py_DECREF(k);
+        Py_DECREF(v);
+        PyList_SET_ITEM(*c, current, item);
+        return 0;
+    }
+    else if (PyDict_SetItem(*c, k, v) == 0) {
+        Py_DECREF(k);
+        Py_DECREF(v);
+        return 0;
+    }
+    /* NOTE(review): on the failure paths k and v are not released
+     * here; presumably the caller cleans them up -- verify, otherwise
+     * this leaks references on error. */
+    return -1;
+}
+
+/* When an object_hook is configured, replace the finished map (or
+ * pairs list) with the hook's return value. */
+static inline int unpack_callback_map_end(unpack_user* u, msgpack_unpack_object* c)
+{
+    if (u->object_hook) {
+        PyObject *new_c = PyObject_CallFunctionObjArgs(u->object_hook, *c, NULL);
+        if (!new_c)
+            return -1;
+
+        Py_DECREF(*c);
+        *c = new_c;
+    }
+    return 0;
+}
+
+/* raw/str payload: decoded to unicode when an encoding was supplied,
+ * otherwise returned as bytes.  b is the buffer base, p the payload
+ * start, l its length. */
+static inline int unpack_callback_raw(unpack_user* u, const char* b, const char* p, unsigned int l, msgpack_unpack_object* o)
+{
+    PyObject *py;
+    if(u->encoding) {
+        py = PyUnicode_Decode(p, l, u->encoding, u->unicode_errors);
+    } else {
+        py = PyBytes_FromStringAndSize(p, l);
+    }
+    if (!py)
+        return -1;
+    *o = py;
+    return 0;
+}
+
+/* bin payload: always raw bytes, never decoded. */
+static inline int unpack_callback_bin(unpack_user* u, const char* b, const char* p, unsigned int l, msgpack_unpack_object* o)
+{
+    PyObject *py = PyBytes_FromStringAndSize(p, l);
+    if (!py)
+        return -1;
+    *o = py;
+    return 0;
+}
+
+/* ext payload: first byte at pos is the typecode, the remaining
+ * lenght-1 bytes are handed to ext_hook(typecode, data).
+ * NOTE(review): "lenght" is a typo for "length" carried through the
+ * parameter name and comment below. */
+static inline int unpack_callback_ext(unpack_user* u, const char* base, const char* pos,
+                                      unsigned int lenght, msgpack_unpack_object* o)
+{
+    PyObject *py;
+    int8_t typecode = (int8_t)*pos++;
+    if (!u->ext_hook) {
+        PyErr_SetString(PyExc_AssertionError, "u->ext_hook cannot be NULL");
+        return -1;
+    }
+    // length also includes the typecode, so the actual data is lenght-1
+    /* "s#" passes bytes/str on Python 2, "y#" passes bytes on Python 3 */
+#if PY_MAJOR_VERSION == 2
+    py = PyObject_CallFunction(u->ext_hook, "(is#)", typecode, pos, lenght-1);
+#else
+    py = PyObject_CallFunction(u->ext_hook, "(iy#)", typecode, pos, lenght-1);
+#endif
+    if (!py)
+        return -1;
+    *o = py;
+    return 0;
+}
+
+#include "unpack_template.h"

+ 95 - 0
utils/exporters/blender/modules/msgpack/unpack_define.h

@@ -0,0 +1,95 @@
+/*
+ * MessagePack unpacking routine template
+ *
+ * Copyright (C) 2008-2010 FURUHASHI Sadayuki
+ *
+ *    Licensed under the Apache License, Version 2.0 (the "License");
+ *    you may not use this file except in compliance with the License.
+ *    You may obtain a copy of the License at
+ *
+ *        http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *    Unless required by applicable law or agreed to in writing, software
+ *    distributed under the License is distributed on an "AS IS" BASIS,
+ *    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *    See the License for the specific language governing permissions and
+ *    limitations under the License.
+ */
+#ifndef MSGPACK_UNPACK_DEFINE_H__
+#define MSGPACK_UNPACK_DEFINE_H__
+
+#include "msgpack/sysdep.h"
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include <stdio.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#ifndef MSGPACK_EMBED_STACK_SIZE
+#define MSGPACK_EMBED_STACK_SIZE 32
+#endif
+
+
+// CS is first byte & 0x1f
+typedef enum {
+    CS_HEADER            = 0x00,  // nil
+
+    //CS_                = 0x01,
+    //CS_                = 0x02,  // false
+    //CS_                = 0x03,  // true
+
+    CS_BIN_8             = 0x04,
+    CS_BIN_16            = 0x05,
+    CS_BIN_32            = 0x06,
+
+    CS_EXT_8             = 0x07,
+    CS_EXT_16            = 0x08,
+    CS_EXT_32            = 0x09,
+
+    CS_FLOAT             = 0x0a,
+    CS_DOUBLE            = 0x0b,
+    CS_UINT_8            = 0x0c,
+    CS_UINT_16           = 0x0d,
+    CS_UINT_32           = 0x0e,
+    CS_UINT_64           = 0x0f,
+    CS_INT_8             = 0x10,
+    CS_INT_16            = 0x11,
+    CS_INT_32            = 0x12,
+    CS_INT_64            = 0x13,
+
+    //CS_FIXEXT1           = 0x14,
+    //CS_FIXEXT2           = 0x15,
+    //CS_FIXEXT4           = 0x16,
+    //CS_FIXEXT8           = 0x17,
+    //CS_FIXEXT16          = 0x18,
+
+    CS_RAW_8             = 0x19,
+    CS_RAW_16            = 0x1a,
+    CS_RAW_32            = 0x1b,
+    CS_ARRAY_16          = 0x1c,
+    CS_ARRAY_32          = 0x1d,
+    CS_MAP_16            = 0x1e,
+    CS_MAP_32            = 0x1f,
+
+    ACS_RAW_VALUE,
+    ACS_BIN_VALUE,
+    ACS_EXT_VALUE,
+} msgpack_unpack_state;
+
+
+typedef enum {
+    CT_ARRAY_ITEM,
+    CT_MAP_KEY,
+    CT_MAP_VALUE,
+} msgpack_container_type;
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* msgpack/unpack_define.h */

+ 475 - 0
utils/exporters/blender/modules/msgpack/unpack_template.h

@@ -0,0 +1,475 @@
+/*
+ * MessagePack unpacking routine template
+ *
+ * Copyright (C) 2008-2010 FURUHASHI Sadayuki
+ *
+ *    Licensed under the Apache License, Version 2.0 (the "License");
+ *    you may not use this file except in compliance with the License.
+ *    You may obtain a copy of the License at
+ *
+ *        http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *    Unless required by applicable law or agreed to in writing, software
+ *    distributed under the License is distributed on an "AS IS" BASIS,
+ *    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *    See the License for the specific language governing permissions and
+ *    limitations under the License.
+ */
+
+#ifndef USE_CASE_RANGE
+#if !defined(_MSC_VER)
+#define USE_CASE_RANGE
+#endif
+#endif
+
+typedef struct unpack_stack {
+    PyObject* obj;
+    size_t size;
+    size_t count;
+    unsigned int ct;
+    PyObject* map_key;
+} unpack_stack;
+
+struct unpack_context {
+    unpack_user user;
+    unsigned int cs;
+    unsigned int trail;
+    unsigned int top;
+    /*
+    unpack_stack* stack;
+    unsigned int stack_size;
+    unpack_stack embed_stack[MSGPACK_EMBED_STACK_SIZE];
+    */
+    unpack_stack stack[MSGPACK_EMBED_STACK_SIZE];
+};
+
+
+static inline void unpack_init(unpack_context* ctx)
+{
+    ctx->cs = CS_HEADER;
+    ctx->trail = 0;
+    ctx->top = 0;
+    /*
+    ctx->stack = ctx->embed_stack;
+    ctx->stack_size = MSGPACK_EMBED_STACK_SIZE;
+    */
+    ctx->stack[0].obj = unpack_callback_root(&ctx->user);
+}
+
+/*
+static inline void unpack_destroy(unpack_context* ctx)
+{
+    if(ctx->stack_size != MSGPACK_EMBED_STACK_SIZE) {
+        free(ctx->stack);
+    }
+}
+*/
+
+static inline PyObject* unpack_data(unpack_context* ctx)
+{
+    return (ctx)->stack[0].obj;
+}
+
+
+template <bool construct>
+static inline int unpack_execute(unpack_context* ctx, const char* data, size_t len, size_t* off)
+{
+    assert(len >= *off);
+
+    const unsigned char* p = (unsigned char*)data + *off;
+    const unsigned char* const pe = (unsigned char*)data + len;
+    const void* n = NULL;
+
+    unsigned int trail = ctx->trail;
+    unsigned int cs = ctx->cs;
+    unsigned int top = ctx->top;
+    unpack_stack* stack = ctx->stack;
+    /*
+    unsigned int stack_size = ctx->stack_size;
+    */
+    unpack_user* user = &ctx->user;
+
+    PyObject* obj;
+    unpack_stack* c = NULL;
+
+    int ret;
+
+#define construct_cb(name) \
+    construct && unpack_callback ## name
+
+#define push_simple_value(func) \
+    if(construct_cb(func)(user, &obj) < 0) { goto _failed; } \
+    goto _push
+#define push_fixed_value(func, arg) \
+    if(construct_cb(func)(user, arg, &obj) < 0) { goto _failed; } \
+    goto _push
+#define push_variable_value(func, base, pos, len) \
+    if(construct_cb(func)(user, \
+        (const char*)base, (const char*)pos, len, &obj) < 0) { goto _failed; } \
+    goto _push
+
+#define again_fixed_trail(_cs, trail_len) \
+    trail = trail_len; \
+    cs = _cs; \
+    goto _fixed_trail_again
+#define again_fixed_trail_if_zero(_cs, trail_len, ifzero) \
+    trail = trail_len; \
+    if(trail == 0) { goto ifzero; } \
+    cs = _cs; \
+    goto _fixed_trail_again
+
+#define start_container(func, count_, ct_) \
+    if(top >= MSGPACK_EMBED_STACK_SIZE) { goto _failed; } /* FIXME */ \
+    if(construct_cb(func)(user, count_, &stack[top].obj) < 0) { goto _failed; } \
+    if((count_) == 0) { obj = stack[top].obj; \
+        if (construct_cb(func##_end)(user, &obj) < 0) { goto _failed; } \
+        goto _push; } \
+    stack[top].ct = ct_; \
+    stack[top].size  = count_; \
+    stack[top].count = 0; \
+    ++top; \
+    /*printf("container %d count %d stack %d\n",stack[top].obj,count_,top);*/ \
+    /*printf("stack push %d\n", top);*/ \
+    /* FIXME \
+    if(top >= stack_size) { \
+        if(stack_size == MSGPACK_EMBED_STACK_SIZE) { \
+            size_t csize = sizeof(unpack_stack) * MSGPACK_EMBED_STACK_SIZE; \
+            size_t nsize = csize * 2; \
+            unpack_stack* tmp = (unpack_stack*)malloc(nsize); \
+            if(tmp == NULL) { goto _failed; } \
+            memcpy(tmp, ctx->stack, csize); \
+            ctx->stack = stack = tmp; \
+            ctx->stack_size = stack_size = MSGPACK_EMBED_STACK_SIZE * 2; \
+        } else { \
+            size_t nsize = sizeof(unpack_stack) * ctx->stack_size * 2; \
+            unpack_stack* tmp = (unpack_stack*)realloc(ctx->stack, nsize); \
+            if(tmp == NULL) { goto _failed; } \
+            ctx->stack = stack = tmp; \
+            ctx->stack_size = stack_size = stack_size * 2; \
+        } \
+    } \
+    */ \
+    goto _header_again
+
+#define NEXT_CS(p)  ((unsigned int)*p & 0x1f)
+
+#ifdef USE_CASE_RANGE
+#define SWITCH_RANGE_BEGIN     switch(*p) {
+#define SWITCH_RANGE(FROM, TO) case FROM ... TO:
+#define SWITCH_RANGE_DEFAULT   default:
+#define SWITCH_RANGE_END       }
+#else
+#define SWITCH_RANGE_BEGIN     { if(0) {
+#define SWITCH_RANGE(FROM, TO) } else if(FROM <= *p && *p <= TO) {
+#define SWITCH_RANGE_DEFAULT   } else {
+#define SWITCH_RANGE_END       } }
+#endif
+
+    if(p == pe) { goto _out; }
+    do {
+        switch(cs) {
+        case CS_HEADER:
+            SWITCH_RANGE_BEGIN
+            SWITCH_RANGE(0x00, 0x7f)  // Positive Fixnum
+                push_fixed_value(_uint8, *(uint8_t*)p);
+            SWITCH_RANGE(0xe0, 0xff)  // Negative Fixnum
+                push_fixed_value(_int8, *(int8_t*)p);
+            SWITCH_RANGE(0xc0, 0xdf)  // Variable
+                switch(*p) {
+                case 0xc0:  // nil
+                    push_simple_value(_nil);
+                //case 0xc1:  // never used
+                case 0xc2:  // false
+                    push_simple_value(_false);
+                case 0xc3:  // true
+                    push_simple_value(_true);
+                case 0xc4:  // bin 8
+                    again_fixed_trail(NEXT_CS(p), 1);
+                case 0xc5:  // bin 16
+                    again_fixed_trail(NEXT_CS(p), 2);
+                case 0xc6:  // bin 32
+                    again_fixed_trail(NEXT_CS(p), 4);
+                case 0xc7:  // ext 8
+                    again_fixed_trail(NEXT_CS(p), 1);
+                case 0xc8:  // ext 16
+                    again_fixed_trail(NEXT_CS(p), 2);
+                case 0xc9:  // ext 32
+                    again_fixed_trail(NEXT_CS(p), 4);
+                case 0xca:  // float
+                case 0xcb:  // double
+                case 0xcc:  // unsigned int  8
+                case 0xcd:  // unsigned int 16
+                case 0xce:  // unsigned int 32
+                case 0xcf:  // unsigned int 64
+                case 0xd0:  // signed int  8
+                case 0xd1:  // signed int 16
+                case 0xd2:  // signed int 32
+                case 0xd3:  // signed int 64
+                    again_fixed_trail(NEXT_CS(p), 1 << (((unsigned int)*p) & 0x03));
+                case 0xd4:  // fixext 1
+                case 0xd5:  // fixext 2
+                case 0xd6:  // fixext 4
+                case 0xd7:  // fixext 8
+                    again_fixed_trail_if_zero(ACS_EXT_VALUE, 
+                                              (1 << (((unsigned int)*p) & 0x03))+1,
+                                              _ext_zero);
+                case 0xd8:  // fixext 16
+                    again_fixed_trail_if_zero(ACS_EXT_VALUE, 16+1, _ext_zero);
+                case 0xd9:  // str 8
+                    again_fixed_trail(NEXT_CS(p), 1);
+                case 0xda:  // raw 16
+                case 0xdb:  // raw 32
+                case 0xdc:  // array 16
+                case 0xdd:  // array 32
+                case 0xde:  // map 16
+                case 0xdf:  // map 32
+                    again_fixed_trail(NEXT_CS(p), 2 << (((unsigned int)*p) & 0x01));
+                default:
+                    goto _failed;
+                }
+            SWITCH_RANGE(0xa0, 0xbf)  // FixRaw
+                again_fixed_trail_if_zero(ACS_RAW_VALUE, ((unsigned int)*p & 0x1f), _raw_zero);
+            SWITCH_RANGE(0x90, 0x9f)  // FixArray
+                start_container(_array, ((unsigned int)*p) & 0x0f, CT_ARRAY_ITEM);
+            SWITCH_RANGE(0x80, 0x8f)  // FixMap
+                start_container(_map, ((unsigned int)*p) & 0x0f, CT_MAP_KEY);
+
+            SWITCH_RANGE_DEFAULT
+                goto _failed;
+            SWITCH_RANGE_END
+            // end CS_HEADER
+
+
+        _fixed_trail_again:
+            ++p;
+
+        default:
+            if((size_t)(pe - p) < trail) { goto _out; }
+            n = p;  p += trail - 1;
+            switch(cs) {
+            case CS_EXT_8:
+                again_fixed_trail_if_zero(ACS_EXT_VALUE, *(uint8_t*)n+1, _ext_zero);
+            case CS_EXT_16:
+                again_fixed_trail_if_zero(ACS_EXT_VALUE,
+                                          _msgpack_load16(uint16_t,n)+1,
+                                          _ext_zero);
+            case CS_EXT_32:
+                again_fixed_trail_if_zero(ACS_EXT_VALUE,
+                                          _msgpack_load32(uint32_t,n)+1,
+                                          _ext_zero);
+            case CS_FLOAT: {
+                    union { uint32_t i; float f; } mem;
+                    mem.i = _msgpack_load32(uint32_t,n);
+                    push_fixed_value(_float, mem.f); }
+            case CS_DOUBLE: {
+                    union { uint64_t i; double f; } mem;
+                    mem.i = _msgpack_load64(uint64_t,n);
+#if defined(__arm__) && !(__ARM_EABI__) // arm-oabi
+                    // https://github.com/msgpack/msgpack-perl/pull/1
+                    mem.i = (mem.i & 0xFFFFFFFFUL) << 32UL | (mem.i >> 32UL);
+#endif
+                    push_fixed_value(_double, mem.f); }
+            case CS_UINT_8:
+                push_fixed_value(_uint8, *(uint8_t*)n);
+            case CS_UINT_16:
+                push_fixed_value(_uint16, _msgpack_load16(uint16_t,n));
+            case CS_UINT_32:
+                push_fixed_value(_uint32, _msgpack_load32(uint32_t,n));
+            case CS_UINT_64:
+                push_fixed_value(_uint64, _msgpack_load64(uint64_t,n));
+
+            case CS_INT_8:
+                push_fixed_value(_int8, *(int8_t*)n);
+            case CS_INT_16:
+                push_fixed_value(_int16, _msgpack_load16(int16_t,n));
+            case CS_INT_32:
+                push_fixed_value(_int32, _msgpack_load32(int32_t,n));
+            case CS_INT_64:
+                push_fixed_value(_int64, _msgpack_load64(int64_t,n));
+
+            case CS_BIN_8:
+                again_fixed_trail_if_zero(ACS_BIN_VALUE, *(uint8_t*)n, _bin_zero);
+            case CS_BIN_16:
+                again_fixed_trail_if_zero(ACS_BIN_VALUE, _msgpack_load16(uint16_t,n), _bin_zero);
+            case CS_BIN_32:
+                again_fixed_trail_if_zero(ACS_BIN_VALUE, _msgpack_load32(uint32_t,n), _bin_zero);
+            case ACS_BIN_VALUE:
+            _bin_zero:
+                push_variable_value(_bin, data, n, trail);
+
+            case CS_RAW_8:
+                again_fixed_trail_if_zero(ACS_RAW_VALUE, *(uint8_t*)n, _raw_zero);
+            case CS_RAW_16:
+                again_fixed_trail_if_zero(ACS_RAW_VALUE, _msgpack_load16(uint16_t,n), _raw_zero);
+            case CS_RAW_32:
+                again_fixed_trail_if_zero(ACS_RAW_VALUE, _msgpack_load32(uint32_t,n), _raw_zero);
+            case ACS_RAW_VALUE:
+            _raw_zero:
+                push_variable_value(_raw, data, n, trail);
+
+            case ACS_EXT_VALUE:
+            _ext_zero:
+                push_variable_value(_ext, data, n, trail);
+
+            case CS_ARRAY_16:
+                start_container(_array, _msgpack_load16(uint16_t,n), CT_ARRAY_ITEM);
+            case CS_ARRAY_32:
+                /* FIXME security guard */
+                start_container(_array, _msgpack_load32(uint32_t,n), CT_ARRAY_ITEM);
+
+            case CS_MAP_16:
+                start_container(_map, _msgpack_load16(uint16_t,n), CT_MAP_KEY);
+            case CS_MAP_32:
+                /* FIXME security guard */
+                start_container(_map, _msgpack_load32(uint32_t,n), CT_MAP_KEY);
+
+            default:
+                goto _failed;
+            }
+        }
+
+_push:
+    if(top == 0) { goto _finish; }
+    c = &stack[top-1];
+    switch(c->ct) {
+    case CT_ARRAY_ITEM:
+        if(construct_cb(_array_item)(user, c->count, &c->obj, obj) < 0) { goto _failed; }
+        if(++c->count == c->size) {
+            obj = c->obj;
+            if (construct_cb(_array_end)(user, &obj) < 0) { goto _failed; }
+            --top;
+            /*printf("stack pop %d\n", top);*/
+            goto _push;
+        }
+        goto _header_again;
+    case CT_MAP_KEY:
+        c->map_key = obj;
+        c->ct = CT_MAP_VALUE;
+        goto _header_again;
+    case CT_MAP_VALUE:
+        if(construct_cb(_map_item)(user, c->count, &c->obj, c->map_key, obj) < 0) { goto _failed; }
+        if(++c->count == c->size) {
+            obj = c->obj;
+            if (construct_cb(_map_end)(user, &obj) < 0) { goto _failed; }
+            --top;
+            /*printf("stack pop %d\n", top);*/
+            goto _push;
+        }
+        c->ct = CT_MAP_KEY;
+        goto _header_again;
+
+    default:
+        goto _failed;
+    }
+
+_header_again:
+        cs = CS_HEADER;
+        ++p;
+    } while(p != pe);
+    goto _out;
+
+
+_finish:
+    if (!construct)
+        unpack_callback_nil(user, &obj);
+    stack[0].obj = obj;
+    ++p;
+    ret = 1;
+    /*printf("-- finish --\n"); */
+    goto _end;
+
+_failed:
+    /*printf("** FAILED **\n"); */
+    ret = -1;
+    goto _end;
+
+_out:
+    ret = 0;
+    goto _end;
+
+_end:
+    ctx->cs = cs;
+    ctx->trail = trail;
+    ctx->top = top;
+    *off = p - (const unsigned char*)data;
+
+    return ret;
+#undef construct_cb
+}
+
+#undef SWITCH_RANGE_BEGIN
+#undef SWITCH_RANGE
+#undef SWITCH_RANGE_DEFAULT
+#undef SWITCH_RANGE_END
+#undef push_simple_value
+#undef push_fixed_value
+#undef push_variable_value
+#undef again_fixed_trail
+#undef again_fixed_trail_if_zero
+#undef start_container
+
+template <unsigned int fixed_offset, unsigned int var_offset>
+static inline int unpack_container_header(unpack_context* ctx, const char* data, size_t len, size_t* off)
+{
+    assert(len >= *off);
+    uint32_t size;
+    const unsigned char *const p = (unsigned char*)data + *off;
+
+#define inc_offset(inc) \
+    if (len - *off < inc) \
+        return 0; \
+    *off += inc;
+
+    switch (*p) {
+    case var_offset:
+        inc_offset(3);
+        size = _msgpack_load16(uint16_t, p + 1);
+        break;
+    case var_offset + 1:
+        inc_offset(5);
+        size = _msgpack_load32(uint32_t, p + 1);
+        break;
+#ifdef USE_CASE_RANGE
+    case fixed_offset + 0x0 ... fixed_offset + 0xf:
+#else
+    case fixed_offset + 0x0:
+    case fixed_offset + 0x1:
+    case fixed_offset + 0x2:
+    case fixed_offset + 0x3:
+    case fixed_offset + 0x4:
+    case fixed_offset + 0x5:
+    case fixed_offset + 0x6:
+    case fixed_offset + 0x7:
+    case fixed_offset + 0x8:
+    case fixed_offset + 0x9:
+    case fixed_offset + 0xa:
+    case fixed_offset + 0xb:
+    case fixed_offset + 0xc:
+    case fixed_offset + 0xd:
+    case fixed_offset + 0xe:
+    case fixed_offset + 0xf:
+#endif
+        ++*off;
+        size = ((unsigned int)*p) & 0x0f;
+        break;
+    default:
+        PyErr_SetString(PyExc_ValueError, "Unexpected type header on stream");
+        return -1;
+    }
+    unpack_callback_uint32(&ctx->user, size, &ctx->stack[0].obj);
+    return 1;
+}
+
+#undef SWITCH_RANGE_BEGIN
+#undef SWITCH_RANGE
+#undef SWITCH_RANGE_DEFAULT
+#undef SWITCH_RANGE_END
+
+static const execute_fn unpack_construct = &unpack_execute<true>;
+static const execute_fn unpack_skip = &unpack_execute<false>;
+static const execute_fn read_array_header = &unpack_container_header<0x90, 0xdc>;
+static const execute_fn read_map_header = &unpack_container_header<0x80, 0xde>;
+
+#undef NEXT_CS
+
+/* vim: set ts=4 sw=4 sts=4 expandtab  */

+ 19 - 0
utils/exporters/blender/tests/README.md

@@ -0,0 +1,19 @@
+# Running tests
+In order to use the test scripts you must have your shell set up to execute Blender from the command line using `$ blender`. This is done either by setting up your own wrapper scripts or by symlinking /usr/bin/blender directly to $BLENDER_ROOT/blender.
+
+## OS X
+Make sure you do not point to blender.app, as it will not pass the arguments correctly. It is required to execute $BLENDER_ROOT/blender.app/Contents/MacOS/blender in order for the tests to function correctly.
+
+# Testing
+Each test script focuses on a specific context and feature of the exporter. 
+
+## Context
+Context determines whether an entire scene is being exported or a single mesh node.
+
+## Features
+Features should be tested separately (whenever possible), example: animations should be tested separately from bump maps.
+
+## Review
+When a test is executed a new root directory, if it doesn't already exist, is created at three.js/utils/exporters/blender/tests/review. Inside it will be a subdirectory for each test (named the same as the script but with the `test_` prefix removed). The test directory will contain the exported JSON file(s), index.html, and textures (if textures are being tested). The index.html is already set up to source the required libraries and load the JSON file. There is nothing else that a user should need to do in order to test their export.
+
+The review directory has been added to the .gitignore and will not be included when committing changes.

BIN
utils/exporters/blender/tests/blend/anim.blend


BIN
utils/exporters/blender/tests/blend/cubeA.blend


BIN
utils/exporters/blender/tests/blend/cubeB.blend


BIN
utils/exporters/blender/tests/blend/cubeC.blend


BIN
utils/exporters/blender/tests/blend/light_setup.blend


BIN
utils/exporters/blender/tests/blend/lightmap.blend


BIN
utils/exporters/blender/tests/blend/persp_camera.blend


BIN
utils/exporters/blender/tests/blend/planeA.blend


BIN
utils/exporters/blender/tests/blend/planeB.blend


BIN
utils/exporters/blender/tests/blend/scene_area_light.blend


BIN
utils/exporters/blender/tests/blend/scene_directional_light.blend


BIN
utils/exporters/blender/tests/blend/scene_hemi_light.blend


BIN
utils/exporters/blender/tests/blend/scene_instancing.blend


BIN
utils/exporters/blender/tests/blend/scene_maps.blend


BIN
utils/exporters/blender/tests/blend/scene_orthographic_camera.blend


BIN
utils/exporters/blender/tests/blend/scene_perspective_camera.blend


BIN
utils/exporters/blender/tests/blend/scene_point_light.blend


BIN
utils/exporters/blender/tests/blend/scene_spot_light.blend


BIN
utils/exporters/blender/tests/blend/textures/cloud.png


BIN
utils/exporters/blender/tests/blend/textures/lightmap.png


BIN
utils/exporters/blender/tests/blend/textures/normal.png


BIN
utils/exporters/blender/tests/blend/textures/uv_grid.jpg


BIN
utils/exporters/blender/tests/blend/three_point.blend


BIN
utils/exporters/blender/tests/blend/torusA.blend


+ 13 - 0
utils/exporters/blender/tests/scripts/css/style.css

@@ -0,0 +1,13 @@
+body {
+  margin: 0px;
+  padding: 0px;
+  overflow: hidden;
+}
+
+#viewport {
+    position: absolute;
+    width: 100%;
+    height: 100%;
+    background: #1b1c1e;
+    background-image: linear-gradient(#7d8fa3, #1b1c1e);
+}

+ 37 - 0
utils/exporters/blender/tests/scripts/exporter.py

@@ -0,0 +1,37 @@
+import os
+import argparse
+import sys
+import io_three
+from io_three.exporter import constants
+
+
+try:
+    separator = sys.argv.index('--')
+except IndexError:
+    print('ERROR: no parameters specified')
+    sys.exit(1)
+
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('filepath')
+    for key, value in constants.EXPORT_OPTIONS.items():
+        if not isinstance(value, bool):
+            kwargs = {'type': type(value), 'default': value}
+        else:
+            kwargs = {'action':'store_true'}
+        parser.add_argument('--%s' % key, **kwargs)
+
+    return vars(parser.parse_args(sys.argv[separator+1:]))
+
+
+def main():
+    args = parse_args()
+    if args[constants.SCENE]:
+        io_three.exporter.export_scene(args['filepath'], args)
+    else:
+        io_three.exporter.export_geometry(args['filepath'], args)
+
+
+if __name__ == '__main__':
+    main()

+ 249 - 0
utils/exporters/blender/tests/scripts/js/review.js

@@ -0,0 +1,249 @@
+var scene, renderer, camera, container, animation;
+var hasMorph = false;
+var prevTime = Date.now();
+var clock = new THREE.Clock();
+
+function render() {
+        
+    renderer.render( scene, camera );
+ 
+    if ( hasMorph ) {
+
+        var time = Date.now();
+
+        animation.update( time - prevTime );
+
+        prevTime = time;
+
+    }     
+}
+
+function animate() {
+
+    requestAnimationFrame( animate );
+
+    if ( animation !== null ) {
+
+        var delta = clock.getDelta();
+        THREE.AnimationHandler.update( delta );
+
+    }
+
+    render();
+
+}
+
+function onWindowResize() {
+
+    camera.aspect = container.offsetWidth / container.offsetHeight;
+    camera.updateProjectionMatrix();
+
+    renderer.setSize( container.offsetWidth, container.offsetHeight );
+
+    render();
+
+}
+
+function setupScene( result, data ) {
+
+    scene = new THREE.Scene();
+    scene.add( new THREE.GridHelper( 10, 2.5 ) );
+
+}
+
+function setupLights() {
+
+    var directionalLight = new THREE.DirectionalLight( 0xb8b8b8 );
+    directionalLight.position.set(1, 1, 1).normalize();
+    directionalLight.intensity = 1.0;
+    scene.add( directionalLight );
+    
+    directionalLight = new THREE.DirectionalLight( 0xb8b8b8 );
+    directionalLight.position.set(-1, 0.6, 0.5).normalize();
+    directionalLight.intensity = 0.5;
+    scene.add(directionalLight);
+
+    directionalLight = new THREE.DirectionalLight();
+    directionalLight.position.set(-0.3, 0.6, -0.8).normalize( 0xb8b8b8 );
+    directionalLight.intensity = 0.45;
+    scene.add(directionalLight);
+
+}
+
+function loadObject( data ) {
+
+    var loader = new THREE.ObjectLoader();
+    scene = loader.parse( data );
+
+
+    var hasLights = false;
+
+    var lights = ['AmbientLight', 'DirectionalLight', 'AreaLight',
+        'PointLight', 'SpotLight', 'HemisphereLight']
+
+    for ( i = 0; i < data.object.children.length; i ++ ) {
+
+        var index = lights.indexOf( data.object.children[ i ].type );
+
+        if ( index > -1 ) {
+
+            hasLights = true;
+            break;
+
+        }
+
+    }
+
+    if ( ! ( hasLights ) ) setupLights();
+
+    scene.add( new THREE.GridHelper( 10, 2.5 ) );
+
+    render();
+
+}
+
+function loadGeometry( data, url ) {
+
+    var loader = new THREE.JSONLoader();
+    var texturePath = loader.extractUrlBase( url );
+    data = loader.parse( data, texturePath );
+
+    if ( data.materials === undefined ) {
+    
+        console.log('using default material');
+        data.materials = [new THREE.MeshLambertMaterial( { color: 0xb8b8b8 } )];
+    
+    }
+
+    var material = new THREE.MeshFaceMaterial( data.materials ); 
+    var mesh;
+
+    if ( data.geometry.animation !== undefined ) {
+
+        console.log( 'loading animation' );
+        data.materials[ 0 ].skinning = true;
+        mesh = new THREE.SkinnedMesh( data.geometry, material, false);
+
+        var name = data.geometry.animation.name;
+        animation = new THREE.Animation( mesh, data.geometry.animation );
+
+    } else {
+
+        mesh = new THREE.Mesh( data.geometry, material );
+
+        if ( data.geometry.morphTargets.length > 0 ) {
+
+            console.log( 'loading morph targets' );
+            data.materials[ 0 ].morphTargets = true;
+            animation = new THREE.MorphAnimation( mesh );
+            hasMorph = true;
+
+        }
+
+    }
+
+    setupScene();
+    setupLights();
+    scene.add( mesh );
+
+    if ( animation != null ) {
+
+        console.log( 'playing animation' );
+        animation.play();
+        animate();
+
+    } else {
+
+        render();
+
+    }
+}
+
+function loadBufferGeometry( data ) {
+
+    var loader = new THREE.BufferGeometryLoader();
+
+    var bufferGeometry = loader.parse( data );
+
+    var material = new THREE.MeshLambertMaterial( { color: 0xb8b8b8 } );
+    var mesh = new THREE.Mesh( bufferGeometry, material );
+    setupScene();
+    setupLights();
+    scene.add( mesh );
+
+    render();
+
+}
+
+function loadData( data, url ) {
+
+    if ( data.metadata.type === 'Geometry' ) {
+        
+        loadGeometry( data, url );
+    
+    } else if ( data.metadata.type === 'Object' ) {
+    
+        loadObject( data );
+
+    } else if ( data.metadata.type === 'BufferGeometry' ) {
+
+        loadBufferGeometry( data );
+
+    } else {
+
+        console.warn( 'can not determine type' );
+
+    }
+
+}
+
+function init( url ) {
+
+    container = document.createElement( 'div' );
+    container.id = 'viewport';
+    document.body.appendChild( container );
+
+    renderer = new THREE.WebGLRenderer( { antialias: true, alpha: true  } );
+    renderer.setSize( container.offsetWidth, container.offsetHeight );
+    renderer.setClearColor( 0x000000, 0 );
+    container.appendChild( renderer.domElement );
+    renderer.gammaInput = true;
+    renderer.gammaOutput = true;
+    
+    var aspect = container.offsetWidth / container.offsetHeight;
+    camera = new THREE.PerspectiveCamera( 50, aspect, 0.01, 50 );
+    orbit = new THREE.OrbitControls( camera, container );
+    orbit.addEventListener( 'change', render );
+    camera.position.z = 5;
+    camera.position.x = 5;
+    camera.position.y = 5;
+    var target = new THREE.Vector3( 0, 1, 0 );
+    camera.lookAt( target );
+    orbit.target = target;
+    camera.updateProjectionMatrix();
+
+    window.addEventListener( 'resize', onWindowResize, false );
+
+	var xhr = new XMLHttpRequest();
+    xhr.onreadystatechange = function ( x ) {
+    
+        if ( xhr.readyState === xhr.DONE ) {
+
+            if ( xhr.status === 200 || xhr.status === 0  ) {
+
+                loadData( JSON.parse( xhr.responseText ), url );
+
+            } else {
+
+                console.error( 'could not load json ' + xhr.status );
+
+            }
+
+        } 
+    
+    };
+    xhr.open( 'GET', url, true );
+    xhr.withCredentials = false;
+    xhr.send( null );
+
+}

+ 127 - 0
utils/exporters/blender/tests/scripts/review.py

@@ -0,0 +1,127 @@
+import os
+import json
+import stat
+import shutil
+import argparse
+
+
+os.chdir(os.path.dirname(os.path.realpath(__file__)))
+os.chdir('..')
+review = os.path.join(os.getcwd(), 'review')
+
+MASK = stat.S_IRWXU|stat.S_IRGRP|stat.S_IXGRP|stat.S_IROTH|stat.S_IXOTH
+
+HTML = '''<!doctype html>
+<html lang='en'>
+  <head>
+    <title>%(title)s</title>
+    <meta charset='utf-8'>
+    <script src='../../../../../../build/three.min.js'></script>
+    <script src='../../../../../../examples/js/controls/OrbitControls.js'></script>
+    <script src='../../scripts/js/review.js'></script>
+    <link href='../../scripts/css/style.css' rel='stylesheet' />
+  </head>
+  <body>
+    <script>
+      init('%(filename)s');
+    </script>
+  </body>
+</html>
+'''
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('json')
+    parser.add_argument('-t', '--tag', required=True)
+    return vars(parser.parse_args())
+
+
+def copy_for_review(tmp_json, tag):
+    tag_dir = os.path.join(review, tag)
+    if not os.path.exists(tag_dir):
+        print('making %s' % tag_dir)
+        os.makedirs(tag_dir)
+    dst_json = os.path.join(tag_dir, '%s.json' % tag)
+    print('moving %s > %s' % (tmp_json, dst_json))
+    shutil.move(tmp_json, dst_json)
+    create_template(tag_dir, os.path.basename(dst_json))
+
+    print('looking for maps...')
+    with open(dst_json) as stream:
+        data = json.load(stream)
+
+    textures = []
+    materials = data.get('materials')
+    if data['metadata']['type'] == 'Geometry' and materials:
+        textures.extend(_parse_geometry_materials(materials))
+
+    images = data.get('images')
+    if data['metadata']['type'] == 'Object' and images:
+        for each in images:
+            textures.append(each['url'])
+    
+    textures = list(set(textures))
+    print('found %d maps' % len(textures))
+    dir_tmp = os.path.dirname(tmp_json)
+    for texture in textures:
+        texture = os.path.join(dir_tmp, texture)
+        dst = os.path.join(tag_dir, os.path.basename(texture))
+        shutil.move(texture, dst)
+        print('moving %s > %s' % (texture, dst))
+
+    if data['metadata']['type'] == 'Object':
+        print('looking for non-embedded geometry')
+        for geometry in data['geometries']:
+            url = geometry.get('url')
+            if not url: continue
+            src = os.path.join(dir_tmp, url)
+            dst = os.path.join(tag_dir, url)
+            print('moving %s > %s' % (src, dst))
+            shutil.move(src, dst)
+    elif data['metadata']['type'] == 'Geometry':
+        print('looking for external animation files')
+        for key in ('animation', 'morphTargets'):
+            try:
+                value = data[key]
+            except KeyError:
+                continue
+
+            if not isinstance(value, str):
+                continue
+
+            src = os.path.join(dir_tmp, value)
+            dst = os.path.join(tag_dir, value)
+            print('moving %s > %s' % (src, dst))
+            shutil.move(src, dst)
+            
+
+def _parse_geometry_materials(materials):
+    maps = ('mapDiffuse', 'mapSpecular', 'mapBump',
+        'mapLight', 'mapNormal')
+    textures = []
+    for material in materials:
+        for key in material.keys():
+            if key in maps:
+                textures.append(material[key])
+    return textures
+
+
+def create_template(tag_dir, filename):
+    html = HTML % {
+        'title': filename[:-5].title(),
+        'filename': filename
+    }
+
+    html_path = os.path.join(tag_dir, 'index.html')
+    with open(html_path, 'w') as stream:
+        stream.write(html)
+    os.chmod(html_path, MASK)
+
+
+def main():
+    args = parse_args()
+    copy_for_review(args['json'], args['tag'])
+
+
+if __name__ == '__main__':
+    main()

+ 29 - 0
utils/exporters/blender/tests/scripts/setup_test_env.bash

@@ -0,0 +1,29 @@
+#!/bin/bash
+
+# Shared environment for the exporter test scripts. Callers must set
+# DIR (their own script directory) before sourcing; TAG, if set, only
+# prefixes the temp json name.
+
+# you must have blender setup to run from the command line
+command -v blender >/dev/null 2>&1 || { echo >&2 "Blender is not accessible from the command line. Aborting."; exit 1; }
+
+# unique temp path for the exported json
+# NOTE(review): tempfile.mktemp is race-prone, but the exporter needs a
+# path that does not exist yet, so it is kept deliberately
+export JSON=$(python -c "import tempfile;print(tempfile.mktemp(prefix='$TAG.', suffix='.json'))")
+
+export BLENDER_USER_SCRIPTS=$(cd "$DIR/../../"; pwd)
+
+# set the root for blend files
+export BLEND=$(cd "$DIR/../blend"; pwd)
+
+# set the python script to exec in batch
+export PYSCRIPT="$DIR/exporter.py"
+
+# run the review tool against the exported json, forwarding extra args
+function makereview() {
+    if [ ! -f "$JSON" ]; then
+        echo "no json, export error suspected"
+        exit 1
+    fi
+    python3 "$DIR/review.py" "$JSON" "$@"
+}
+
+# derive the tag from the calling script's name: test_foo.bash -> foo
+function tagname() {
+    tag=$(basename "$0")
+    tag=${tag#test_}
+    tag=${tag%%.*}
+    echo "$tag"
+}

+ 8 - 0
utils/exporters/blender/tests/scripts/test_buffer_geometry.bash

@@ -0,0 +1,8 @@
+#!/bin/bash
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+source "$DIR/setup_test_env.bash"
+
+# quote all expansions so paths/args with spaces survive word splitting
+blender --background "$BLEND/cubeA.blend" --python "$PYSCRIPT" -- \
+    "$JSON" --vertices --normals --geometryType BufferGeometry
+makereview "$@" --tag "$(tagname)"

+ 8 - 0
utils/exporters/blender/tests/scripts/test_geometry.bash

@@ -0,0 +1,8 @@
+#!/bin/bash
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+source "$DIR/setup_test_env.bash"
+
+# quote all expansions so paths/args with spaces survive word splitting
+blender --background "$BLEND/cubeA.blend" --python "$PYSCRIPT" -- \
+    "$JSON" --vertices --faces
+makereview "$@" --tag "$(tagname)"

+ 9 - 0
utils/exporters/blender/tests/scripts/test_geometry_animation.bash

@@ -0,0 +1,9 @@
+#!/bin/bash
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+source "$DIR/setup_test_env.bash"
+
+# quote all expansions so paths/args with spaces survive word splitting
+blender --background "$BLEND/anim.blend" --python "$PYSCRIPT" -- \
+    "$JSON" --vertices --faces --animation --bones --skinning \
+    --embedAnimation
+makereview "$@" --tag "$(tagname)"

+ 8 - 0
utils/exporters/blender/tests/scripts/test_geometry_bump_spec_maps.bash

@@ -0,0 +1,8 @@
+#!/bin/bash
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+source "$DIR/setup_test_env.bash"
+
+# quote all expansions so paths/args with spaces survive word splitting
+blender --background "$BLEND/planeA.blend" --python "$PYSCRIPT" -- \
+    "$JSON" --vertices --faces --faceMaterials --uvs --maps --copyTextures
+makereview "$@" --tag "$(tagname)"

+ 8 - 0
utils/exporters/blender/tests/scripts/test_geometry_diffuse_map.bash

@@ -0,0 +1,8 @@
+#!/bin/bash
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+source "$DIR/setup_test_env.bash"
+
+# quote all expansions so paths/args with spaces survive word splitting
+blender --background "$BLEND/cubeA.blend" --python "$PYSCRIPT" -- \
+    "$JSON" --vertices --faces --faceMaterials --uvs --maps --copyTextures
+makereview "$@" --tag "$(tagname)"

+ 8 - 0
utils/exporters/blender/tests/scripts/test_geometry_lambert_material.bash

@@ -0,0 +1,8 @@
+#!/bin/bash
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+source "$DIR/setup_test_env.bash"
+
+# quote all expansions so paths/args with spaces survive word splitting
+blender --background "$BLEND/cubeA.blend" --python "$PYSCRIPT" -- \
+    "$JSON" --vertices --faces --faceMaterials
+makereview "$@" --tag "$(tagname)"

+ 8 - 0
utils/exporters/blender/tests/scripts/test_geometry_light_map.bash

@@ -0,0 +1,8 @@
+#!/bin/bash
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+source "$DIR/setup_test_env.bash"
+
+# quote all expansions so paths/args with spaces survive word splitting
+blender --background "$BLEND/lightmap.blend" --python "$PYSCRIPT" -- \
+    "$JSON" --vertices --faces --faceMaterials --uvs --maps --copyTextures
+makereview "$@" --tag "$(tagname)"

+ 8 - 0
utils/exporters/blender/tests/scripts/test_geometry_mix_colors.bash

@@ -0,0 +1,8 @@
+#!/bin/bash
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+source "$DIR/setup_test_env.bash"
+
+# quote all expansions so paths/args with spaces survive word splitting
+blender --background "$BLEND/cubeB.blend" --python "$PYSCRIPT" -- \
+    "$JSON" --vertices --faces --colors --faceMaterials --mixColors
+makereview "$@" --tag "$(tagname)"

+ 8 - 0
utils/exporters/blender/tests/scripts/test_geometry_morph_targets.bash

@@ -0,0 +1,8 @@
+#!/bin/bash
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+source "$DIR/setup_test_env.bash"
+
+# quote all expansions so paths/args with spaces survive word splitting
+blender --background "$BLEND/anim.blend" --python "$PYSCRIPT" -- \
+    "$JSON" --vertices --faces --morphTargets --embedAnimation
+makereview "$@" --tag "$(tagname)"

+ 9 - 0
utils/exporters/blender/tests/scripts/test_geometry_normal_map.bash

@@ -0,0 +1,9 @@
+#!/bin/bash
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+source "$DIR/setup_test_env.bash"
+
+# quote all expansions so paths/args with spaces survive word splitting
+blender --background "$BLEND/planeB.blend" --python "$PYSCRIPT" -- \
+    "$JSON" --vertices --faces --faceMaterials --uvs --maps --normals \
+    --copyTextures
+makereview "$@" --tag "$(tagname)"

+ 8 - 0
utils/exporters/blender/tests/scripts/test_geometry_normals.bash

@@ -0,0 +1,8 @@
+#!/bin/bash
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+source "$DIR/setup_test_env.bash"
+
+# quote all expansions so paths/args with spaces survive word splitting
+blender --background "$BLEND/torusA.blend" --python "$PYSCRIPT" -- \
+    "$JSON" --vertices --faces --normals
+makereview "$@" --tag "$(tagname)"

+ 8 - 0
utils/exporters/blender/tests/scripts/test_geometry_phong_material.bash

@@ -0,0 +1,8 @@
+#!/bin/bash
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+source "$DIR/setup_test_env.bash"
+
+# quote all expansions so paths/args with spaces survive word splitting
+blender --background "$BLEND/torusA.blend" --python "$PYSCRIPT" -- \
+    "$JSON" --vertices --faces --normals --faceMaterials
+makereview "$@" --tag "$(tagname)"

+ 8 - 0
utils/exporters/blender/tests/scripts/test_geometry_vertex_colors.bash

@@ -0,0 +1,8 @@
+#!/bin/bash
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+source "$DIR/setup_test_env.bash"
+
+# quote all expansions so paths/args with spaces survive word splitting
+blender --background "$BLEND/cubeB.blend" --python "$PYSCRIPT" -- \
+    "$JSON" --vertices --faces --colors --faceMaterials
+makereview "$@" --tag "$(tagname)"

+ 8 - 0
utils/exporters/blender/tests/scripts/test_geometry_wireframe.bash

@@ -0,0 +1,8 @@
+#!/bin/bash
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+source "$DIR/setup_test_env.bash"
+
+# quote all expansions so paths/args with spaces survive word splitting
+blender --background "$BLEND/cubeC.blend" --python "$PYSCRIPT" -- \
+    "$JSON" --vertices --faces --faceMaterials
+makereview "$@" --tag "$(tagname)"

+ 9 - 0
utils/exporters/blender/tests/scripts/test_scene_area_light.bash

@@ -0,0 +1,9 @@
+#!/bin/bash
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+source "$DIR/setup_test_env.bash"
+
+# quote all expansions so paths/args with spaces survive word splitting
+blender --background "$BLEND/scene_area_light.blend" \
+    --python "$PYSCRIPT" -- "$JSON" --vertices --faces --scene \
+    --lights --materials --embedGeometry
+makereview "$@" --tag "$(tagname)"

+ 9 - 0
utils/exporters/blender/tests/scripts/test_scene_buffer_geometry.bash

@@ -0,0 +1,9 @@
+#!/bin/bash
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+source "$DIR/setup_test_env.bash"
+
+# quote all expansions so paths/args with spaces survive word splitting
+blender --background "$BLEND/cubeA.blend" --python "$PYSCRIPT" -- \
+    "$JSON" --vertices --normals --geometryType BufferGeometry \
+    --scene --materials --embedGeometry
+makereview "$@" --tag "$(tagname)"

+ 9 - 0
utils/exporters/blender/tests/scripts/test_scene_buffer_geometry_noembed.bash

@@ -0,0 +1,9 @@
+#!/bin/bash
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+source "$DIR/setup_test_env.bash"
+
+# quote all expansions so paths/args with spaces survive word splitting
+blender --background "$BLEND/cubeA.blend" --python "$PYSCRIPT" -- \
+    "$JSON" --vertices --normals --geometryType BufferGeometry \
+    --scene --materials
+makereview "$@" --tag "$(tagname)"

+ 9 - 0
utils/exporters/blender/tests/scripts/test_scene_directional_light.bash

@@ -0,0 +1,9 @@
+#!/bin/bash
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+source "$DIR/setup_test_env.bash"
+
+# quote all expansions so paths/args with spaces survive word splitting
+blender --background "$BLEND/scene_directional_light.blend" \
+    --python "$PYSCRIPT" -- "$JSON" --vertices --faces --scene \
+    --lights --materials --embedGeometry
+makereview "$@" --tag "$(tagname)"

+ 9 - 0
utils/exporters/blender/tests/scripts/test_scene_hemi_light.bash

@@ -0,0 +1,9 @@
+#!/bin/bash
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+source "$DIR/setup_test_env.bash"
+
+# quote all expansions so paths/args with spaces survive word splitting
+blender --background "$BLEND/scene_hemi_light.blend" \
+    --python "$PYSCRIPT" -- "$JSON" --vertices --faces --scene \
+    --lights --materials --embedGeometry
+makereview "$@" --tag "$(tagname)"

+ 9 - 0
utils/exporters/blender/tests/scripts/test_scene_instancing.bash

@@ -0,0 +1,9 @@
+#!/bin/bash
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+source "$DIR/setup_test_env.bash"
+
+# quote all expansions so paths/args with spaces survive word splitting
+blender --background "$BLEND/scene_instancing.blend" --python "$PYSCRIPT" -- \
+    "$JSON" --vertices --faces --scene --materials --enablePrecision \
+    --precision 4 --embedGeometry
+makereview "$@" --tag "$(tagname)"

+ 9 - 0
utils/exporters/blender/tests/scripts/test_scene_maps.bash

@@ -0,0 +1,9 @@
+#!/bin/bash
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+source "$DIR/setup_test_env.bash"
+
+# quote all expansions so paths/args with spaces survive word splitting
+blender --background "$BLEND/scene_maps.blend" --python "$PYSCRIPT" -- \
+    "$JSON" --vertices --faces --scene --materials --maps \
+    --uvs --embedGeometry --copyTextures
+makereview "$@" --tag "$(tagname)"

+ 9 - 0
utils/exporters/blender/tests/scripts/test_scene_no_embed.bash

@@ -0,0 +1,9 @@
+#!/bin/bash
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+source "$DIR/setup_test_env.bash"
+
+# quote all expansions so paths/args with spaces survive word splitting
+blender --background "$BLEND/scene_instancing.blend" --python "$PYSCRIPT" -- \
+    "$JSON" --vertices --faces --scene --materials --enablePrecision \
+    --precision 4
+makereview "$@" --tag "$(tagname)"

+ 9 - 0
utils/exporters/blender/tests/scripts/test_scene_orthographic.bash

@@ -0,0 +1,9 @@
+#!/bin/bash
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+source "$DIR/setup_test_env.bash"
+
+# quote all expansions so paths/args with spaces survive word splitting
+blender --background "$BLEND/scene_orthographic_camera.blend" \
+    --python "$PYSCRIPT" -- "$JSON" --vertices --faces --scene \
+    --cameras --materials --embedGeometry
+makereview "$@" --tag "$(tagname)"

+ 9 - 0
utils/exporters/blender/tests/scripts/test_scene_perspective.bash

@@ -0,0 +1,9 @@
+#!/bin/bash
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+source "$DIR/setup_test_env.bash"
+
+# quote all expansions so paths/args with spaces survive word splitting
+blender --background "$BLEND/scene_perspective_camera.blend" \
+    --python "$PYSCRIPT" -- "$JSON" --vertices --faces --scene \
+    --cameras --materials --embedGeometry
+makereview "$@" --tag "$(tagname)"

Some files were not shown because too many files changed in this diff