فهرست منبع

Removed CTM, FBX, msgpack and UTF8 converters.

Mr.doob 7 سال پیش
والد
کامیت
baf0d97c5b
35 فایل تغییر یافته به همراه 0 افزوده شده و 8536 حذف شده
  1. 0 20
      utils/converters/ctm/LICENSE.txt
  2. BIN
      utils/converters/ctm/ctmconv.exe
  3. 0 114
      utils/converters/ctm/join_ctm.py
  4. BIN
      utils/converters/ctm/openctm.dll
  5. 0 21
      utils/converters/fbx/LICENSE
  6. 0 74
      utils/converters/fbx/README.md
  7. 0 2188
      utils/converters/fbx/convert_to_threejs.py
  8. 0 56
      utils/converters/msgpack/json2msgpack.py
  9. 0 54
      utils/converters/msgpack/msgpack/__init__.py
  10. 0 295
      utils/converters/msgpack/msgpack/_packer.pyx
  11. 0 426
      utils/converters/msgpack/msgpack/_unpacker.pyx
  12. 0 1
      utils/converters/msgpack/msgpack/_version.py
  13. 0 29
      utils/converters/msgpack/msgpack/exceptions.py
  14. 0 714
      utils/converters/msgpack/msgpack/fallback.py
  15. 0 103
      utils/converters/msgpack/msgpack/pack.h
  16. 0 785
      utils/converters/msgpack/msgpack/pack_template.h
  17. 0 194
      utils/converters/msgpack/msgpack/sysdep.h
  18. 0 263
      utils/converters/msgpack/msgpack/unpack.h
  19. 0 95
      utils/converters/msgpack/msgpack/unpack_define.h
  20. 0 475
      utils/converters/msgpack/msgpack/unpack_template.h
  21. 0 3
      utils/converters/utf8/build.bat
  22. BIN
      utils/converters/utf8/obj2utf8.exe
  23. BIN
      utils/converters/utf8/obj2utf8x.exe
  24. BIN
      utils/converters/utf8/objcompress.exe
  25. 0 20
      utils/converters/utf8/src/README
  26. 0 188
      utils/converters/utf8/src/base.h
  27. 0 123
      utils/converters/utf8/src/bounds.h
  28. 0 539
      utils/converters/utf8/src/compress.h
  29. 0 712
      utils/converters/utf8/src/mesh.h
  30. 0 135
      utils/converters/utf8/src/obj2utf8.cc
  31. 0 138
      utils/converters/utf8/src/obj2utf8x.cc
  32. 0 165
      utils/converters/utf8/src/objcompress.cc
  33. 0 273
      utils/converters/utf8/src/optimize.h
  34. 0 272
      utils/converters/utf8/src/stream.h
  35. 0 61
      utils/converters/utf8/src/utf8.h

+ 0 - 20
utils/converters/ctm/LICENSE.txt

@@ -1,20 +0,0 @@
-Copyright (c) 2009-2010 Marcus Geelnard
-
-This software is provided 'as-is', without any express or implied
-warranty. In no event will the authors be held liable for any damages
-arising from the use of this software.
-
-Permission is granted to anyone to use this software for any purpose,
-including commercial applications, and to alter it and redistribute it
-freely, subject to the following restrictions:
-
-    1. The origin of this software must not be misrepresented; you must not
-    claim that you wrote the original software. If you use this software
-    in a product, an acknowledgment in the product documentation would be
-    appreciated but is not required.
-
-    2. Altered source versions must be plainly marked as such, and must not
-    be misrepresented as being the original software.
-
-    3. This notice may not be removed or altered from any source
-    distribution.

BIN
utils/converters/ctm/ctmconv.exe


+ 0 - 114
utils/converters/ctm/join_ctm.py

@@ -1,114 +0,0 @@
-"""Join multiple binary files into single file and generate JSON snippet with offsets
-
--------------------------------------
-How to use
--------------------------------------
-
-python join_ctm.py -i "part_*.ctm" -o joined.ctm [-j offsets.js]
-
-Will read multiple files following wildcard pattern (ordered lexicographically):
-
-part_000.ctm
-part_001.ctm
-part_002.ctm
-
-...
-
-part_XXX.ctm
-
-And generate single concatenated files:
-
-joined.ctm
-offsets.js (optional, offsets are also dumped to standard output)
-
-"""
-
-import getopt
-import glob
-import sys
-import os
-
-# #####################################################
-# Templates
-# #####################################################
-TEMPLATE_JSON = u"""\
-"offsets": [ %(offsets)s ],
-"""
-
-# #############################################################################
-# Helpers
-# #############################################################################
-def usage():
-    print 'Usage: %s -i "filename_*.ctm" -o filename.ctm [-j offsets.js]' % os.path.basename(sys.argv[0])
-
-# #####################################################
-# Main
-# #####################################################
-if __name__ == "__main__":
-
-    # get parameters from the command line
-
-    try:
-        opts, args = getopt.getopt(sys.argv[1:], "hi:o:j:", ["help", "input=", "output=", "json="])
-
-    except getopt.GetoptError:
-        usage()
-        sys.exit(2)
-
-
-    inpattern = ""
-    outname = ""
-    jsonname = ""
-
-    for o, a in opts:
-        if o in ("-h", "--help"):
-            usage()
-            sys.exit()
-
-        elif o in ("-i", "--input"):
-            inpattern = a
-
-        elif o in ("-o", "--output"):
-            outname = a
-
-        elif o in ("-j", "--json"):
-            jsonname = a
-
-    # quit if required parameters are missing
-
-    if inpattern == "" or outname == "":
-        usage()
-        sys.exit(2)
-
-    outfile = open(outname, "wb")
-
-    matches = glob.glob(inpattern)
-    matches.sort()
-
-    total = 0
-    offsets = []
-
-    for filename in matches:
-        filesize = os.path.getsize(filename)
-        offsets.append(total)
-        total += filesize
-
-        print filename, filesize
-
-        infile = open(filename, "rb")
-        buffer = infile.read()
-        outfile.write(buffer)
-        infile.close()
-
-    outfile.close()
-
-    json_str = TEMPLATE_JSON % {
-    "offsets" : ", ".join(["%d" % o for o in offsets])
-    }
-
-    print json_str
-
-    if jsonname:
-        jsonfile = open(jsonname, "w")
-        jsonfile.write(json_str)
-        jsonfile.close()

BIN
utils/converters/ctm/openctm.dll


+ 0 - 21
utils/converters/fbx/LICENSE

@@ -1,21 +0,0 @@
-The MIT License
-
-Copyright (c) 2012 convert-to-three.py authors.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.

+ 0 - 74
utils/converters/fbx/README.md

@@ -1,74 +0,0 @@
-## convert-to-threejs
-
-Utility for converting model files to the Three.js JSON format
-
-## Supported Formats
-
-* Fbx (.fbx)
-* Collada (.dae) 
-* Wavefront/Alias (.obj)
-* 3D Studio Max (.3ds)
-
-## Usage 
-
-```
-convert_to_threejs.py [source_file] [output_file] [options]
-
-Options:
-  -t, --triangulate       force non-triangle geometry into triangles
-  -x, --ignore-textures   don't include texture references in output file
-  -u, --force-prefix      prefix all object names in output file to ensure uniqueness
-  -f, --flatten-scene     merge all geometries and apply node transforms
-  -c, --add-camera        include default camera in output scene
-  -l, --add-light         include default light in output scene
-  -p, --pretty-print      prefix all object names in output file
-```
-
-## Current Limitations
-
-* No animation support
-* Only Lambert and Phong materials are supported
-* Some camera properties are not converted correctly
-* Some light properties are not converted correctly
-* Some material properties are not converted correctly
-* Textures must be put in asset's folder, and use relative path in the material
-
-## Dependencies
-
-### FBX SDK
-* Requires Autodesk FBX SDK Python 2013.3 bindings. 
-
-```
-You can download the python bindings from the Autodesk website: 
-  http://usa.autodesk.com/fbx/
-```
-
-```
-Don't forget the visit the FBX SDK documentation website:
-  http://docs.autodesk.com/FBX/2013/ENU/FBX-SDK-Documentation/cpp_ref/index.html
-```
-
-*Note:* If you use the OSX installer, it will install the Python packages into the following folder.
-
-```
-/Applications/Autodesk/FBX Python SDK/[VERSION]/lib/
-```
-
-If the tool still can't find the FBX SDK, you may need to copy the `fbx.so`, `FbxCommon.py` and `sip.so` files into your site_packages folder. 
-
-If you don't know your site_packages folder, run `python` from shell and paste this:
-
-```py
-import site; site.getsitepackages()
-```
-
-### Python
-* Requires Python 2.6, 2.7 or 3.1 (The FBX SDK requires one of these versions)
-
-``` bash
-sudo apt-get install build-essential
-wget http://www.python.org/ftp/python/2.6.8/Python-2.6.8.tar.bz2
-tar jxf ./Python-2.6.8.tar.bz2
-cd ./Python-2.6.8
-./configure --prefix=/opt/python2.6.8 && make && make install
-```

+ 0 - 2188
utils/converters/fbx/convert_to_threejs.py

@@ -1,2188 +0,0 @@
-# @author zfedoran / http://github.com/zfedoran
-
-import os
-import sys
-import math
-import operator
-import re
-import json
-import types
-import shutil
-
-# #####################################################
-# Globals
-# #####################################################
-option_triangulate = True
-option_textures = True
-option_copy_textures = True
-option_prefix = True
-option_geometry = False
-option_forced_y_up = False
-option_default_camera = False
-option_default_light = False
-option_pretty_print = False
-
-converter = None
-inputFolder = ""
-outputFolder = ""
-
-# #####################################################
-# Pretty Printing Hacks
-# #####################################################
-
-# Force an array to be printed fully on a single line
-class NoIndent(object):
-    def __init__(self, value, separator = ','):
-        self.separator = separator
-        self.value = value
-    def encode(self):
-        if not self.value:
-            return None
-        return '[ %s ]' % self.separator.join(str(f) for f in self.value)
-
-# Force an array into chunks rather than printing each element on a new line
-class ChunkedIndent(object):
-    def __init__(self, value, chunk_size = 15, force_rounding = False):
-        self.value = value
-        self.size = chunk_size
-        self.force_rounding = force_rounding
-    def encode(self):
-        # Turn the flat array into an array of arrays where each subarray is of
-        # length chunk_size. Then string concat the values in the chunked
-        # arrays, delimited with a ', ' and round the values finally append
-        # '{CHUNK}' so that we can find the strings with regex later
-        if not self.value:
-            return None
-        if self.force_rounding:
-            return ['{CHUNK}%s' % ', '.join(str(round(f, 6)) for f in self.value[i:i+self.size]) for i in range(0, len(self.value), self.size)]
-        else:
-            return ['{CHUNK}%s' % ', '.join(str(f) for f in self.value[i:i+self.size]) for i in range(0, len(self.value), self.size)]
-
-# This custom encoder looks for instances of NoIndent or ChunkedIndent.
-# When it finds
-class CustomEncoder(json.JSONEncoder):
-    def default(self, obj):
-        if isinstance(obj, NoIndent) or isinstance(obj, ChunkedIndent):
-            return obj.encode()
-        else:
-            return json.JSONEncoder.default(self, obj)
-
-def executeRegexHacks(output_string):
-    # turn strings of arrays into arrays (remove the double quotes)
-    output_string = re.sub(':\s*\"(\[.*\])\"', r': \1', output_string)
-    output_string = re.sub('(\n\s*)\"(\[.*\])\"', r'\1\2', output_string)
-    output_string = re.sub('(\n\s*)\"{CHUNK}(.*)\"', r'\1\2', output_string)
-
-    # replace '0metadata' with metadata
-    output_string = re.sub('0metadata', r'metadata', output_string)
-    # replace 'zchildren' with children
-    output_string = re.sub('zchildren', r'children', output_string)
-
-    # add an extra newline after '"children": {'
-    output_string = re.sub('(children.*{\s*\n)', r'\1\n', output_string)
-    # add an extra newline after '},'
-    output_string = re.sub('},\s*\n', r'},\n\n', output_string)
-    # add an extra newline after '\n\s*],'
-    output_string = re.sub('(\n\s*)],\s*\n', r'\1],\n\n', output_string)
-
-    return output_string
-
-# #####################################################
-# Object Serializers
-# #####################################################
-
-# FbxVector2 is not JSON serializable
-def serializeVector2(v, round_vector = False):
-    # JSON does not support NaN or Inf
-    if math.isnan(v[0]) or math.isinf(v[0]):
-        v[0] = 0
-    if math.isnan(v[1]) or math.isinf(v[1]):
-        v[1] = 0
-    if round_vector or option_pretty_print:
-        v = (round(v[0], 5), round(v[1], 5))
-    if option_pretty_print:
-        return NoIndent([v[0], v[1]], ', ')
-    else:
-        return [v[0], v[1]]
-
-# FbxVector3 is not JSON serializable
-def serializeVector3(v, round_vector = False):
-    # JSON does not support NaN or Inf
-    if math.isnan(v[0]) or math.isinf(v[0]):
-        v[0] = 0
-    if math.isnan(v[1]) or math.isinf(v[1]):
-        v[1] = 0
-    if math.isnan(v[2]) or math.isinf(v[2]):
-        v[2] = 0
-    if round_vector or option_pretty_print:
-        v = (round(v[0], 5), round(v[1], 5), round(v[2], 5))
-    if option_pretty_print:
-        return NoIndent([v[0], v[1], v[2]], ', ')
-    else:
-        return [v[0], v[1], v[2]]
-
-# FbxVector4 is not JSON serializable
-def serializeVector4(v, round_vector = False):
-    # JSON does not support NaN or Inf
-    if math.isnan(v[0]) or math.isinf(v[0]):
-        v[0] = 0
-    if math.isnan(v[1]) or math.isinf(v[1]):
-        v[1] = 0
-    if math.isnan(v[2]) or math.isinf(v[2]):
-        v[2] = 0
-    if math.isnan(v[3]) or math.isinf(v[3]):
-        v[3] = 0
-    if round_vector or option_pretty_print:
-        v = (round(v[0], 5), round(v[1], 5), round(v[2], 5), round(v[3], 5))
-    if option_pretty_print:
-        return NoIndent([v[0], v[1], v[2], v[3]], ', ')
-    else:
-        return [v[0], v[1], v[2], v[3]]
-
-# #####################################################
-# Helpers
-# #####################################################
-def getRadians(v):
-    return ((v[0]*math.pi)/180, (v[1]*math.pi)/180, (v[2]*math.pi)/180)
-
-def getHex(c):
-    color = (int(c[0]*255) << 16) + (int(c[1]*255) << 8) + int(c[2]*255)
-    return int(color)
-
-def setBit(value, position, on):
-    if on:
-        mask = 1 << position
-        return (value | mask)
-    else:
-        mask = ~(1 << position)
-        return (value & mask)
-
-def generate_uvs(uv_layers):
-    layers = []
-    for uvs in uv_layers:
-        tmp = []
-        for uv in uvs:
-            tmp.append(uv[0])
-            tmp.append(uv[1])
-        if option_pretty_print:
-            layer = ChunkedIndent(tmp)
-        else:
-            layer = tmp
-        layers.append(layer)
-    return layers
-
-# #####################################################
-# Object Name Helpers
-# #####################################################
-def hasUniqueName(o, class_id):
-    scene = o.GetScene()
-    object_name = o.GetName()
-    object_id = o.GetUniqueID()
-
-    object_count = scene.GetSrcObjectCount(FbxCriteria.ObjectType(class_id))
-
-    for i in range(object_count):
-        other = scene.GetSrcObject(FbxCriteria.ObjectType(class_id), i)
-        other_id = other.GetUniqueID()
-        other_name = other.GetName()
-
-        if other_id == object_id:
-            continue
-        if other_name == object_name:
-            return False
-
-    return True
-
-def getObjectName(o, force_prefix = False):
-    if not o:
-        return ""
-
-    object_name = o.GetName()
-    object_id = o.GetUniqueID()
-
-    if not force_prefix:
-        force_prefix = not hasUniqueName(o, FbxNode.ClassId)
-
-    prefix = ""
-    if option_prefix or force_prefix:
-        prefix = "Object_%s_" % object_id
-
-    return prefix + object_name
-
-def getMaterialName(o, force_prefix = False):
-    object_name = o.GetName()
-    object_id = o.GetUniqueID()
-
-    if not force_prefix:
-        force_prefix = not hasUniqueName(o, FbxSurfaceMaterial.ClassId)
-
-    prefix = ""
-    if option_prefix or force_prefix:
-        prefix = "Material_%s_" % object_id
-
-    return prefix + object_name
-
-def getTextureName(t, force_prefix = False):
-    if type(t) is FbxFileTexture:
-        texture_file = t.GetFileName()
-        texture_id = os.path.splitext(os.path.basename(texture_file))[0]
-    else:
-        texture_id = t.GetName()
-        if texture_id == "_empty_":
-            texture_id = ""
-    prefix = ""
-    if option_prefix or force_prefix:
-        prefix = "Texture_%s_" % t.GetUniqueID()
-        if len(texture_id) == 0:
-            prefix = prefix[0:len(prefix)-1]
-    return prefix + texture_id
-
-def getMtlTextureName(texture_name, texture_id, force_prefix = False):
-    texture_name = os.path.splitext(texture_name)[0]
-    prefix = ""
-    if option_prefix or force_prefix:
-        prefix = "Texture_%s_" % texture_id
-    return prefix + texture_name
-
-def getPrefixedName(o, prefix):
-    return (prefix + '_%s_') % o.GetUniqueID() + o.GetName()
-
-# #####################################################
-# Triangulation
-# #####################################################
-def triangulate_node_hierarchy(node):
-    node_attribute = node.GetNodeAttribute();
-
-    if node_attribute:
-        if node_attribute.GetAttributeType() == FbxNodeAttribute.eMesh or \
-           node_attribute.GetAttributeType() == FbxNodeAttribute.eNurbs or \
-           node_attribute.GetAttributeType() == FbxNodeAttribute.eNurbsSurface or \
-           node_attribute.GetAttributeType() == FbxNodeAttribute.ePatch:
-            converter.Triangulate(node.GetNodeAttribute(), True);
-
-        child_count = node.GetChildCount()
-        for i in range(child_count):
-            triangulate_node_hierarchy(node.GetChild(i))
-
-def triangulate_scene(scene):
-    node = scene.GetRootNode()
-    if node:
-        for i in range(node.GetChildCount()):
-            triangulate_node_hierarchy(node.GetChild(i))
-
-# #####################################################
-# Generate Material Object
-# #####################################################
-def generate_texture_bindings(material_property, material_params):
-    # FBX to Three.js texture types
-    binding_types = {
-        "DiffuseColor": "map",
-        "DiffuseFactor": "diffuseFactor",
-        "EmissiveColor": "emissiveMap",
-        "EmissiveFactor": "emissiveFactor",
-        "AmbientColor": "lightMap", # "ambientMap",
-        "AmbientFactor": "ambientFactor",
-        "SpecularColor": "specularMap",
-        "SpecularFactor": "specularFactor",
-        "ShininessExponent": "shininessExponent",
-        "NormalMap": "normalMap",
-        "Bump": "bumpMap",
-        "TransparentColor": "transparentMap",
-        "TransparencyFactor": "transparentFactor",
-        "ReflectionColor": "reflectionMap",
-        "ReflectionFactor": "reflectionFactor",
-        "DisplacementColor": "displacementMap",
-        "VectorDisplacementColor": "vectorDisplacementMap"
-    }
-
-    if material_property.IsValid():
-        #Here we have to check if it's layeredtextures, or just textures:
-        layered_texture_count = material_property.GetSrcObjectCount(FbxCriteria.ObjectType(FbxLayeredTexture.ClassId))
-        if layered_texture_count > 0:
-            for j in range(layered_texture_count):
-                layered_texture = material_property.GetSrcObject(FbxCriteria.ObjectType(FbxLayeredTexture.ClassId), j)
-                texture_count = layered_texture.GetSrcObjectCount(FbxCriteria.ObjectType(FbxTexture.ClassId))
-                for k in range(texture_count):
-                    texture = layered_texture.GetSrcObject(FbxCriteria.ObjectType(FbxTexture.ClassId),k)
-                    if texture:
-                        texture_id = getTextureName(texture, True)
-                        material_params[binding_types[str(material_property.GetName())]] = texture_id
-        else:
-            # no layered texture simply get on the property
-            texture_count = material_property.GetSrcObjectCount(FbxCriteria.ObjectType(FbxTexture.ClassId))
-            for j in range(texture_count):
-                texture = material_property.GetSrcObject(FbxCriteria.ObjectType(FbxTexture.ClassId),j)
-                if texture:
-                    texture_id = getTextureName(texture, True)
-                    material_params[binding_types[str(material_property.GetName())]] = texture_id
-
-def generate_material_object(material):
-    #Get the implementation to see if it's a hardware shader.
-    implementation = GetImplementation(material, "ImplementationHLSL")
-    implementation_type = "HLSL"
-    if not implementation:
-        implementation = GetImplementation(material, "ImplementationCGFX")
-        implementation_type = "CGFX"
-
-    output = None
-    material_params = None
-    material_type = None
-
-    if implementation:
-        print("Shader materials are not supported")
-
-    elif material.GetClassId().Is(FbxSurfaceLambert.ClassId):
-
-        ambient   = getHex(material.Ambient.Get())
-        diffuse   = getHex(material.Diffuse.Get())
-        emissive  = getHex(material.Emissive.Get())
-        opacity   = 1.0 - material.TransparencyFactor.Get()
-        opacity   = 1.0 if opacity == 0 else opacity
-        transparent = False
-        reflectivity = 1
-
-        material_type = 'MeshBasicMaterial'
-#        material_type = 'MeshLambertMaterial'
-        material_params = {
-
-          'color' : diffuse,
-          'ambient' : ambient,
-          'emissive' : emissive,
-          'reflectivity' : reflectivity,
-          'transparent' : transparent,
-          'opacity' : opacity
-
-        }
-
-    elif material.GetClassId().Is(FbxSurfacePhong.ClassId):
-
-        ambient   = getHex(material.Ambient.Get())
-        diffuse   = getHex(material.Diffuse.Get())
-        emissive  = getHex(material.Emissive.Get())
-        specular  = getHex(material.Specular.Get())
-        opacity   = 1.0 - material.TransparencyFactor.Get()
-        opacity   = 1.0 if opacity == 0 else opacity
-        shininess = material.Shininess.Get()
-        transparent = False
-        reflectivity = 1
-        bumpScale = 1
-
-        material_type = 'MeshPhongMaterial'
-        material_params = {
-
-          'color' : diffuse,
-          'ambient' : ambient,
-          'emissive' : emissive,
-          'specular' : specular,
-          'shininess' : shininess,
-          'bumpScale' : bumpScale,
-          'reflectivity' : reflectivity,
-          'transparent' : transparent,
-          'opacity' : opacity
-
-        }
-
-    else:
-        print ("Unknown type of Material"), getMaterialName(material)
-
-    # default to Lambert Material if the current Material type cannot be handeled
-    if not material_type:
-        ambient   = getHex((0,0,0))
-        diffuse   = getHex((0.5,0.5,0.5))
-        emissive  = getHex((0,0,0))
-        opacity   = 1
-        transparent = False
-        reflectivity = 1
-
-        material_type = 'MeshLambertMaterial'
-        material_params = {
-
-          'color' : diffuse,
-          'ambient' : ambient,
-          'emissive' : emissive,
-          'reflectivity' : reflectivity,
-          'transparent' : transparent,
-          'opacity' : opacity
-
-        }
-
-    if option_textures:
-        texture_count = FbxLayerElement.sTypeTextureCount()
-        for texture_index in range(texture_count):
-            material_property = material.FindProperty(FbxLayerElement.sTextureChannelNames(texture_index))
-            generate_texture_bindings(material_property, material_params)
-
-    material_params['wireframe'] = False
-    material_params['wireframeLinewidth'] = 1
-
-    output = {
-      'type' : material_type,
-      'parameters' : material_params
-    }
-
-    return output
-
-def generate_proxy_material_object(node, material_names):
-
-    material_type = 'MultiMaterial'
-    material_params = {
-      'materials' : material_names
-    }
-
-    output = {
-      'type' : material_type,
-      'parameters' : material_params
-    }
-
-    return output
-
-# #####################################################
-# Find Scene Materials
-# #####################################################
-def extract_materials_from_node(node, material_dict):
-    name = node.GetName()
-    mesh = node.GetNodeAttribute()
-
-    node = None
-    if mesh:
-        node = mesh.GetNode()
-        if node:
-            material_count = node.GetMaterialCount()
-
-    material_names = []
-    for l in range(mesh.GetLayerCount()):
-        materials = mesh.GetLayer(l).GetMaterials()
-        if materials:
-            if materials.GetReferenceMode() == FbxLayerElement.eIndex:
-                #Materials are in an undefined external table
-                continue
-            for i in range(material_count):
-                material = node.GetMaterial(i)
-                material_names.append(getMaterialName(material))
-
-    if material_count > 1:
-        proxy_material = generate_proxy_material_object(node, material_names)
-        proxy_name = getMaterialName(node, True)
-        material_dict[proxy_name] = proxy_material
-
-def generate_materials_from_hierarchy(node, material_dict):
-    if node.GetNodeAttribute() == None:
-        pass
-    else:
-        attribute_type = (node.GetNodeAttribute().GetAttributeType())
-        if attribute_type == FbxNodeAttribute.eMesh:
-            extract_materials_from_node(node, material_dict)
-    for i in range(node.GetChildCount()):
-        generate_materials_from_hierarchy(node.GetChild(i), material_dict)
-
-def generate_material_dict(scene):
-    material_dict = {}
-
-    # generate all materials for this scene
-    material_count = scene.GetSrcObjectCount(FbxCriteria.ObjectType(FbxSurfaceMaterial.ClassId))
-    for i in range(material_count):
-        material = scene.GetSrcObject(FbxCriteria.ObjectType(FbxSurfaceMaterial.ClassId), i)
-        material_object = generate_material_object(material)
-        material_name = getMaterialName(material)
-        material_dict[material_name] = material_object
-
-    # generate material porxies
-    # Three.js does not support meshs with multiple materials, however it does
-    # support materials with multiple submaterials
-    node = scene.GetRootNode()
-    if node:
-        for i in range(node.GetChildCount()):
-            generate_materials_from_hierarchy(node.GetChild(i), material_dict)
-
-    return material_dict
-
-# #####################################################
-# Generate Texture Object
-# #####################################################
-def generate_texture_object(texture):
-
-    #TODO: extract more texture properties
-    wrap_u = texture.GetWrapModeU()
-    wrap_v = texture.GetWrapModeV()
-    offset = texture.GetUVTranslation()
-
-    if type(texture) is FbxFileTexture:
-        url = texture.GetFileName()
-    else:
-        url = getTextureName( texture )
-
-    #url = replace_inFolder2OutFolder( url )
-    #print( url )
-
-    index = url.rfind( '/' )
-    if index == -1:
-        index = url.rfind( '\\' )
-    filename = url[ index+1 : len(url) ]
-
-    output = {
-
-      'url': filename,
-      'fullpath': url,
-      'repeat': serializeVector2( (1,1) ),
-      'offset': serializeVector2( texture.GetUVTranslation() ),
-      'magFilter': 'LinearFilter',
-      'minFilter': 'LinearMipMapLinearFilter',
-      'anisotropy': True
-
-    }
-
-    return output
-
-# #####################################################
-# Replace Texture input path to output
-# #####################################################
-def replace_inFolder2OutFolder(url):
-    folderIndex =  url.find(inputFolder)
-
-    if  folderIndex != -1:
-        url = url[ folderIndex+len(inputFolder): ]
-        url = outputFolder + url
-
-    return url
-
-# #####################################################
-# Replace Texture output path to input
-# #####################################################
-def replace_OutFolder2inFolder(url):
-    folderIndex =  url.find(outputFolder)
-
-    if  folderIndex != -1:
-        url = url[ folderIndex+len(outputFolder): ]
-        url = inputFolder + url
-
-    return url
-
-# #####################################################
-# Find Scene Textures
-# #####################################################
-def extract_material_textures(material_property, texture_dict):
-    if material_property.IsValid():
-        #Here we have to check if it's layeredtextures, or just textures:
-        layered_texture_count = material_property.GetSrcObjectCount(FbxCriteria.ObjectType(FbxLayeredTexture.ClassId))
-        if layered_texture_count > 0:
-            for j in range(layered_texture_count):
-                layered_texture = material_property.GetSrcObject(FbxCriteria.ObjectType(FbxLayeredTexture.ClassId), j)
-                texture_count = layered_texture.GetSrcObjectCount(FbxCriteria.ObjectType(FbxTexture.ClassId))
-                for k in range(texture_count):
-                    texture = layered_texture.GetSrcObject(FbxCriteria.ObjectType(FbxTexture.ClassId),k)
-                    if texture:
-                        texture_object = generate_texture_object(texture)
-                        texture_name = getTextureName( texture, True )
-                        texture_dict[texture_name] = texture_object
-        else:
-            # no layered texture simply get on the property
-            texture_count = material_property.GetSrcObjectCount(FbxCriteria.ObjectType(FbxTexture.ClassId))
-            for j in range(texture_count):
-                texture = material_property.GetSrcObject(FbxCriteria.ObjectType(FbxTexture.ClassId),j)
-                if texture:
-                    texture_object = generate_texture_object(texture)
-                    texture_name = getTextureName( texture, True )
-                    texture_dict[texture_name] = texture_object
-
-def extract_textures_from_node(node, texture_dict):
-    name = node.GetName()
-    mesh = node.GetNodeAttribute()
-
-    #for all materials attached to this mesh
-    material_count = mesh.GetNode().GetSrcObjectCount(FbxCriteria.ObjectType(FbxSurfaceMaterial.ClassId))
-    for material_index in range(material_count):
-        material = mesh.GetNode().GetSrcObject(FbxCriteria.ObjectType(FbxSurfaceMaterial.ClassId), material_index)
-
-        #go through all the possible textures types
-        if material:
-            texture_count = FbxLayerElement.sTypeTextureCount()
-            for texture_index in range(texture_count):
-                material_property = material.FindProperty(FbxLayerElement.sTextureChannelNames(texture_index))
-                extract_material_textures(material_property, texture_dict)
-
-def generate_textures_from_hierarchy(node, texture_dict):
-    if node.GetNodeAttribute() == None:
-        pass
-    else:
-        attribute_type = (node.GetNodeAttribute().GetAttributeType())
-        if attribute_type == FbxNodeAttribute.eMesh:
-            extract_textures_from_node(node, texture_dict)
-    for i in range(node.GetChildCount()):
-        generate_textures_from_hierarchy(node.GetChild(i), texture_dict)
-
-def generate_texture_dict(scene):
-    if not option_textures:
-        return {}
-
-    texture_dict = {}
-    node = scene.GetRootNode()
-    if node:
-        for i in range(node.GetChildCount()):
-            generate_textures_from_hierarchy(node.GetChild(i), texture_dict)
-    return texture_dict
-
-# #####################################################
-# Extract Fbx SDK Mesh Data
-# #####################################################
-def extract_fbx_vertex_positions(mesh):
-    control_points_count = mesh.GetControlPointsCount()
-    control_points = mesh.GetControlPoints()
-
-    positions = []
-    for i in range(control_points_count):
-        tmp = control_points[i]
-        tmp = [tmp[0], tmp[1], tmp[2]]
-        positions.append(tmp)
-
-    node = mesh.GetNode()
-    if node:
-        t = node.GeometricTranslation.Get()
-        t = FbxVector4(t[0], t[1], t[2], 1)
-        r = node.GeometricRotation.Get()
-        r = FbxVector4(r[0], r[1], r[2], 1)
-        s = node.GeometricScaling.Get()
-        s = FbxVector4(s[0], s[1], s[2], 1)
-
-        hasGeometricTransform = False
-        if t[0] != 0 or t[1] != 0 or t[2] != 0 or \
-           r[0] != 0 or r[1] != 0 or r[2] != 0 or \
-           s[0] != 1 or s[1] != 1 or s[2] != 1:
-            hasGeometricTransform = True
-
-        if hasGeometricTransform:
-            geo_transform = FbxMatrix(t,r,s)
-        else:
-            geo_transform = FbxMatrix()
-
-        transform = None
-
-        if option_geometry:
-            # FbxMeshes are local to their node, we need the vertices in global space
-            # when scene nodes are not exported
-            transform = node.EvaluateGlobalTransform()
-            transform = FbxMatrix(transform) * geo_transform
-
-        elif hasGeometricTransform:
-            transform = geo_transform
-
-        if transform:
-            for i in range(len(positions)):
-                v = positions[i]
-                position = FbxVector4(v[0], v[1], v[2])
-                position = transform.MultNormalize(position)
-                positions[i] = [position[0], position[1], position[2]]
-
-    return positions
-
-def extract_fbx_vertex_normals(mesh):
-#   eNone             The mapping is undetermined.
-#   eByControlPoint   There will be one mapping coordinate for each surface control point/vertex.
-#   eByPolygonVertex  There will be one mapping coordinate for each vertex, for every polygon of which it is a part. This means that a vertex will have as many mapping coordinates as polygons of which it is a part.
-#   eByPolygon        There can be only one mapping coordinate for the whole polygon.
-#   eByEdge           There will be one mapping coordinate for each unique edge in the mesh. This is meant to be used with smoothing layer elements.
-#   eAllSame          There can be only one mapping coordinate for the whole surface.
-
-    layered_normal_indices = []
-    layered_normal_values = []
-
-    poly_count = mesh.GetPolygonCount()
-    control_points = mesh.GetControlPoints()
-
-    for l in range(mesh.GetLayerCount()):
-        mesh_normals = mesh.GetLayer(l).GetNormals()
-        if not mesh_normals:
-            continue
-
-        normals_array = mesh_normals.GetDirectArray()
-        normals_count = normals_array.GetCount()
-
-        if normals_count == 0:
-            continue
-
-        normal_indices = []
-        normal_values = []
-
-        # values
-        for i in range(normals_count):
-            normal = normals_array.GetAt(i)
-            normal = [normal[0], normal[1], normal[2]]
-            normal_values.append(normal)
-
-        node = mesh.GetNode()
-        if node:
-            t = node.GeometricTranslation.Get()
-            t = FbxVector4(t[0], t[1], t[2], 1)
-            r = node.GeometricRotation.Get()
-            r = FbxVector4(r[0], r[1], r[2], 1)
-            s = node.GeometricScaling.Get()
-            s = FbxVector4(s[0], s[1], s[2], 1)
-
-            hasGeometricTransform = False
-            if t[0] != 0 or t[1] != 0 or t[2] != 0 or \
-               r[0] != 0 or r[1] != 0 or r[2] != 0 or \
-               s[0] != 1 or s[1] != 1 or s[2] != 1:
-                hasGeometricTransform = True
-
-            if hasGeometricTransform:
-                geo_transform = FbxMatrix(t,r,s)
-            else:
-                geo_transform = FbxMatrix()
-
-            transform = None
-
-            if option_geometry:
-                # FbxMeshes are local to their node, we need the vertices in global space
-                # when scene nodes are not exported
-                transform = node.EvaluateGlobalTransform()
-                transform = FbxMatrix(transform) * geo_transform
-
-            elif hasGeometricTransform:
-                transform = geo_transform
-
-            if transform:
-                t = FbxVector4(0,0,0,1)
-                transform.SetRow(3, t)
-
-                for i in range(len(normal_values)):
-                    n = normal_values[i]
-                    normal = FbxVector4(n[0], n[1], n[2])
-                    normal = transform.MultNormalize(normal)
-                    normal.Normalize()
-                    normal = [normal[0], normal[1], normal[2]]
-                    normal_values[i] = normal
-
-        # indices
-        vertexId = 0
-        for p in range(poly_count):
-            poly_size = mesh.GetPolygonSize(p)
-            poly_normals = []
-
-            for v in range(poly_size):
-                control_point_index = mesh.GetPolygonVertex(p, v)
-
-                # mapping mode is by control points. The mesh should be smooth and soft.
-                # we can get normals by retrieving each control point
-                if mesh_normals.GetMappingMode() == FbxLayerElement.eByControlPoint:
-
-                    # reference mode is direct, the normal index is same as vertex index.
-                    # get normals by the index of control vertex
-                    if mesh_normals.GetReferenceMode() == FbxLayerElement.eDirect:
-                        poly_normals.append(control_point_index)
-
-                    elif mesh_normals.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
-                        index = mesh_normals.GetIndexArray().GetAt(control_point_index)
-                        poly_normals.append(index)
-
-                # mapping mode is by polygon-vertex.
-                # we can get normals by retrieving polygon-vertex.
-                elif mesh_normals.GetMappingMode() == FbxLayerElement.eByPolygonVertex:
-
-                    if mesh_normals.GetReferenceMode() == FbxLayerElement.eDirect:
-                        poly_normals.append(vertexId)
-
-                    elif mesh_normals.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
-                        index = mesh_normals.GetIndexArray().GetAt(vertexId)
-                        poly_normals.append(index)
-
-                elif mesh_normals.GetMappingMode() == FbxLayerElement.eByPolygon or \
-                     mesh_normals.GetMappingMode() ==  FbxLayerElement.eAllSame or \
-                     mesh_normals.GetMappingMode() ==  FbxLayerElement.eNone:
-                    print("unsupported normal mapping mode for polygon vertex")
-
-                vertexId += 1
-            normal_indices.append(poly_normals)
-
-        layered_normal_values.append(normal_values)
-        layered_normal_indices.append(normal_indices)
-
-    normal_values = []
-    normal_indices = []
-
-    # Three.js only supports one layer of normals
-    if len(layered_normal_values) > 0:
-        normal_values = layered_normal_values[0]
-        normal_indices = layered_normal_indices[0]
-
-    return normal_values, normal_indices
-
-def extract_fbx_vertex_colors(mesh):
-#   eNone             The mapping is undetermined.
-#   eByControlPoint   There will be one mapping coordinate for each surface control point/vertex.
-#   eByPolygonVertex  There will be one mapping coordinate for each vertex, for every polygon of which it is a part. This means that a vertex will have as many mapping coordinates as polygons of which it is a part.
-#   eByPolygon        There can be only one mapping coordinate for the whole polygon.
-#   eByEdge           There will be one mapping coordinate for each unique edge in the mesh. This is meant to be used with smoothing layer elements.
-#   eAllSame          There can be only one mapping coordinate for the whole surface.
-
-    layered_color_indices = []
-    layered_color_values = []
-
-    poly_count = mesh.GetPolygonCount()
-    control_points = mesh.GetControlPoints()
-
-    for l in range(mesh.GetLayerCount()):
-        mesh_colors = mesh.GetLayer(l).GetVertexColors()
-        if not mesh_colors:
-            continue
-
-        colors_array = mesh_colors.GetDirectArray()
-        colors_count = colors_array.GetCount()
-
-        if colors_count == 0:
-            continue
-
-        color_indices = []
-        color_values = []
-
-        # values
-        for i in range(colors_count):
-            color = colors_array.GetAt(i)
-            color = [color.mRed, color.mGreen, color.mBlue, color.mAlpha]
-            color_values.append(color)
-
-        # indices
-        vertexId = 0
-        for p in range(poly_count):
-            poly_size = mesh.GetPolygonSize(p)
-            poly_colors = []
-
-            for v in range(poly_size):
-                control_point_index = mesh.GetPolygonVertex(p, v)
-
-                if mesh_colors.GetMappingMode() == FbxLayerElement.eByControlPoint:
-                    if mesh_colors.GetReferenceMode() == FbxLayerElement.eDirect:
-                        poly_colors.append(control_point_index)
-                    elif mesh_colors.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
-                        index = mesh_colors.GetIndexArray().GetAt(control_point_index)
-                        poly_colors.append(index)
-                elif mesh_colors.GetMappingMode() == FbxLayerElement.eByPolygonVertex:
-                    if mesh_colors.GetReferenceMode() == FbxLayerElement.eDirect:
-                        poly_colors.append(vertexId)
-                    elif mesh_colors.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
-                        index = mesh_colors.GetIndexArray().GetAt(vertexId)
-                        poly_colors.append(index)
-                elif mesh_colors.GetMappingMode() == FbxLayerElement.eByPolygon or \
-                     mesh_colors.GetMappingMode() ==  FbxLayerElement.eAllSame or \
-                     mesh_colors.GetMappingMode() ==  FbxLayerElement.eNone:
-                    print("unsupported color mapping mode for polygon vertex")
-
-                vertexId += 1
-            color_indices.append(poly_colors)
-
-        layered_color_indices.append( color_indices )
-        layered_color_values.append( color_values )
-
-    color_values = []
-    color_indices = []
-
-    # Three.js only supports one layer of colors
-    if len(layered_color_values) > 0:
-        color_values = layered_color_values[0]
-        color_indices = layered_color_indices[0]
-
-    '''
-    # The Fbx SDK defaults mesh.Color to (0.8, 0.8, 0.8)
-    # This causes most models to receive incorrect vertex colors
-    if len(color_values) == 0:
-        color = mesh.Color.Get()
-        color_values = [[color[0], color[1], color[2]]]
-        color_indices = []
-        for p in range(poly_count):
-            poly_size = mesh.GetPolygonSize(p)
-            color_indices.append([0] * poly_size)
-    '''
-
-    return color_values, color_indices
-
-def extract_fbx_vertex_uvs(mesh):
-#   eNone             The mapping is undetermined.
-#   eByControlPoint   There will be one mapping coordinate for each surface control point/vertex.
-#   eByPolygonVertex  There will be one mapping coordinate for each vertex, for every polygon of which it is a part. This means that a vertex will have as many mapping coordinates as polygons of which it is a part.
-#   eByPolygon        There can be only one mapping coordinate for the whole polygon.
-#   eByEdge           There will be one mapping coordinate for each unique edge in the mesh. This is meant to be used with smoothing layer elements.
-#   eAllSame          There can be only one mapping coordinate for the whole surface.
-
-    layered_uv_indices = []
-    layered_uv_values = []
-
-    poly_count = mesh.GetPolygonCount()
-    control_points = mesh.GetControlPoints()
-
-    for l in range(mesh.GetLayerCount()):
-        mesh_uvs = mesh.GetLayer(l).GetUVs()
-        if not mesh_uvs:
-            continue
-
-        uvs_array = mesh_uvs.GetDirectArray()
-        uvs_count = uvs_array.GetCount()
-
-        if uvs_count == 0:
-            continue
-
-        uv_indices = []
-        uv_values = []
-
-        # values
-        for i in range(uvs_count):
-            uv = uvs_array.GetAt(i)
-            uv = [uv[0], uv[1]]
-            uv_values.append(uv)
-
-        # indices
-        vertexId = 0
-        for p in range(poly_count):
-            poly_size = mesh.GetPolygonSize(p)
-            poly_uvs = []
-
-            for v in range(poly_size):
-                control_point_index = mesh.GetPolygonVertex(p, v)
-
-                if mesh_uvs.GetMappingMode() == FbxLayerElement.eByControlPoint:
-                    if mesh_uvs.GetReferenceMode() == FbxLayerElement.eDirect:
-                        poly_uvs.append(control_point_index)
-                    elif mesh_uvs.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
-                        index = mesh_uvs.GetIndexArray().GetAt(control_point_index)
-                        poly_uvs.append(index)
-                elif mesh_uvs.GetMappingMode() == FbxLayerElement.eByPolygonVertex:
-                    uv_texture_index = mesh_uvs.GetIndexArray().GetAt(vertexId)
-
-                    if mesh_uvs.GetReferenceMode() == FbxLayerElement.eDirect or \
-                       mesh_uvs.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
-                        poly_uvs.append(uv_texture_index)
-                elif mesh_uvs.GetMappingMode() == FbxLayerElement.eByPolygon or \
-                     mesh_uvs.GetMappingMode() ==  FbxLayerElement.eAllSame or \
-                     mesh_uvs.GetMappingMode() ==  FbxLayerElement.eNone:
-                    print("unsupported uv mapping mode for polygon vertex")
-
-                vertexId += 1
-            uv_indices.append(poly_uvs)
-
-        layered_uv_values.append(uv_values)
-        layered_uv_indices.append(uv_indices)
-
-    return layered_uv_values, layered_uv_indices
-
-# #####################################################
-# Process Mesh Geometry
-# #####################################################
-def generate_normal_key(normal):
-    return (round(normal[0], 6), round(normal[1], 6), round(normal[2], 6))
-
-def generate_color_key(color):
-    return getHex(color)
-
-def generate_uv_key(uv):
-    return (round(uv[0], 6), round(uv[1], 6))
-
-def append_non_duplicate_uvs(source_uvs, dest_uvs, counts):
-    source_layer_count = len(source_uvs)
-    for layer_index in range(source_layer_count):
-
-        dest_layer_count = len(dest_uvs)
-
-        if dest_layer_count <= layer_index:
-            dest_uv_layer = {}
-            count = 0
-            dest_uvs.append(dest_uv_layer)
-            counts.append(count)
-        else:
-            dest_uv_layer = dest_uvs[layer_index]
-            count = counts[layer_index]
-
-        source_uv_layer = source_uvs[layer_index]
-
-        for uv in source_uv_layer:
-            key = generate_uv_key(uv)
-            if key not in dest_uv_layer:
-                dest_uv_layer[key] = count
-                count += 1
-
-        counts[layer_index] = count
-
-    return counts
-
-def generate_unique_normals_dictionary(mesh_list):
-    normals_dictionary = {}
-    nnormals = 0
-
-    # Merge meshes, remove duplicate data
-    for mesh in mesh_list:
-        node = mesh.GetNode()
-        normal_values, normal_indices = extract_fbx_vertex_normals(mesh)
-
-        if len(normal_values) > 0:
-            for normal in normal_values:
-                key = generate_normal_key(normal)
-                if key not in normals_dictionary:
-                    normals_dictionary[key] = nnormals
-                    nnormals += 1
-
-    return normals_dictionary
-
-def generate_unique_colors_dictionary(mesh_list):
-    colors_dictionary = {}
-    ncolors = 0
-
-    # Merge meshes, remove duplicate data
-    for mesh in mesh_list:
-        color_values, color_indices = extract_fbx_vertex_colors(mesh)
-
-        if len(color_values) > 0:
-            for color in color_values:
-                key = generate_color_key(color)
-                if key not in colors_dictionary:
-                    colors_dictionary[key] = ncolors
-                    ncolors += 1
-
-    return colors_dictionary
-
-def generate_unique_uvs_dictionary_layers(mesh_list):
-    uvs_dictionary_layers = []
-    nuvs_list = []
-
-    # Merge meshes, remove duplicate data
-    for mesh in mesh_list:
-        uv_values, uv_indices = extract_fbx_vertex_uvs(mesh)
-
-        if len(uv_values) > 0:
-            nuvs_list = append_non_duplicate_uvs(uv_values, uvs_dictionary_layers, nuvs_list)
-
-    return uvs_dictionary_layers
-
-def generate_normals_from_dictionary(normals_dictionary):
-    normal_values = []
-    for key, index in sorted(normals_dictionary.items(), key = operator.itemgetter(1)):
-        normal_values.append(key)
-
-    return normal_values
-
-def generate_colors_from_dictionary(colors_dictionary):
-    color_values = []
-    for key, index in sorted(colors_dictionary.items(), key = operator.itemgetter(1)):
-        color_values.append(key)
-
-    return color_values
-
-def generate_uvs_from_dictionary_layers(uvs_dictionary_layers):
-    uv_values = []
-    for uvs_dictionary in uvs_dictionary_layers:
-        uv_values_layer = []
-        for key, index in sorted(uvs_dictionary.items(), key = operator.itemgetter(1)):
-            uv_values_layer.append(key)
-        uv_values.append(uv_values_layer)
-
-    return uv_values
-
-def generate_normal_indices_for_poly(poly_index, mesh_normal_values, mesh_normal_indices, normals_to_indices):
-    if len(mesh_normal_indices) <= 0:
-        return []
-
-    poly_normal_indices = mesh_normal_indices[poly_index]
-    poly_size = len(poly_normal_indices)
-
-    output_poly_normal_indices = []
-    for v in range(poly_size):
-        normal_index = poly_normal_indices[v]
-        normal_value = mesh_normal_values[normal_index]
-
-        key = generate_normal_key(normal_value)
-
-        output_index = normals_to_indices[key]
-        output_poly_normal_indices.append(output_index)
-
-    return output_poly_normal_indices
-
-def generate_color_indices_for_poly(poly_index, mesh_color_values, mesh_color_indices, colors_to_indices):
-    if len(mesh_color_indices) <= 0:
-        return []
-
-    poly_color_indices = mesh_color_indices[poly_index]
-    poly_size = len(poly_color_indices)
-
-    output_poly_color_indices = []
-    for v in range(poly_size):
-        color_index = poly_color_indices[v]
-        color_value = mesh_color_values[color_index]
-
-        key = generate_color_key(color_value)
-
-        output_index = colors_to_indices[key]
-        output_poly_color_indices.append(output_index)
-
-    return output_poly_color_indices
-
-def generate_uv_indices_for_poly(poly_index, mesh_uv_values, mesh_uv_indices, uvs_to_indices):
-    if len(mesh_uv_indices) <= 0:
-        return []
-
-    poly_uv_indices = mesh_uv_indices[poly_index]
-    poly_size = len(poly_uv_indices)
-
-    output_poly_uv_indices = []
-    for v in range(poly_size):
-        uv_index = poly_uv_indices[v]
-        uv_value = mesh_uv_values[uv_index]
-
-        key = generate_uv_key(uv_value)
-
-        output_index = uvs_to_indices[key]
-        output_poly_uv_indices.append(output_index)
-
-    return output_poly_uv_indices
-
-def process_mesh_vertices(mesh_list):
-    vertex_offset = 0
-    vertex_offset_list = [0]
-    vertices = []
-    for mesh in mesh_list:
-        node = mesh.GetNode()
-        mesh_vertices = extract_fbx_vertex_positions(mesh)
-
-        vertices.extend(mesh_vertices[:])
-        vertex_offset += len(mesh_vertices)
-        vertex_offset_list.append(vertex_offset)
-
-    return vertices, vertex_offset_list
-
-
-def process_mesh_materials(mesh_list):
-    material_offset = 0
-    material_offset_list = [0]
-    materials_list = []
-
-    #TODO: remove duplicate mesh references
-    for mesh in mesh_list:
-        node = mesh.GetNode()
-
-        material_count = node.GetMaterialCount()
-        if material_count > 0:
-            for l in range(mesh.GetLayerCount()):
-                materials = mesh.GetLayer(l).GetMaterials()
-                if materials:
-                    if materials.GetReferenceMode() == FbxLayerElement.eIndex:
-                        #Materials are in an undefined external table
-                        continue
-
-                    for i in range(material_count):
-                        material = node.GetMaterial(i)
-                        materials_list.append( material )
-
-                    material_offset += material_count
-                    material_offset_list.append(material_offset)
-
-    return materials_list, material_offset_list
-
-def process_mesh_polygons(mesh_list, normals_to_indices, colors_to_indices, uvs_to_indices_list, vertex_offset_list, material_offset_list):
-    faces = []
-    for mesh_index in range(len(mesh_list)):
-        mesh = mesh_list[mesh_index]
-
-        flipWindingOrder = False
-        node = mesh.GetNode()
-        if node:
-            local_scale = node.EvaluateLocalScaling()
-            if local_scale[0] < 0 or local_scale[1] < 0 or local_scale[2] < 0:
-                flipWindingOrder = True
-
-        poly_count = mesh.GetPolygonCount()
-        control_points = mesh.GetControlPoints()
-
-        normal_values, normal_indices = extract_fbx_vertex_normals(mesh)
-        color_values, color_indices = extract_fbx_vertex_colors(mesh)
-        uv_values_layers, uv_indices_layers = extract_fbx_vertex_uvs(mesh)
-
-        for poly_index in range(poly_count):
-            poly_size = mesh.GetPolygonSize(poly_index)
-
-            face_normals = generate_normal_indices_for_poly(poly_index, normal_values, normal_indices, normals_to_indices)
-            face_colors = generate_color_indices_for_poly(poly_index, color_values, color_indices, colors_to_indices)
-
-            face_uv_layers = []
-            for l in range(len(uv_indices_layers)):
-                uv_values = uv_values_layers[l]
-                uv_indices = uv_indices_layers[l]
-                face_uv_indices = generate_uv_indices_for_poly(poly_index, uv_values, uv_indices, uvs_to_indices_list[l])
-                face_uv_layers.append(face_uv_indices)
-
-            face_vertices = []
-            for vertex_index in range(poly_size):
-                control_point_index = mesh.GetPolygonVertex(poly_index, vertex_index)
-                face_vertices.append(control_point_index)
-
-            #TODO: assign a default material to any mesh without one
-            if len(material_offset_list) <= mesh_index:
-                material_offset = 0
-            else:
-                material_offset = material_offset_list[mesh_index]
-
-            vertex_offset = vertex_offset_list[mesh_index]
-
-            if poly_size > 4:
-                new_face_normals = []
-                new_face_colors = []
-                new_face_uv_layers = []
-
-                for i in range(poly_size - 2):
-                    new_face_vertices = [face_vertices[0], face_vertices[i+1], face_vertices[i+2]]
-
-                    if len(face_normals):
-                        new_face_normals = [face_normals[0], face_normals[i+1], face_normals[i+2]]
-                    if len(face_colors):
-                        new_face_colors = [face_colors[0], face_colors[i+1], face_colors[i+2]]
-                    if len(face_uv_layers):
-                        new_face_uv_layers = []
-                        for layer in face_uv_layers:
-                            new_face_uv_layers.append([layer[0], layer[i+1], layer[i+2]])
-
-                    face = generate_mesh_face(mesh,
-                        poly_index,
-                        new_face_vertices,
-                        new_face_normals,
-                        new_face_colors,
-                        new_face_uv_layers,
-                        vertex_offset,
-                        material_offset,
-                        flipWindingOrder)
-                    faces.append(face)
-            else:
-                face = generate_mesh_face(mesh,
-                          poly_index,
-                          face_vertices,
-                          face_normals,
-                          face_colors,
-                          face_uv_layers,
-                          vertex_offset,
-                          material_offset,
-                          flipWindingOrder)
-                faces.append(face)
-
-    return faces
-
-def generate_mesh_face(mesh, polygon_index, vertex_indices, normals, colors, uv_layers, vertex_offset, material_offset, flipOrder):
-    isTriangle = ( len(vertex_indices) == 3 )
-    nVertices = 3 if isTriangle else 4
-
-    hasMaterial = False
-    for l in range(mesh.GetLayerCount()):
-        materials = mesh.GetLayer(l).GetMaterials()
-        if materials:
-            hasMaterial = True
-            break
-
-    hasFaceUvs = False
-    hasFaceVertexUvs = len(uv_layers) > 0
-    hasFaceNormals = False
-    hasFaceVertexNormals = len(normals) > 0
-    hasFaceColors = False
-    hasFaceVertexColors = len(colors) > 0
-
-    faceType = 0
-    faceType = setBit(faceType, 0, not isTriangle)
-    faceType = setBit(faceType, 1, hasMaterial)
-    faceType = setBit(faceType, 2, hasFaceUvs)
-    faceType = setBit(faceType, 3, hasFaceVertexUvs)
-    faceType = setBit(faceType, 4, hasFaceNormals)
-    faceType = setBit(faceType, 5, hasFaceVertexNormals)
-    faceType = setBit(faceType, 6, hasFaceColors)
-    faceType = setBit(faceType, 7, hasFaceVertexColors)
-
-    faceData = []
-
-    # order is important, must match order in JSONLoader
-
-    # face type
-    # vertex indices
-    # material index
-    # face uvs index
-    # face vertex uvs indices
-    # face color index
-    # face vertex colors indices
-
-    faceData.append(faceType)
-
-    if flipOrder:
-        if nVertices == 3:
-            vertex_indices = [vertex_indices[0], vertex_indices[2], vertex_indices[1]]
-            if hasFaceVertexNormals:
-                normals = [normals[0], normals[2], normals[1]]
-            if hasFaceVertexColors:
-                colors = [colors[0], colors[2], colors[1]]
-            if hasFaceVertexUvs:
-                tmp = []
-                for polygon_uvs in uv_layers:
-                    tmp.append([polygon_uvs[0], polygon_uvs[2], polygon_uvs[1]])
-                uv_layers = tmp
-        else:
-            vertex_indices = [vertex_indices[0], vertex_indices[3], vertex_indices[2], vertex_indices[1]]
-            if hasFaceVertexNormals:
-                normals = [normals[0], normals[3], normals[2], normals[1]]
-            if hasFaceVertexColors:
-                colors = [colors[0], colors[3], colors[2], colors[1]]
-            if hasFaceVertexUvs:
-                tmp = []
-                for polygon_uvs in uv_layers:
-                    tmp.append([polygon_uvs[0], polygon_uvs[3], polygon_uvs[2], polygon_uvs[3]])
-                uv_layers = tmp
-
-    for i in range(nVertices):
-        index = vertex_indices[i] + vertex_offset
-        faceData.append(index)
-
-    if hasMaterial:
-        material_id = 0
-        for l in range(mesh.GetLayerCount()):
-            materials = mesh.GetLayer(l).GetMaterials()
-            if materials:
-                material_id = materials.GetIndexArray().GetAt(polygon_index)
-                break
-        material_id += material_offset
-        faceData.append( material_id )
-
-    if hasFaceVertexUvs:
-        for polygon_uvs in uv_layers:
-            for i in range(nVertices):
-                index = polygon_uvs[i]
-                faceData.append(index)
-
-    if hasFaceVertexNormals:
-        for i in range(nVertices):
-            index = normals[i]
-            faceData.append(index)
-
-    if hasFaceVertexColors:
-        for i in range(nVertices):
-            index = colors[i]
-            faceData.append(index)
-
-    return faceData
-
-# #####################################################
-# Generate Mesh Object (for scene output format)
-# #####################################################
-def generate_scene_output(node):
-    mesh = node.GetNodeAttribute()
-
-    # This is done in order to keep the scene output and non-scene output code DRY
-    mesh_list = [ mesh ]
-
-    # Extract the mesh data into arrays
-    vertices, vertex_offsets = process_mesh_vertices(mesh_list)
-    materials, material_offsets = process_mesh_materials(mesh_list)
-
-    normals_to_indices = generate_unique_normals_dictionary(mesh_list)
-    colors_to_indices = generate_unique_colors_dictionary(mesh_list)
-    uvs_to_indices_list = generate_unique_uvs_dictionary_layers(mesh_list)
-
-    normal_values = generate_normals_from_dictionary(normals_to_indices)
-    color_values = generate_colors_from_dictionary(colors_to_indices)
-    uv_values = generate_uvs_from_dictionary_layers(uvs_to_indices_list)
-
-    # Generate mesh faces for the Three.js file format
-    faces = process_mesh_polygons(mesh_list,
-                normals_to_indices,
-                colors_to_indices,
-                uvs_to_indices_list,
-                vertex_offsets,
-                material_offsets)
-
-    # Generate counts for uvs, vertices, normals, colors, and faces
-    nuvs = []
-    for layer_index, uvs in enumerate(uv_values):
-        nuvs.append(str(len(uvs)))
-
-    nvertices = len(vertices)
-    nnormals = len(normal_values)
-    ncolors = len(color_values)
-    nfaces = len(faces)
-
-    # Flatten the arrays, currently they are in the form of [[0, 1, 2], [3, 4, 5], ...]
-    vertices = [val for v in vertices for val in v]
-    normal_values = [val for n in normal_values for val in n]
-    color_values = [c for c in color_values]
-    faces = [val for f in faces for val in f]
-    uv_values = generate_uvs(uv_values)
-
-    # Disable automatic json indenting when pretty printing for the arrays
-    if option_pretty_print:
-        nuvs = NoIndent(nuvs)
-        vertices = ChunkedIndent(vertices, 15, True)
-        normal_values = ChunkedIndent(normal_values, 15, True)
-        color_values = ChunkedIndent(color_values, 15)
-        faces = ChunkedIndent(faces, 30)
-
-    metadata = {
-      'vertices' : nvertices,
-      'normals' : nnormals,
-      'colors' : ncolors,
-      'faces' : nfaces,
-      'uvs' : nuvs
-    }
-
-    output = {
-      'scale' : 1,
-      'materials' : [],
-      'vertices' : vertices,
-      'normals' : [] if nnormals <= 0 else normal_values,
-      'colors' : [] if ncolors <= 0 else color_values,
-      'uvs' : uv_values,
-      'faces' : faces
-    }
-
-    if option_pretty_print:
-        output['0metadata'] = metadata
-    else:
-        output['metadata'] = metadata
-
-    return output
-
-# #####################################################
-# Generate Mesh Object (for non-scene output)
-# #####################################################
-def generate_non_scene_output(scene):
-    mesh_list = generate_mesh_list(scene)
-
-    # Extract the mesh data into arrays
-    vertices, vertex_offsets = process_mesh_vertices(mesh_list)
-    materials, material_offsets = process_mesh_materials(mesh_list)
-
-    normals_to_indices = generate_unique_normals_dictionary(mesh_list)
-    colors_to_indices = generate_unique_colors_dictionary(mesh_list)
-    uvs_to_indices_list = generate_unique_uvs_dictionary_layers(mesh_list)
-
-    normal_values = generate_normals_from_dictionary(normals_to_indices)
-    color_values = generate_colors_from_dictionary(colors_to_indices)
-    uv_values = generate_uvs_from_dictionary_layers(uvs_to_indices_list)
-
-    # Generate mesh faces for the Three.js file format
-    faces = process_mesh_polygons(mesh_list,
-                normals_to_indices,
-                colors_to_indices,
-                uvs_to_indices_list,
-                vertex_offsets,
-                material_offsets)
-
-    # Generate counts for uvs, vertices, normals, colors, and faces
-    nuvs = []
-    for layer_index, uvs in enumerate(uv_values):
-        nuvs.append(str(len(uvs)))
-
-    nvertices = len(vertices)
-    nnormals = len(normal_values)
-    ncolors = len(color_values)
-    nfaces = len(faces)
-
-    # Flatten the arrays, currently they are in the form of [[0, 1, 2], [3, 4, 5], ...]
-    vertices = [val for v in vertices for val in v]
-    normal_values = [val for n in normal_values for val in n]
-    color_values = [c for c in color_values]
-    faces = [val for f in faces for val in f]
-    uv_values = generate_uvs(uv_values)
-
-    # Disable json indenting when pretty printing for the arrays
-    if option_pretty_print:
-        nuvs = NoIndent(nuvs)
-        vertices = NoIndent(vertices)
-        normal_values = NoIndent(normal_values)
-        color_values = NoIndent(color_values)
-        faces = NoIndent(faces)
-
-    metadata = {
-      'formatVersion' : 3,
-      'type' : 'geometry',
-      'generatedBy' : 'convert-to-threejs.py',
-      'vertices' : nvertices,
-      'normals' : nnormals,
-      'colors' : ncolors,
-      'faces' : nfaces,
-      'uvs' : nuvs
-    }
-
-    output = {
-      'scale' : 1,
-      'materials' : [],
-      'vertices' : vertices,
-      'normals' : [] if nnormals <= 0 else normal_values,
-      'colors' : [] if ncolors <= 0 else color_values,
-      'uvs' : uv_values,
-      'faces' : faces,
-      'textures': {}
-    }
-
-    if option_pretty_print:
-        output['0metadata'] = metadata
-    else:
-        output['metadata'] = metadata
-
-    return output
-
-def generate_mesh_list_from_hierarchy(node, mesh_list):
-    if node.GetNodeAttribute() == None:
-        pass
-    else:
-        attribute_type = (node.GetNodeAttribute().GetAttributeType())
-        if attribute_type == FbxNodeAttribute.eMesh or \
-           attribute_type == FbxNodeAttribute.eNurbs or \
-           attribute_type == FbxNodeAttribute.eNurbsSurface or \
-           attribute_type == FbxNodeAttribute.ePatch:
-
-            if attribute_type != FbxNodeAttribute.eMesh:
-                converter.Triangulate(node.GetNodeAttribute(), True);
-
-            mesh_list.append(node.GetNodeAttribute())
-
-    for i in range(node.GetChildCount()):
-        generate_mesh_list_from_hierarchy(node.GetChild(i), mesh_list)
-
-def generate_mesh_list(scene):
-    mesh_list = []
-    node = scene.GetRootNode()
-    if node:
-        for i in range(node.GetChildCount()):
-            generate_mesh_list_from_hierarchy(node.GetChild(i), mesh_list)
-    return mesh_list
-
-# #####################################################
-# Generate Embed Objects
-# #####################################################
-def generate_embed_dict_from_hierarchy(node, embed_dict):
-    if node.GetNodeAttribute() == None:
-        pass
-    else:
-        attribute_type = (node.GetNodeAttribute().GetAttributeType())
-        if attribute_type == FbxNodeAttribute.eMesh or \
-           attribute_type == FbxNodeAttribute.eNurbs or \
-           attribute_type == FbxNodeAttribute.eNurbsSurface or \
-           attribute_type == FbxNodeAttribute.ePatch:
-
-            if attribute_type != FbxNodeAttribute.eMesh:
-                converter.Triangulate(node.GetNodeAttribute(), True);
-
-            embed_object = generate_scene_output(node)
-            embed_name = getPrefixedName(node, 'Embed')
-            embed_dict[embed_name] = embed_object
-
-    for i in range(node.GetChildCount()):
-        generate_embed_dict_from_hierarchy(node.GetChild(i), embed_dict)
-
-def generate_embed_dict(scene):
-    embed_dict = {}
-    node = scene.GetRootNode()
-    if node:
-        for i in range(node.GetChildCount()):
-            generate_embed_dict_from_hierarchy(node.GetChild(i), embed_dict)
-    return embed_dict
-
-# #####################################################
-# Generate Geometry Objects
-# #####################################################
-def generate_geometry_object(node):
-
-    output = {
-      'type' : 'embedded',
-      'id' : getPrefixedName( node, 'Embed' )
-    }
-
-    return output
-
-def generate_geometry_dict_from_hierarchy(node, geometry_dict):
-    if node.GetNodeAttribute() == None:
-        pass
-    else:
-        attribute_type = (node.GetNodeAttribute().GetAttributeType())
-        if attribute_type == FbxNodeAttribute.eMesh:
-            geometry_object = generate_geometry_object(node)
-            geometry_name = getPrefixedName( node, 'Geometry' )
-            geometry_dict[geometry_name] = geometry_object
-    for i in range(node.GetChildCount()):
-        generate_geometry_dict_from_hierarchy(node.GetChild(i), geometry_dict)
-
-def generate_geometry_dict(scene):
-    geometry_dict = {}
-    node = scene.GetRootNode()
-    if node:
-        for i in range(node.GetChildCount()):
-            generate_geometry_dict_from_hierarchy(node.GetChild(i), geometry_dict)
-    return geometry_dict
-
-
-# #####################################################
-# Generate Light Node Objects
-# #####################################################
-def generate_default_light():
-    direction = (1,1,1)
-    color = (1,1,1)
-    intensity = 80.0
-
-    output = {
-      'type': 'DirectionalLight',
-      'color': getHex(color),
-      'intensity': intensity/100.00,
-      'direction': serializeVector3( direction ),
-      'target': getObjectName( None )
-    }
-
-    return output
-
-def generate_light_object(node):
-    light = node.GetNodeAttribute()
-    light_types = ["point", "directional", "spot", "area", "volume"]
-    light_type = light_types[light.LightType.Get()]
-
-    transform = node.EvaluateLocalTransform()
-    position = transform.GetT()
-
-    output = None
-
-    if light_type == "directional":
-
-        # Three.js directional lights emit light from a point in 3d space to a target node or the origin.
-        # When there is no target, we need to take a point, one unit away from the origin, and move it
-        # into the right location so that the origin acts like the target
-
-        if node.GetTarget():
-            direction = position
-        else:
-            translation = FbxVector4(0,0,0,0)
-            scale = FbxVector4(1,1,1,1)
-            rotation = transform.GetR()
-            matrix = FbxMatrix(translation, rotation, scale)
-            direction = matrix.MultNormalize(FbxVector4(0,1,0,1))
-
-        output = {
-
-          'type': 'DirectionalLight',
-          'color': getHex(light.Color.Get()),
-          'intensity': light.Intensity.Get()/100.0,
-          'direction': serializeVector3( direction ),
-          'target': getObjectName( node.GetTarget() )
-
-        }
-
-    elif light_type == "point":
-
-        output = {
-
-          'type': 'PointLight',
-          'color': getHex(light.Color.Get()),
-          'intensity': light.Intensity.Get()/100.0,
-          'position': serializeVector3( position ),
-          'distance': light.FarAttenuationEnd.Get()
-
-        }
-
-    elif light_type == "spot":
-
-        output = {
-
-          'type': 'SpotLight',
-          'color': getHex(light.Color.Get()),
-          'intensity': light.Intensity.Get()/100.0,
-          'position': serializeVector3( position ),
-          'distance': light.FarAttenuationEnd.Get(),
-          'angle': light.OuterAngle.Get()*math.pi/180,
-          'exponent': light.DecayType.Get(),
-          'target': getObjectName( node.GetTarget() )
-
-        }
-
-    # TODO (abelnation): handle area lights
-
-    return output
-
-def generate_ambient_light(scene):
-
-    scene_settings = scene.GetGlobalSettings()
-    ambient_color = scene_settings.GetAmbientColor()
-    ambient_color = (ambient_color.mRed, ambient_color.mGreen, ambient_color.mBlue)
-
-    if ambient_color[0] == 0 and ambient_color[1] == 0 and ambient_color[2] == 0:
-        return None
-
-    output = {
-
-      'type': 'AmbientLight',
-      'color': getHex(ambient_color)
-
-    }
-
-    return output
-
-# #####################################################
-# Generate Camera Node Objects
-# #####################################################
-def generate_default_camera():
-    position = (100, 100, 100)
-    near = 0.1
-    far = 1000
-    fov = 75
-
-    output = {
-      'type': 'PerspectiveCamera',
-      'fov': fov,
-      'near': near,
-      'far': far,
-      'position': serializeVector3( position )
-    }
-
-    return output
-
-def generate_camera_object(node):
-    camera = node.GetNodeAttribute()
-    position = camera.Position.Get()
-
-    projection_types = [ "perspective", "orthogonal" ]
-    projection = projection_types[camera.ProjectionType.Get()]
-
-    near = camera.NearPlane.Get()
-    far = camera.FarPlane.Get()
-
-    name = getObjectName( node )
-    output = {}
-
-    if projection == "perspective":
-
-        aspect = camera.PixelAspectRatio.Get()
-        fov = camera.FieldOfView.Get()
-
-        output = {
-
-          'type': 'PerspectiveCamera',
-          'fov': fov,
-          'aspect': aspect,
-          'near': near,
-          'far': far,
-          'position': serializeVector3( position )
-
-        }
-
-    elif projection == "orthogonal":
-
-        left = ""
-        right = ""
-        top = ""
-        bottom = ""
-
-        output = {
-
-          'type': 'PerspectiveCamera',
-          'left': left,
-          'right': right,
-          'top': top,
-          'bottom': bottom,
-          'near': near,
-          'far': far,
-          'position': serializeVector3( position )
-
-        }
-
-    return output
-
-# #####################################################
-# Generate Camera Names
-# #####################################################
-def generate_camera_name_list_from_hierarchy(node, camera_list):
-    if node.GetNodeAttribute() == None:
-        pass
-    else:
-        attribute_type = (node.GetNodeAttribute().GetAttributeType())
-        if attribute_type == FbxNodeAttribute.eCamera:
-            camera_string = getObjectName(node)
-            camera_list.append(camera_string)
-    for i in range(node.GetChildCount()):
-        generate_camera_name_list_from_hierarchy(node.GetChild(i), camera_list)
-
-def generate_camera_name_list(scene):
-    camera_list = []
-    node = scene.GetRootNode()
-    if node:
-        for i in range(node.GetChildCount()):
-            generate_camera_name_list_from_hierarchy(node.GetChild(i), camera_list)
-    return camera_list
-
-# #####################################################
-# Generate Mesh Node Object
-# #####################################################
-def generate_mesh_object(node):
-    mesh = node.GetNodeAttribute()
-    transform = node.EvaluateLocalTransform()
-    position = transform.GetT()
-    scale = transform.GetS()
-    rotation = getRadians(transform.GetR())
-    quaternion = transform.GetQ()
-
-    material_count = node.GetMaterialCount()
-    material_name = ""
-
-    if material_count > 0:
-        material_names = []
-        for l in range(mesh.GetLayerCount()):
-            materials = mesh.GetLayer(l).GetMaterials()
-            if materials:
-                if materials.GetReferenceMode() == FbxLayerElement.eIndex:
-                    #Materials are in an undefined external table
-                    continue
-                for i in range(material_count):
-                    material = node.GetMaterial(i)
-                    material_names.append( getMaterialName(material) )
-
-        if not material_count > 1 and not len(material_names) > 0:
-            material_names.append('')
-
-        #If this mesh has more than one material, use a proxy material
-        material_name = getMaterialName( node, True) if material_count > 1 else material_names[0]
-
-    output = {
-      'geometry': getPrefixedName( node, 'Geometry' ),
-      'material': material_name,
-      'position': serializeVector3( position ),
-      'quaternion': serializeVector4( quaternion ),
-      'scale': serializeVector3( scale ),
-      'visible': True,
-    }
-
-    return output
-
-# #####################################################
-# Generate Node Object
-# #####################################################
-def generate_object(node):
-    node_types = ["Unknown", "Null", "Marker", "Skeleton", "Mesh", "Nurbs", "Patch", "Camera",
-    "CameraStereo", "CameraSwitcher", "Light", "OpticalReference", "OpticalMarker", "NurbsCurve",
-    "TrimNurbsSurface", "Boundary", "NurbsSurface", "Shape", "LODGroup", "SubDiv", "CachedEffect", "Line"]
-
-    transform = node.EvaluateLocalTransform()
-    position = transform.GetT()
-    scale = transform.GetS()
-    rotation = getRadians(transform.GetR())
-    quaternion = transform.GetQ()
-
-    node_type = ""
-    if node.GetNodeAttribute() == None:
-        node_type = "Null"
-    else:
-        node_type = node_types[node.GetNodeAttribute().GetAttributeType()]
-
-    name = getObjectName( node )
-    output = {
-      'fbx_type': node_type,
-      'position': serializeVector3( position ),
-      'quaternion': serializeVector4( quaternion ),
-      'scale': serializeVector3( scale ),
-      'visible': True
-    }
-
-    return output
-
-# #####################################################
-# Parse Scene Node Objects
-# #####################################################
-def generate_object_hierarchy(node, object_dict):
-    object_count = 0
-    if node.GetNodeAttribute() == None:
-        object_data = generate_object(node)
-    else:
-        attribute_type = (node.GetNodeAttribute().GetAttributeType())
-        if attribute_type == FbxNodeAttribute.eMesh:
-            object_data = generate_mesh_object(node)
-        elif attribute_type == FbxNodeAttribute.eLight:
-            object_data = generate_light_object(node)
-        elif attribute_type == FbxNodeAttribute.eCamera:
-            object_data = generate_camera_object(node)
-        else:
-            object_data = generate_object(node)
-
-    object_count += 1
-    object_name = getObjectName(node)
-
-    object_children = {}
-    for i in range(node.GetChildCount()):
-        object_count += generate_object_hierarchy(node.GetChild(i), object_children)
-
-    if node.GetChildCount() > 0:
-        # Having 'children' above other attributes is hard to read.
-        # We can send it to the bottom using the last letter of the alphabet 'z'.
-        # This letter is removed from the final output.
-        if option_pretty_print:
-            object_data['zchildren'] = object_children
-        else:
-            object_data['children'] = object_children
-
-    object_dict[object_name] = object_data
-
-    return object_count
-
-def generate_scene_objects(scene):
-    object_count = 0
-    object_dict = {}
-
-    ambient_light = generate_ambient_light(scene)
-    if ambient_light:
-        object_dict['AmbientLight'] = ambient_light
-        object_count += 1
-
-    if option_default_light:
-        default_light = generate_default_light()
-        object_dict['DefaultLight'] = default_light
-        object_count += 1
-
-    if option_default_camera:
-        default_camera = generate_default_camera()
-        object_dict['DefaultCamera'] = default_camera
-        object_count += 1
-
-    node = scene.GetRootNode()
-    if node:
-        for i in range(node.GetChildCount()):
-            object_count += generate_object_hierarchy(node.GetChild(i), object_dict)
-
-    return object_dict, object_count
-
-# #####################################################
-# Generate Scene Output
-# #####################################################
-def extract_scene(scene, filename):
-    global_settings = scene.GetGlobalSettings()
-    objects, nobjects = generate_scene_objects(scene)
-
-    textures = generate_texture_dict(scene)
-    materials = generate_material_dict(scene)
-    geometries = generate_geometry_dict(scene)
-    embeds = generate_embed_dict(scene)
-
-    ntextures = len(textures)
-    nmaterials = len(materials)
-    ngeometries = len(geometries)
-
-    position = serializeVector3( (0,0,0) )
-    rotation = serializeVector3( (0,0,0) )
-    scale    = serializeVector3( (1,1,1) )
-
-    camera_names = generate_camera_name_list(scene)
-    scene_settings = scene.GetGlobalSettings()
-
-    # This does not seem to be any help here
-    # global_settings.GetDefaultCamera()
-
-    defcamera = camera_names[0] if len(camera_names) > 0 else ""
-    if option_default_camera:
-      defcamera = 'default_camera'
-
-    metadata = {
-      'formatVersion': 3.2,
-      'type': 'scene',
-      'generatedBy': 'convert-to-threejs.py',
-      'objects': nobjects,
-      'geometries': ngeometries,
-      'materials': nmaterials,
-      'textures': ntextures
-    }
-
-    transform = {
-      'position' : position,
-      'rotation' : rotation,
-      'scale' : scale
-    }
-
-    defaults = {
-      'bgcolor' : 0,
-      'camera' : defcamera,
-      'fog' : ''
-    }
-
-    output = {
-      'objects': objects,
-      'geometries': geometries,
-      'materials': materials,
-      'textures': textures,
-      'embeds': embeds,
-      'transform': transform,
-      'defaults': defaults,
-    }
-
-    if option_pretty_print:
-        output['0metadata'] = metadata
-    else:
-        output['metadata'] = metadata
-
-    return output
-
-# #####################################################
-# Generate Non-Scene Output
-# #####################################################
-def extract_geometry(scene, filename):
-    output = generate_non_scene_output(scene)
-    return output
-
-# #####################################################
-# File Helpers
-# #####################################################
-def write_file(filepath, content):
-    index = filepath.rfind('/')
-    dir = filepath[0:index]
-
-    #if not os.path.exists(dir):
-        #os.makedirs(dir)
-
-    out = open(filepath, "w")
-    out.write(content.encode('utf8', 'replace'))
-    out.close()
-
-def read_file(filepath):
-    f = open(filepath)
-    content = f.readlines()
-    f.close()
-    return content
-
-def copy_textures(textures):
-    texture_dict = {}
-
-    for key in textures:
-        url = textures[key]['fullpath']
-        #src = replace_OutFolder2inFolder(url)
-
-        #print( src )
-        #print( url )
-
-        if url in texture_dict:  # texture has been copied
-            continue
-
-        if not os.path.exists(url):
-            print("copy_texture error: we can't find this texture at " + url)
-            continue
-
-        try:
-            index = url.rfind('/')
-            if index == -1:
-                index = url.rfind( '\\' )
-            filename = url[index+1:len(url)]
-            saveFolder = "maps"
-            saveFilename = saveFolder + "/" + filename
-            #print( src )
-            #print( url )
-            #print( saveFilename )
-            if not os.path.exists(saveFolder):
-                os.makedirs(saveFolder)
-            shutil.copyfile(url, saveFilename)
-            texture_dict[url] = True
-        except IOError as e:
-            print ("I/O error({0}): {1} {2}").format(e.errno, e.strerror, url)
-
-def findFilesWithExt(directory, ext, include_path = True):
-    ext = ext.lower()
-    found = []
-    for root, dirs, files in os.walk(directory):
-        for filename in files:
-            current_ext = os.path.splitext(filename)[1].lower()
-            if current_ext == ext:
-                if include_path:
-                    found.append(os.path.join(root, filename))
-                else:
-                    found.append(filename)
-    return found
-
-# #####################################################
-# main
-# #####################################################
-if __name__ == "__main__":
-    from optparse import OptionParser
-
-    try:
-        from FbxCommon import *
-    except ImportError:
-        import platform
-        msg = 'Could not locate the python FBX SDK!\n'
-        msg += 'You need to copy the FBX SDK into your python install folder such as '
-        if platform.system() == 'Windows' or platform.system() == 'Microsoft':
-            msg += '"Python26/Lib/site-packages"'
-        elif platform.system() == 'Linux':
-            msg += '"/usr/local/lib/python2.6/site-packages"'
-        elif platform.system() == 'Darwin':
-            msg += '"/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/site-packages"'
-        msg += ' folder.'
-        print(msg)
-        sys.exit(1)
-
-    usage = "Usage: %prog [source_file.fbx] [output_file.js] [options]"
-    parser = OptionParser(usage=usage)
-
-    parser.add_option('-t', '--triangulate', action='store_true', dest='triangulate', help="force quad geometry into triangles", default=False)
-    parser.add_option('-x', '--ignore-textures', action='store_true', dest='notextures', help="don't include texture references in output file", default=False)
-    parser.add_option('-n', '--no-texture-copy', action='store_true', dest='notexturecopy', help="don't copy texture files", default=False)
-    parser.add_option('-u', '--force-prefix', action='store_true', dest='prefix', help="prefix all object names in output file to ensure uniqueness", default=False)
-    parser.add_option('-f', '--flatten-scene', action='store_true', dest='geometry', help="merge all geometries and apply node transforms", default=False)
-    parser.add_option('-y', '--force-y-up', action='store_true', dest='forceyup', help="ensure that the y axis shows up", default=False)
-    parser.add_option('-c', '--add-camera', action='store_true', dest='defcamera', help="include default camera in output scene", default=False)
-    parser.add_option('-l', '--add-light', action='store_true', dest='deflight', help="include default light in output scene", default=False)
-    parser.add_option('-p', '--pretty-print', action='store_true', dest='pretty', help="prefix all object names in output file", default=False)
-
-    (options, args) = parser.parse_args()
-
-    option_triangulate = options.triangulate
-    option_textures = True if not options.notextures else False
-    option_copy_textures = True if not options.notexturecopy else False
-    option_prefix = options.prefix
-    option_geometry = options.geometry
-    option_forced_y_up = options.forceyup
-    option_default_camera = options.defcamera
-    option_default_light = options.deflight
-    option_pretty_print = options.pretty
-
-    # Prepare the FBX SDK.
-    sdk_manager, scene = InitializeSdkObjects()
-    converter = FbxGeometryConverter(sdk_manager)
-
-    # The converter takes an FBX file as an argument.
-    if len(args) > 1:
-        print("\nLoading file: %s" % args[0])
-        result = LoadScene(sdk_manager, scene, args[0])
-    else:
-        result = False
-        print("\nUsage: convert_fbx_to_threejs [source_file.fbx] [output_file.js]\n")
-
-    if not result:
-        print("\nAn error occurred while loading the file...")
-    else:
-        if option_triangulate:
-            print("\nForcing geometry to triangles")
-            triangulate_scene(scene)
-
-        axis_system = FbxAxisSystem.MayaYUp
-
-        if not option_forced_y_up:
-            # According to asset's coordinate to convert scene
-            upVector = scene.GetGlobalSettings().GetAxisSystem().GetUpVector();
-            if upVector[0] == 3:
-                axis_system = FbxAxisSystem.MayaZUp
-
-        axis_system.ConvertScene(scene)
-
-        inputFolder = args[0].replace( "\\", "/" );
-        index = args[0].rfind( "/" );
-        inputFolder = inputFolder[:index]
-
-        outputFolder = args[1].replace( "\\", "/" );
-        index = args[1].rfind( "/" );
-        outputFolder = outputFolder[:index]
-
-        if option_geometry:
-            output_content = extract_geometry(scene, os.path.basename(args[0]))
-        else:
-            output_content = extract_scene(scene, os.path.basename(args[0]))
-
-        if option_pretty_print:
-            output_string = json.dumps(output_content, indent=4, cls=CustomEncoder, separators=(',', ': '), sort_keys=True)
-            output_string = executeRegexHacks(output_string)
-        else:
-            output_string = json.dumps(output_content, separators=(',', ': '), sort_keys=True)
-
-
-        output_path = os.path.join(os.getcwd(), args[1])
-        write_file(output_path, output_string)
-
-        if option_copy_textures:
-            copy_textures( output_content['textures'] )
-
-        print("\nExported Three.js file to:\n%s\n" % output_path)
-
-    # Destroy all objects created by the FBX SDK.
-    sdk_manager.Destroy()
-    sys.exit(0)

+ 0 - 56
utils/converters/msgpack/json2msgpack.py

@@ -1,56 +0,0 @@
-#!/usr/bin/env python
-
-__doc__ = '''
-Convert a json file to msgpack.
-
-If fed only an input file the converted will write out a .pack file
-of the same base name in the same directory
-$ json2msgpack.py -i foo.json
-foo.json > foo.pack
-
-Specify an output file path
-$ json2msgpack.py -i foo.json -o /bar/tmp/bar.pack
-foo.json > /bar/tmp/bar.pack
-
-Dependencies:
-https://github.com/msgpack/msgpack-python
-'''
-
-import os
-import sys
-import json
-import argparse
-
-sys.path.append(os.path.dirname(os.path.realpath(__file__)))
-
-import msgpack
-
-EXT = '.pack'
-
-def main():
-    parser = argparse.ArgumentParser()
-    parser.add_argument('-i', '--infile', required=True,
-        help='Input json file to convert to msgpack')
-    parser.add_argument('-o', '--outfile',
-        help=('Optional output. If not specified the .pack file '\
-            'will write to the same director as the input file.'))
-    args = parser.parse_args()
-    convert(args.infile, args.outfile)
-
-def convert(infile, outfile):
-    if not outfile:
-        ext = infile.split('.')[-1]
-        outfile = '%s%s' % (infile[:-len(ext)-1], EXT)
-
-    print('%s > %s' % (infile, outfile))
-
-    print('reading in JSON')
-    with open(infile) as op:
-        data = json.load(op)
-
-    print('writing to msgpack')
-    with open(outfile, 'wb') as op:
-        msgpack.dump(data, op)
-
-if __name__ == '__main__':
-    main()

+ 0 - 54
utils/converters/msgpack/msgpack/__init__.py

@@ -1,54 +0,0 @@
-# coding: utf-8
-from msgpack._version import version
-from msgpack.exceptions import *
-
-from collections import namedtuple
-
-
-class ExtType(namedtuple('ExtType', 'code data')):
-    """ExtType represents ext type in msgpack."""
-    def __new__(cls, code, data):
-        if not isinstance(code, int):
-            raise TypeError("code must be int")
-        if not isinstance(data, bytes):
-            raise TypeError("data must be bytes")
-        if not 0 <= code <= 127:
-            raise ValueError("code must be 0~127")
-        return super(ExtType, cls).__new__(cls, code, data)
-
-
-import os
-if os.environ.get('MSGPACK_PUREPYTHON'):
-    from msgpack.fallback import Packer, unpack, unpackb, Unpacker
-else:
-    try:
-        from msgpack._packer import Packer
-        from msgpack._unpacker import unpack, unpackb, Unpacker
-    except ImportError:
-        from msgpack.fallback import Packer, unpack, unpackb, Unpacker
-
-
-def pack(o, stream, **kwargs):
-    """
-    Pack object `o` and write it to `stream`
-
-    See :class:`Packer` for options.
-    """
-    packer = Packer(**kwargs)
-    stream.write(packer.pack(o))
-
-
-def packb(o, **kwargs):
-    """
-    Pack object `o` and return packed bytes
-
-    See :class:`Packer` for options.
-    """
-    return Packer(**kwargs).pack(o)
-
-# alias for compatibility to simplejson/marshal/pickle.
-load = unpack
-loads = unpackb
-
-dump = pack
-dumps = packb

+ 0 - 295
utils/converters/msgpack/msgpack/_packer.pyx

@@ -1,295 +0,0 @@
-# coding: utf-8
-#cython: embedsignature=True
-
-from cpython cimport *
-from libc.stdlib cimport *
-from libc.string cimport *
-from libc.limits cimport *
-from libc.stdint cimport int8_t
-
-from msgpack.exceptions import PackValueError
-from msgpack import ExtType
-
-
-cdef extern from "pack.h":
-    struct msgpack_packer:
-        char* buf
-        size_t length
-        size_t buf_size
-        bint use_bin_type
-
-    int msgpack_pack_int(msgpack_packer* pk, int d)
-    int msgpack_pack_nil(msgpack_packer* pk)
-    int msgpack_pack_true(msgpack_packer* pk)
-    int msgpack_pack_false(msgpack_packer* pk)
-    int msgpack_pack_long(msgpack_packer* pk, long d)
-    int msgpack_pack_long_long(msgpack_packer* pk, long long d)
-    int msgpack_pack_unsigned_long_long(msgpack_packer* pk, unsigned long long d)
-    int msgpack_pack_float(msgpack_packer* pk, float d)
-    int msgpack_pack_double(msgpack_packer* pk, double d)
-    int msgpack_pack_array(msgpack_packer* pk, size_t l)
-    int msgpack_pack_map(msgpack_packer* pk, size_t l)
-    int msgpack_pack_raw(msgpack_packer* pk, size_t l)
-    int msgpack_pack_bin(msgpack_packer* pk, size_t l)
-    int msgpack_pack_raw_body(msgpack_packer* pk, char* body, size_t l)
-    int msgpack_pack_ext(msgpack_packer* pk, int8_t typecode, size_t l)
-
-cdef int DEFAULT_RECURSE_LIMIT=511
-
-
-cdef class Packer(object):
-    """
-    MessagePack Packer
-
-    usage::
-
-        packer = Packer()
-        astream.write(packer.pack(a))
-        astream.write(packer.pack(b))
-
-    Packer's constructor has some keyword arguments:
-
-    :param callable default:
-        Convert user type to builtin type that Packer supports.
-        See also simplejson's document.
-    :param str encoding:
-        Convert unicode to bytes with this encoding. (default: 'utf-8')
-    :param str unicode_errors:
-        Error handler for encoding unicode. (default: 'strict')
-    :param bool use_single_float:
-        Use single precision float type for float. (default: False)
-    :param bool autoreset:
-        Reset buffer after each pack and return it's content as `bytes`. (default: True).
-        If set this to false, use `bytes()` to get content and `.reset()` to clear buffer.
-    :param bool use_bin_type:
-        Use bin type introduced in msgpack spec 2.0 for bytes.
-        It also enable str8 type for unicode.
-    """
-    cdef msgpack_packer pk
-    cdef object _default
-    cdef object _bencoding
-    cdef object _berrors
-    cdef char *encoding
-    cdef char *unicode_errors
-    cdef bool use_float
-    cdef bint autoreset
-
-    def __cinit__(self):
-        cdef int buf_size = 1024*1024
-        self.pk.buf = <char*> malloc(buf_size);
-        if self.pk.buf == NULL:
-            raise MemoryError("Unable to allocate internal buffer.")
-        self.pk.buf_size = buf_size
-        self.pk.length = 0
-
-    def __init__(self, default=None, encoding='utf-8', unicode_errors='strict',
-                 use_single_float=False, bint autoreset=1, bint use_bin_type=0):
-        """
-        """
-        self.use_float = use_single_float
-        self.autoreset = autoreset
-        self.pk.use_bin_type = use_bin_type
-        if default is not None:
-            if not PyCallable_Check(default):
-                raise TypeError("default must be a callable.")
-        self._default = default
-        if encoding is None:
-            self.encoding = NULL
-            self.unicode_errors = NULL
-        else:
-            if isinstance(encoding, unicode):
-                self._bencoding = encoding.encode('ascii')
-            else:
-                self._bencoding = encoding
-            self.encoding = PyBytes_AsString(self._bencoding)
-            if isinstance(unicode_errors, unicode):
-                self._berrors = unicode_errors.encode('ascii')
-            else:
-                self._berrors = unicode_errors
-            self.unicode_errors = PyBytes_AsString(self._berrors)
-
-    def __dealloc__(self):
-        free(self.pk.buf);
-
-    cdef int _pack(self, object o, int nest_limit=DEFAULT_RECURSE_LIMIT) except -1:
-        cdef long long llval
-        cdef unsigned long long ullval
-        cdef long longval
-        cdef float fval
-        cdef double dval
-        cdef char* rawval
-        cdef int ret
-        cdef dict d
-        cdef size_t L
-        cdef int default_used = 0
-
-        if nest_limit < 0:
-            raise PackValueError("recursion limit exceeded.")
-
-        while True:
-            if o is None:
-                ret = msgpack_pack_nil(&self.pk)
-            elif isinstance(o, bool):
-                if o:
-                    ret = msgpack_pack_true(&self.pk)
-                else:
-                    ret = msgpack_pack_false(&self.pk)
-            elif PyLong_Check(o):
-                # PyInt_Check(long) is True for Python 3.
-                # Sow we should test long before int.
-                if o > 0:
-                    ullval = o
-                    ret = msgpack_pack_unsigned_long_long(&self.pk, ullval)
-                else:
-                    llval = o
-                    ret = msgpack_pack_long_long(&self.pk, llval)
-            elif PyInt_Check(o):
-                longval = o
-                ret = msgpack_pack_long(&self.pk, longval)
-            elif PyFloat_Check(o):
-                if self.use_float:
-                   fval = o
-                   ret = msgpack_pack_float(&self.pk, fval)
-                else:
-                   dval = o
-                   ret = msgpack_pack_double(&self.pk, dval)
-            elif PyBytes_Check(o):
-                L = len(o)
-                if L > (2**32)-1:
-                    raise ValueError("bytes is too large")
-                rawval = o
-                ret = msgpack_pack_bin(&self.pk, L)
-                if ret == 0:
-                    ret = msgpack_pack_raw_body(&self.pk, rawval, L)
-            elif PyUnicode_Check(o):
-                if not self.encoding:
-                    raise TypeError("Can't encode unicode string: no encoding is specified")
-                o = PyUnicode_AsEncodedString(o, self.encoding, self.unicode_errors)
-                L = len(o)
-                if L > (2**32)-1:
-                    raise ValueError("dict is too large")
-                rawval = o
-                ret = msgpack_pack_raw(&self.pk, len(o))
-                if ret == 0:
-                    ret = msgpack_pack_raw_body(&self.pk, rawval, len(o))
-            elif PyDict_CheckExact(o):
-                d = <dict>o
-                L = len(d)
-                if L > (2**32)-1:
-                    raise ValueError("dict is too large")
-                ret = msgpack_pack_map(&self.pk, L)
-                if ret == 0:
-                    for k, v in d.iteritems():
-                        ret = self._pack(k, nest_limit-1)
-                        if ret != 0: break
-                        ret = self._pack(v, nest_limit-1)
-                        if ret != 0: break
-            elif PyDict_Check(o):
-                L = len(o)
-                if L > (2**32)-1:
-                    raise ValueError("dict is too large")
-                ret = msgpack_pack_map(&self.pk, L)
-                if ret == 0:
-                    for k, v in o.items():
-                        ret = self._pack(k, nest_limit-1)
-                        if ret != 0: break
-                        ret = self._pack(v, nest_limit-1)
-                        if ret != 0: break
-            elif isinstance(o, ExtType):
-                # This should be before Tuple because ExtType is namedtuple.
-                longval = o.code
-                rawval = o.data
-                L = len(o.data)
-                if L > (2**32)-1:
-                    raise ValueError("EXT data is too large")
-                ret = msgpack_pack_ext(&self.pk, longval, L)
-                ret = msgpack_pack_raw_body(&self.pk, rawval, L)
-            elif PyTuple_Check(o) or PyList_Check(o):
-                L = len(o)
-                if L > (2**32)-1:
-                    raise ValueError("list is too large")
-                ret = msgpack_pack_array(&self.pk, L)
-                if ret == 0:
-                    for v in o:
-                        ret = self._pack(v, nest_limit-1)
-                        if ret != 0: break
-            elif not default_used and self._default:
-                o = self._default(o)
-                default_used = 1
-                continue
-            else:
-                raise TypeError("can't serialize %r" % (o,))
-            return ret
-
-    cpdef pack(self, object obj):
-        cdef int ret
-        ret = self._pack(obj, DEFAULT_RECURSE_LIMIT)
-        if ret == -1:
-            raise MemoryError
-        elif ret:  # should not happen.
-            raise TypeError
-        if self.autoreset:
-            buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length)
-            self.pk.length = 0
-            return buf
-
-    def pack_ext_type(self, typecode, data):
-        msgpack_pack_ext(&self.pk, typecode, len(data))
-        msgpack_pack_raw_body(&self.pk, data, len(data))
-
-    def pack_array_header(self, size_t size):
-        if size > (2**32-1):
-            raise ValueError
-        cdef int ret = msgpack_pack_array(&self.pk, size)
-        if ret == -1:
-            raise MemoryError
-        elif ret:  # should not happen
-            raise TypeError
-        if self.autoreset:
-            buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length)
-            self.pk.length = 0
-            return buf
-
-    def pack_map_header(self, size_t size):
-        if size > (2**32-1):
-            raise ValueError
-        cdef int ret = msgpack_pack_map(&self.pk, size)
-        if ret == -1:
-            raise MemoryError
-        elif ret:  # should not happen
-            raise TypeError
-        if self.autoreset:
-            buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length)
-            self.pk.length = 0
-            return buf
-
-    def pack_map_pairs(self, object pairs):
-        """
-        Pack *pairs* as msgpack map type.
-
-        *pairs* should sequence of pair.
-        (`len(pairs)` and `for k, v in pairs:` should be supported.)
-        """
-        cdef int ret = msgpack_pack_map(&self.pk, len(pairs))
-        if ret == 0:
-            for k, v in pairs:
-                ret = self._pack(k)
-                if ret != 0: break
-                ret = self._pack(v)
-                if ret != 0: break
-        if ret == -1:
-            raise MemoryError
-        elif ret:  # should not happen
-            raise TypeError
-        if self.autoreset:
-            buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length)
-            self.pk.length = 0
-            return buf
-
-    def reset(self):
-        """Clear internal buffer."""
-        self.pk.length = 0
-
-    def bytes(self):
-        """Return buffer content."""
-        return PyBytes_FromStringAndSize(self.pk.buf, self.pk.length)

+ 0 - 426
utils/converters/msgpack/msgpack/_unpacker.pyx

@@ -1,426 +0,0 @@
-# coding: utf-8
-#cython: embedsignature=True
-
-from cpython cimport *
-cdef extern from "Python.h":
-    ctypedef struct PyObject
-    cdef int PyObject_AsReadBuffer(object o, const void** buff, Py_ssize_t* buf_len) except -1
-
-from libc.stdlib cimport *
-from libc.string cimport *
-from libc.limits cimport *
-
-from msgpack.exceptions import (
-        BufferFull,
-        OutOfData,
-        UnpackValueError,
-        ExtraData,
-        )
-from msgpack import ExtType
-
-
-cdef extern from "unpack.h":
-    ctypedef struct msgpack_user:
-        bint use_list
-        PyObject* object_hook
-        bint has_pairs_hook # call object_hook with k-v pairs
-        PyObject* list_hook
-        PyObject* ext_hook
-        char *encoding
-        char *unicode_errors
-
-    ctypedef struct unpack_context:
-        msgpack_user user
-        PyObject* obj
-        size_t count
-
-    ctypedef int (*execute_fn)(unpack_context* ctx, const char* data,
-                               size_t len, size_t* off) except? -1
-    execute_fn unpack_construct
-    execute_fn unpack_skip
-    execute_fn read_array_header
-    execute_fn read_map_header
-    void unpack_init(unpack_context* ctx)
-    object unpack_data(unpack_context* ctx)
-
-cdef inline init_ctx(unpack_context *ctx,
-                     object object_hook, object object_pairs_hook,
-                     object list_hook, object ext_hook,
-                     bint use_list, char* encoding, char* unicode_errors):
-    unpack_init(ctx)
-    ctx.user.use_list = use_list
-    ctx.user.object_hook = ctx.user.list_hook = <PyObject*>NULL
-
-    if object_hook is not None and object_pairs_hook is not None:
-        raise TypeError("object_pairs_hook and object_hook are mutually exclusive.")
-
-    if object_hook is not None:
-        if not PyCallable_Check(object_hook):
-            raise TypeError("object_hook must be a callable.")
-        ctx.user.object_hook = <PyObject*>object_hook
-
-    if object_pairs_hook is None:
-        ctx.user.has_pairs_hook = False
-    else:
-        if not PyCallable_Check(object_pairs_hook):
-            raise TypeError("object_pairs_hook must be a callable.")
-        ctx.user.object_hook = <PyObject*>object_pairs_hook
-        ctx.user.has_pairs_hook = True
-
-    if list_hook is not None:
-        if not PyCallable_Check(list_hook):
-            raise TypeError("list_hook must be a callable.")
-        ctx.user.list_hook = <PyObject*>list_hook
-
-    if ext_hook is not None:
-        if not PyCallable_Check(ext_hook):
-            raise TypeError("ext_hook must be a callable.")
-        ctx.user.ext_hook = <PyObject*>ext_hook
-
-    ctx.user.encoding = encoding
-    ctx.user.unicode_errors = unicode_errors
-
-def default_read_extended_type(typecode, data):
-    raise NotImplementedError("Cannot decode extended type with typecode=%d" % typecode)
-
-def unpackb(object packed, object object_hook=None, object list_hook=None,
-            bint use_list=1, encoding=None, unicode_errors="strict",
-            object_pairs_hook=None, ext_hook=ExtType):
-    """
-    Unpack packed_bytes to object. Returns an unpacked object.
-
-    Raises `ValueError` when `packed` contains extra bytes.
-
-    See :class:`Unpacker` for options.
-    """
-    cdef unpack_context ctx
-    cdef size_t off = 0
-    cdef int ret
-
-    cdef char* buf
-    cdef Py_ssize_t buf_len
-    cdef char* cenc = NULL
-    cdef char* cerr = NULL
-
-    PyObject_AsReadBuffer(packed, <const void**>&buf, &buf_len)
-
-    if encoding is not None:
-        if isinstance(encoding, unicode):
-            encoding = encoding.encode('ascii')
-        cenc = PyBytes_AsString(encoding)
-
-    if unicode_errors is not None:
-        if isinstance(unicode_errors, unicode):
-            unicode_errors = unicode_errors.encode('ascii')
-        cerr = PyBytes_AsString(unicode_errors)
-
-    init_ctx(&ctx, object_hook, object_pairs_hook, list_hook, ext_hook,
-             use_list, cenc, cerr)
-    ret = unpack_construct(&ctx, buf, buf_len, &off)
-    if ret == 1:
-        obj = unpack_data(&ctx)
-        if off < buf_len:
-            raise ExtraData(obj, PyBytes_FromStringAndSize(buf+off, buf_len-off))
-        return obj
-    else:
-        raise UnpackValueError("Unpack failed: error = %d" % (ret,))
-
-
-def unpack(object stream, object object_hook=None, object list_hook=None,
-           bint use_list=1, encoding=None, unicode_errors="strict",
-           object_pairs_hook=None,
-           ):
-    """
-    Unpack an object from `stream`.
-
-    Raises `ValueError` when `stream` has extra bytes.
-
-    See :class:`Unpacker` for options.
-    """
-    return unpackb(stream.read(), use_list=use_list,
-                   object_hook=object_hook, object_pairs_hook=object_pairs_hook, list_hook=list_hook,
-                   encoding=encoding, unicode_errors=unicode_errors,
-                   )
-
-
-cdef class Unpacker(object):
-    """
-    Streaming unpacker.
-
-    arguments:
-
-    :param file_like:
-        File-like object having `.read(n)` method.
-        If specified, unpacker reads serialized data from it and :meth:`feed()` is not usable.
-
-    :param int read_size:
-        Used as `file_like.read(read_size)`. (default: `min(1024**2, max_buffer_size)`)
-
-    :param bool use_list:
-        If true, unpack msgpack array to Python list.
-        Otherwise, unpack to Python tuple. (default: True)
-
-    :param callable object_hook:
-        When specified, it should be callable.
-        Unpacker calls it with a dict argument after unpacking msgpack map.
-        (See also simplejson)
-
-    :param callable object_pairs_hook:
-        When specified, it should be callable.
-        Unpacker calls it with a list of key-value pairs after unpacking msgpack map.
-        (See also simplejson)
-
-    :param str encoding:
-        Encoding used for decoding msgpack raw.
-        If it is None (default), msgpack raw is deserialized to Python bytes.
-
-    :param str unicode_errors:
-        Used for decoding msgpack raw with *encoding*.
-        (default: `'strict'`)
-
-    :param int max_buffer_size:
-        Limits size of data waiting unpacked.  0 means system's INT_MAX (default).
-        Raises `BufferFull` exception when it is insufficient.
-        You shoud set this parameter when unpacking data from untrasted source.
-
-    example of streaming deserialize from file-like object::
-
-        unpacker = Unpacker(file_like)
-        for o in unpacker:
-            process(o)
-
-    example of streaming deserialize from socket::
-
-        unpacker = Unpacker()
-        while True:
-            buf = sock.recv(1024**2)
-            if not buf:
-                break
-            unpacker.feed(buf)
-            for o in unpacker:
-                process(o)
-    """
-    cdef unpack_context ctx
-    cdef char* buf
-    cdef size_t buf_size, buf_head, buf_tail
-    cdef object file_like
-    cdef object file_like_read
-    cdef Py_ssize_t read_size
-    # To maintain refcnt.
-    cdef object object_hook, object_pairs_hook, list_hook, ext_hook
-    cdef object encoding, unicode_errors
-    cdef size_t max_buffer_size
-
-    def __cinit__(self):
-        self.buf = NULL
-
-    def __dealloc__(self):
-        free(self.buf)
-        self.buf = NULL
-
-    def __init__(self, file_like=None, Py_ssize_t read_size=0, bint use_list=1,
-                 object object_hook=None, object object_pairs_hook=None, object list_hook=None,
-                 str encoding=None, str unicode_errors='strict', int max_buffer_size=0,
-                 object ext_hook=ExtType):
-        cdef char *cenc=NULL,
-        cdef char *cerr=NULL
-
-        self.object_hook = object_hook
-        self.object_pairs_hook = object_pairs_hook
-        self.list_hook = list_hook
-        self.ext_hook = ext_hook
-
-        self.file_like = file_like
-        if file_like:
-            self.file_like_read = file_like.read
-            if not PyCallable_Check(self.file_like_read):
-                raise TypeError("`file_like.read` must be a callable.")
-        if not max_buffer_size:
-            max_buffer_size = INT_MAX
-        if read_size > max_buffer_size:
-            raise ValueError("read_size should be less or equal to max_buffer_size")
-        if not read_size:
-            read_size = min(max_buffer_size, 1024**2)
-        self.max_buffer_size = max_buffer_size
-        self.read_size = read_size
-        self.buf = <char*>malloc(read_size)
-        if self.buf == NULL:
-            raise MemoryError("Unable to allocate internal buffer.")
-        self.buf_size = read_size
-        self.buf_head = 0
-        self.buf_tail = 0
-
-        if encoding is not None:
-            if isinstance(encoding, unicode):
-                self.encoding = encoding.encode('ascii')
-            else:
-                self.encoding = encoding
-            cenc = PyBytes_AsString(self.encoding)
-
-        if unicode_errors is not None:
-            if isinstance(unicode_errors, unicode):
-                self.unicode_errors = unicode_errors.encode('ascii')
-            else:
-                self.unicode_errors = unicode_errors
-            cerr = PyBytes_AsString(self.unicode_errors)
-
-        init_ctx(&self.ctx, object_hook, object_pairs_hook, list_hook,
-                 ext_hook, use_list, cenc, cerr)
-
-    def feed(self, object next_bytes):
-        """Append `next_bytes` to internal buffer."""
-        cdef Py_buffer pybuff
-        if self.file_like is not None:
-            raise AssertionError(
-                    "unpacker.feed() is not be able to use with `file_like`.")
-        PyObject_GetBuffer(next_bytes, &pybuff, PyBUF_SIMPLE)
-        try:
-            self.append_buffer(<char*>pybuff.buf, pybuff.len)
-        finally:
-            PyBuffer_Release(&pybuff)
-
-    cdef append_buffer(self, void* _buf, Py_ssize_t _buf_len):
-        cdef:
-            char* buf = self.buf
-            char* new_buf
-            size_t head = self.buf_head
-            size_t tail = self.buf_tail
-            size_t buf_size = self.buf_size
-            size_t new_size
-
-        if tail + _buf_len > buf_size:
-            if ((tail - head) + _buf_len) <= buf_size:
-                # move to front.
-                memmove(buf, buf + head, tail - head)
-                tail -= head
-                head = 0
-            else:
-                # expand buffer.
-                new_size = (tail-head) + _buf_len
-                if new_size > self.max_buffer_size:
-                    raise BufferFull
-                new_size = min(new_size*2, self.max_buffer_size)
-                new_buf = <char*>malloc(new_size)
-                if new_buf == NULL:
-                    # self.buf still holds old buffer and will be freed during
-                    # obj destruction
-                    raise MemoryError("Unable to enlarge internal buffer.")
-                memcpy(new_buf, buf + head, tail - head)
-                free(buf)
-
-                buf = new_buf
-                buf_size = new_size
-                tail -= head
-                head = 0
-
-        memcpy(buf + tail, <char*>(_buf), _buf_len)
-        self.buf = buf
-        self.buf_head = head
-        self.buf_size = buf_size
-        self.buf_tail = tail + _buf_len
-
-    cdef read_from_file(self):
-        next_bytes = self.file_like_read(
-                min(self.read_size,
-                    self.max_buffer_size - (self.buf_tail - self.buf_head)
-                    ))
-        if next_bytes:
-            self.append_buffer(PyBytes_AsString(next_bytes), PyBytes_Size(next_bytes))
-        else:
-            self.file_like = None
-
-    cdef object _unpack(self, execute_fn execute, object write_bytes, bint iter=0):
-        cdef int ret
-        cdef object obj
-        cdef size_t prev_head
-
-        if self.buf_head >= self.buf_tail and self.file_like is not None:
-            self.read_from_file()
-
-        while 1:
-            prev_head = self.buf_head
-            if prev_head >= self.buf_tail:
-                if iter:
-                    raise StopIteration("No more data to unpack.")
-                else:
-                    raise OutOfData("No more data to unpack.")
-
-            ret = execute(&self.ctx, self.buf, self.buf_tail, &self.buf_head)
-            if write_bytes is not None:
-                write_bytes(PyBytes_FromStringAndSize(self.buf + prev_head, self.buf_head - prev_head))
-
-            if ret == 1:
-                obj = unpack_data(&self.ctx)
-                unpack_init(&self.ctx)
-                return obj
-            elif ret == 0:
-                if self.file_like is not None:
-                    self.read_from_file()
-                    continue
-                if iter:
-                    raise StopIteration("No more data to unpack.")
-                else:
-                    raise OutOfData("No more data to unpack.")
-            else:
-                raise ValueError("Unpack failed: error = %d" % (ret,))
-
-    def read_bytes(self, Py_ssize_t nbytes):
-        """read a specified number of raw bytes from the stream"""
-        cdef size_t nread
-        nread = min(self.buf_tail - self.buf_head, nbytes)
-        ret = PyBytes_FromStringAndSize(self.buf + self.buf_head, nread)
-        self.buf_head += nread
-        if len(ret) < nbytes and self.file_like is not None:
-            ret += self.file_like.read(nbytes - len(ret))
-        return ret
-
-    def unpack(self, object write_bytes=None):
-        """
-        unpack one object
-
-        If write_bytes is not None, it will be called with parts of the raw
-        message as it is unpacked.
-
-        Raises `OutOfData` when there are no more bytes to unpack.
-        """
-        return self._unpack(unpack_construct, write_bytes)
-
-    def skip(self, object write_bytes=None):
-        """
-        read and ignore one object, returning None
-
-        If write_bytes is not None, it will be called with parts of the raw
-        message as it is unpacked.
-
-        Raises `OutOfData` when there are no more bytes to unpack.
-        """
-        return self._unpack(unpack_skip, write_bytes)
-
-    def read_array_header(self, object write_bytes=None):
-        """assuming the next object is an array, return its size n, such that
-        the next n unpack() calls will iterate over its contents.
-
-        Raises `OutOfData` when there are no more bytes to unpack.
-        """
-        return self._unpack(read_array_header, write_bytes)
-
-    def read_map_header(self, object write_bytes=None):
-        """assuming the next object is a map, return its size n, such that the
-        next n * 2 unpack() calls will iterate over its key-value pairs.
-
-        Raises `OutOfData` when there are no more bytes to unpack.
-        """
-        return self._unpack(read_map_header, write_bytes)
-
-    def __iter__(self):
-        return self
-
-    def __next__(self):
-        return self._unpack(unpack_construct, None, 1)
-
-    # for debug.
-    #def _buf(self):
-    #    return PyString_FromStringAndSize(self.buf, self.buf_tail)
-
-    #def _off(self):
-    #    return self.buf_head

+ 0 - 1
utils/converters/msgpack/msgpack/_version.py

@@ -1 +0,0 @@
-version = (0, 4, 2)

+ 0 - 29
utils/converters/msgpack/msgpack/exceptions.py

@@ -1,29 +0,0 @@
-class UnpackException(Exception):
-    pass
-
-
-class BufferFull(UnpackException):
-    pass
-
-
-class OutOfData(UnpackException):
-    pass
-
-
-class UnpackValueError(UnpackException, ValueError):
-    pass
-
-
-class ExtraData(ValueError):
-    def __init__(self, unpacked, extra):
-        self.unpacked = unpacked
-        self.extra = extra
-
-    def __str__(self):
-        return "unpack(b) received extra data."
-
-class PackException(Exception):
-    pass
-
-class PackValueError(PackException, ValueError):
-    pass

+ 0 - 714
utils/converters/msgpack/msgpack/fallback.py

@@ -1,714 +0,0 @@
-"""Fallback pure Python implementation of msgpack"""
-
-import sys
-import array
-import struct
-
-if sys.version_info[0] == 3:
-    PY3 = True
-    int_types = int
-    Unicode = str
-    xrange = range
-    def dict_iteritems(d):
-        return d.items()
-else:
-    PY3 = False
-    int_types = (int, long)
-    Unicode = unicode
-    def dict_iteritems(d):
-        return d.iteritems()
-
-
-if hasattr(sys, 'pypy_version_info'):
-    # cStringIO is slow on PyPy, StringIO is faster.  However: PyPy's own
-    # StringBuilder is fastest.
-    from __pypy__ import newlist_hint
-    from __pypy__.builders import StringBuilder
-    USING_STRINGBUILDER = True
-    class StringIO(object):
-        def __init__(self, s=b''):
-            if s:
-                self.builder = StringBuilder(len(s))
-                self.builder.append(s)
-            else:
-                self.builder = StringBuilder()
-        def write(self, s):
-            self.builder.append(s)
-        def getvalue(self):
-            return self.builder.build()
-else:
-    USING_STRINGBUILDER = False
-    from io import BytesIO as StringIO
-    newlist_hint = lambda size: []
-
-from msgpack.exceptions import (
-    BufferFull,
-    OutOfData,
-    UnpackValueError,
-    PackValueError,
-    ExtraData)
-
-from msgpack import ExtType
-
-
-EX_SKIP                 = 0
-EX_CONSTRUCT            = 1
-EX_READ_ARRAY_HEADER    = 2
-EX_READ_MAP_HEADER      = 3
-
-TYPE_IMMEDIATE          = 0
-TYPE_ARRAY              = 1
-TYPE_MAP                = 2
-TYPE_RAW                = 3
-TYPE_BIN                = 4
-TYPE_EXT                = 5
-
-DEFAULT_RECURSE_LIMIT = 511
-
-
-def unpack(stream, **kwargs):
-    """
-    Unpack an object from `stream`.
-
-    Raises `ExtraData` when `packed` contains extra bytes.
-    See :class:`Unpacker` for options.
-    """
-    unpacker = Unpacker(stream, **kwargs)
-    ret = unpacker._fb_unpack()
-    if unpacker._fb_got_extradata():
-        raise ExtraData(ret, unpacker._fb_get_extradata())
-    return ret
-
-
-def unpackb(packed, **kwargs):
-    """
-    Unpack an object from `packed`.
-
-    Raises `ExtraData` when `packed` contains extra bytes.
-    See :class:`Unpacker` for options.
-    """
-    unpacker = Unpacker(None, **kwargs)
-    unpacker.feed(packed)
-    try:
-        ret = unpacker._fb_unpack()
-    except OutOfData:
-        raise UnpackValueError("Data is not enough.")
-    if unpacker._fb_got_extradata():
-        raise ExtraData(ret, unpacker._fb_get_extradata())
-    return ret
-
-
-class Unpacker(object):
-    """
-    Streaming unpacker.
-
-    `file_like` is a file-like object having a `.read(n)` method.
-    When `Unpacker` is initialized with a `file_like`, `.feed()` is not
-    usable.
-
-    `read_size` is used for `file_like.read(read_size)`.
-
-    If `use_list` is True (default), msgpack lists are deserialized to Python
-    lists.  Otherwise they are deserialized to tuples.
-
-    `object_hook` is the same as in simplejson.  If it is not None, it should
-    be callable and Unpacker calls it with a dict argument after deserializing
-    a map.
-
-    `object_pairs_hook` is the same as in simplejson.  If it is not None, it
-    should be callable and Unpacker calls it with a list of key-value pairs
-    after deserializing a map.
-
-    `ext_hook` is callback for ext (User defined) type. It called with two
-    arguments: (code, bytes). default: `msgpack.ExtType`
-
-    `encoding` is the encoding used for decoding msgpack bytes.  If it is
-    None (default), msgpack bytes are deserialized to Python bytes.
-
-    `unicode_errors` is used for decoding bytes.
-
-    `max_buffer_size` limits the buffer size.  0 means INT_MAX (default).
-
-    Raises `BufferFull` exception when it is unsufficient.
-
-    You should set this parameter when unpacking data from an untrustred source.
-
-    example of streaming deserialization from file-like object::
-
-        unpacker = Unpacker(file_like)
-        for o in unpacker:
-            do_something(o)
-
-    example of streaming deserialization from socket::
-
-        unpacker = Unpacker()
-        while 1:
-            buf = sock.recv(1024*2)
-            if not buf:
-                break
-            unpacker.feed(buf)
-            for o in unpacker:
-                do_something(o)
-    """
-
-    def __init__(self, file_like=None, read_size=0, use_list=True,
-                 object_hook=None, object_pairs_hook=None, list_hook=None,
-                 encoding=None, unicode_errors='strict', max_buffer_size=0,
-                 ext_hook=ExtType):
-        if file_like is None:
-            self._fb_feeding = True
-        else:
-            if not callable(file_like.read):
-                raise TypeError("`file_like.read` must be callable")
-            self.file_like = file_like
-            self._fb_feeding = False
-        self._fb_buffers = []
-        self._fb_buf_o = 0
-        self._fb_buf_i = 0
-        self._fb_buf_n = 0
-        self._max_buffer_size = max_buffer_size or 2**31-1
-        if read_size > self._max_buffer_size:
-            raise ValueError("read_size must be smaller than max_buffer_size")
-        self._read_size = read_size or min(self._max_buffer_size, 2048)
-        self._encoding = encoding
-        self._unicode_errors = unicode_errors
-        self._use_list = use_list
-        self._list_hook = list_hook
-        self._object_hook = object_hook
-        self._object_pairs_hook = object_pairs_hook
-        self._ext_hook = ext_hook
-
-        if list_hook is not None and not callable(list_hook):
-            raise TypeError('`list_hook` is not callable')
-        if object_hook is not None and not callable(object_hook):
-            raise TypeError('`object_hook` is not callable')
-        if object_pairs_hook is not None and not callable(object_pairs_hook):
-            raise TypeError('`object_pairs_hook` is not callable')
-        if object_hook is not None and object_pairs_hook is not None:
-            raise TypeError("object_pairs_hook and object_hook are mutually "
-                            "exclusive")
-        if not callable(ext_hook):
-            raise TypeError("`ext_hook` is not callable")
-
-    def feed(self, next_bytes):
-        if isinstance(next_bytes, array.array):
-            next_bytes = next_bytes.tostring()
-        elif isinstance(next_bytes, bytearray):
-            next_bytes = bytes(next_bytes)
-        assert self._fb_feeding
-        if self._fb_buf_n + len(next_bytes) > self._max_buffer_size:
-            raise BufferFull
-        self._fb_buf_n += len(next_bytes)
-        self._fb_buffers.append(next_bytes)
-
-    def _fb_consume(self):
-        self._fb_buffers = self._fb_buffers[self._fb_buf_i:]
-        if self._fb_buffers:
-            self._fb_buffers[0] = self._fb_buffers[0][self._fb_buf_o:]
-        self._fb_buf_o = 0
-        self._fb_buf_i = 0
-        self._fb_buf_n = sum(map(len, self._fb_buffers))
-
-    def _fb_got_extradata(self):
-        if self._fb_buf_i != len(self._fb_buffers):
-            return True
-        if self._fb_feeding:
-            return False
-        if not self.file_like:
-            return False
-        if self.file_like.read(1):
-            return True
-        return False
-
-    def __iter__(self):
-        return self
-
-    def read_bytes(self, n):
-        return self._fb_read(n)
-
-    def _fb_rollback(self):
-        self._fb_buf_i = 0
-        self._fb_buf_o = 0
-
-    def _fb_get_extradata(self):
-        bufs = self._fb_buffers[self._fb_buf_i:]
-        if bufs:
-            bufs[0] = bufs[0][self._fb_buf_o:]
-        return b''.join(bufs)
-
-    def _fb_read(self, n, write_bytes=None):
-        buffs = self._fb_buffers
-        if (write_bytes is None and self._fb_buf_i < len(buffs) and
-                self._fb_buf_o + n < len(buffs[self._fb_buf_i])):
-            self._fb_buf_o += n
-            return buffs[self._fb_buf_i][self._fb_buf_o - n:self._fb_buf_o]
-
-        ret = b''
-        while len(ret) != n:
-            if self._fb_buf_i == len(buffs):
-                if self._fb_feeding:
-                    break
-                tmp = self.file_like.read(self._read_size)
-                if not tmp:
-                    break
-                buffs.append(tmp)
-                continue
-            sliced = n - len(ret)
-            ret += buffs[self._fb_buf_i][self._fb_buf_o:self._fb_buf_o + sliced]
-            self._fb_buf_o += sliced
-            if self._fb_buf_o >= len(buffs[self._fb_buf_i]):
-                self._fb_buf_o = 0
-                self._fb_buf_i += 1
-        if len(ret) != n:
-            self._fb_rollback()
-            raise OutOfData
-        if write_bytes is not None:
-            write_bytes(ret)
-        return ret
-
-    def _read_header(self, execute=EX_CONSTRUCT, write_bytes=None):
-        typ = TYPE_IMMEDIATE
-        n = 0
-        obj = None
-        c = self._fb_read(1, write_bytes)
-        b = ord(c)
-        if   b & 0b10000000 == 0:
-            obj = b
-        elif b & 0b11100000 == 0b11100000:
-            obj = struct.unpack("b", c)[0]
-        elif b & 0b11100000 == 0b10100000:
-            n = b & 0b00011111
-            obj = self._fb_read(n, write_bytes)
-            typ = TYPE_RAW
-        elif b & 0b11110000 == 0b10010000:
-            n = b & 0b00001111
-            typ = TYPE_ARRAY
-        elif b & 0b11110000 == 0b10000000:
-            n = b & 0b00001111
-            typ = TYPE_MAP
-        elif b == 0xc0:
-            obj = None
-        elif b == 0xc2:
-            obj = False
-        elif b == 0xc3:
-            obj = True
-        elif b == 0xc4:
-            typ = TYPE_BIN
-            n = struct.unpack("B", self._fb_read(1, write_bytes))[0]
-            obj = self._fb_read(n, write_bytes)
-        elif b == 0xc5:
-            typ = TYPE_BIN
-            n = struct.unpack(">H", self._fb_read(2, write_bytes))[0]
-            obj = self._fb_read(n, write_bytes)
-        elif b == 0xc6:
-            typ = TYPE_BIN
-            n = struct.unpack(">I", self._fb_read(4, write_bytes))[0]
-            obj = self._fb_read(n, write_bytes)
-        elif b == 0xc7:  # ext 8
-            typ = TYPE_EXT
-            L, n = struct.unpack('Bb', self._fb_read(2, write_bytes))
-            obj = self._fb_read(L, write_bytes)
-        elif b == 0xc8:  # ext 16
-            typ = TYPE_EXT
-            L, n = struct.unpack('>Hb', self._fb_read(3, write_bytes))
-            obj = self._fb_read(L, write_bytes)
-        elif b == 0xc9:  # ext 32
-            typ = TYPE_EXT
-            L, n = struct.unpack('>Ib', self._fb_read(5, write_bytes))
-            obj = self._fb_read(L, write_bytes)
-        elif b == 0xca:
-            obj = struct.unpack(">f", self._fb_read(4, write_bytes))[0]
-        elif b == 0xcb:
-            obj = struct.unpack(">d", self._fb_read(8, write_bytes))[0]
-        elif b == 0xcc:
-            obj = struct.unpack("B", self._fb_read(1, write_bytes))[0]
-        elif b == 0xcd:
-            obj = struct.unpack(">H", self._fb_read(2, write_bytes))[0]
-        elif b == 0xce:
-            obj = struct.unpack(">I", self._fb_read(4, write_bytes))[0]
-        elif b == 0xcf:
-            obj = struct.unpack(">Q", self._fb_read(8, write_bytes))[0]
-        elif b == 0xd0:
-            obj = struct.unpack("b", self._fb_read(1, write_bytes))[0]
-        elif b == 0xd1:
-            obj = struct.unpack(">h", self._fb_read(2, write_bytes))[0]
-        elif b == 0xd2:
-            obj = struct.unpack(">i", self._fb_read(4, write_bytes))[0]
-        elif b == 0xd3:
-            obj = struct.unpack(">q", self._fb_read(8, write_bytes))[0]
-        elif b == 0xd4:  # fixext 1
-            typ = TYPE_EXT
-            n, obj = struct.unpack('b1s', self._fb_read(2, write_bytes))
-        elif b == 0xd5:  # fixext 2
-            typ = TYPE_EXT
-            n, obj = struct.unpack('b2s', self._fb_read(3, write_bytes))
-        elif b == 0xd6:  # fixext 4
-            typ = TYPE_EXT
-            n, obj = struct.unpack('b4s', self._fb_read(5, write_bytes))
-        elif b == 0xd7:  # fixext 8
-            typ = TYPE_EXT
-            n, obj = struct.unpack('b8s', self._fb_read(9, write_bytes))
-        elif b == 0xd8:  # fixext 16
-            typ = TYPE_EXT
-            n, obj = struct.unpack('b16s', self._fb_read(17, write_bytes))
-        elif b == 0xd9:
-            typ = TYPE_RAW
-            n = struct.unpack("B", self._fb_read(1, write_bytes))[0]
-            obj = self._fb_read(n, write_bytes)
-        elif b == 0xda:
-            typ = TYPE_RAW
-            n = struct.unpack(">H", self._fb_read(2, write_bytes))[0]
-            obj = self._fb_read(n, write_bytes)
-        elif b == 0xdb:
-            typ = TYPE_RAW
-            n = struct.unpack(">I", self._fb_read(4, write_bytes))[0]
-            obj = self._fb_read(n, write_bytes)
-        elif b == 0xdc:
-            n = struct.unpack(">H", self._fb_read(2, write_bytes))[0]
-            typ = TYPE_ARRAY
-        elif b == 0xdd:
-            n = struct.unpack(">I", self._fb_read(4, write_bytes))[0]
-            typ = TYPE_ARRAY
-        elif b == 0xde:
-            n = struct.unpack(">H", self._fb_read(2, write_bytes))[0]
-            typ = TYPE_MAP
-        elif b == 0xdf:
-            n = struct.unpack(">I", self._fb_read(4, write_bytes))[0]
-            typ = TYPE_MAP
-        else:
-            raise UnpackValueError("Unknown header: 0x%x" % b)
-        return typ, n, obj
-
-    def _fb_unpack(self, execute=EX_CONSTRUCT, write_bytes=None):
-        typ, n, obj = self._read_header(execute, write_bytes)
-
-        if execute == EX_READ_ARRAY_HEADER:
-            if typ != TYPE_ARRAY:
-                raise UnpackValueError("Expected array")
-            return n
-        if execute == EX_READ_MAP_HEADER:
-            if typ != TYPE_MAP:
-                raise UnpackValueError("Expected map")
-            return n
-        # TODO should we eliminate the recursion?
-        if typ == TYPE_ARRAY:
-            if execute == EX_SKIP:
-                for i in xrange(n):
-                    # TODO check whether we need to call `list_hook`
-                    self._fb_unpack(EX_SKIP, write_bytes)
-                return
-            ret = newlist_hint(n)
-            for i in xrange(n):
-                ret.append(self._fb_unpack(EX_CONSTRUCT, write_bytes))
-            if self._list_hook is not None:
-                ret = self._list_hook(ret)
-            # TODO is the interaction between `list_hook` and `use_list` ok?
-            return ret if self._use_list else tuple(ret)
-        if typ == TYPE_MAP:
-            if execute == EX_SKIP:
-                for i in xrange(n):
-                    # TODO check whether we need to call hooks
-                    self._fb_unpack(EX_SKIP, write_bytes)
-                    self._fb_unpack(EX_SKIP, write_bytes)
-                return
-            if self._object_pairs_hook is not None:
-                ret = self._object_pairs_hook(
-                    (self._fb_unpack(EX_CONSTRUCT, write_bytes),
-                     self._fb_unpack(EX_CONSTRUCT, write_bytes))
-                    for _ in xrange(n))
-            else:
-                ret = {}
-                for _ in xrange(n):
-                    key = self._fb_unpack(EX_CONSTRUCT, write_bytes)
-                    ret[key] = self._fb_unpack(EX_CONSTRUCT, write_bytes)
-                if self._object_hook is not None:
-                    ret = self._object_hook(ret)
-            return ret
-        if execute == EX_SKIP:
-            return
-        if typ == TYPE_RAW:
-            if self._encoding is not None:
-                obj = obj.decode(self._encoding, self._unicode_errors)
-            return obj
-        if typ == TYPE_EXT:
-            return self._ext_hook(n, obj)
-        if typ == TYPE_BIN:
-            return obj
-        assert typ == TYPE_IMMEDIATE
-        return obj
-
-    def next(self):
-        try:
-            ret = self._fb_unpack(EX_CONSTRUCT, None)
-            self._fb_consume()
-            return ret
-        except OutOfData:
-            raise StopIteration
-    __next__ = next
-
-    def skip(self, write_bytes=None):
-        self._fb_unpack(EX_SKIP, write_bytes)
-        self._fb_consume()
-
-    def unpack(self, write_bytes=None):
-        ret = self._fb_unpack(EX_CONSTRUCT, write_bytes)
-        self._fb_consume()
-        return ret
-
-    def read_array_header(self, write_bytes=None):
-        ret = self._fb_unpack(EX_READ_ARRAY_HEADER, write_bytes)
-        self._fb_consume()
-        return ret
-
-    def read_map_header(self, write_bytes=None):
-        ret = self._fb_unpack(EX_READ_MAP_HEADER, write_bytes)
-        self._fb_consume()
-        return ret
-
-
-class Packer(object):
-    """
-    MessagePack Packer
-
-    usage:
-
-        packer = Packer()
-        astream.write(packer.pack(a))
-        astream.write(packer.pack(b))
-
-    Packer's constructor has some keyword arguments:
-
-    :param callable default:
-        Convert user type to builtin type that Packer supports.
-        See also simplejson's document.
-    :param str encoding:
-            Convert unicode to bytes with this encoding. (default: 'utf-8')
-    :param str unicode_errors:
-        Error handler for encoding unicode. (default: 'strict')
-    :param bool use_single_float:
-        Use single precision float type for float. (default: False)
-    :param bool autoreset:
-        Reset buffer after each pack and return it's content as `bytes`. (default: True).
-        If set this to false, use `bytes()` to get content and `.reset()` to clear buffer.
-    :param bool use_bin_type:
-        Use bin type introduced in msgpack spec 2.0 for bytes.
-        It also enable str8 type for unicode.
-    """
-    def __init__(self, default=None, encoding='utf-8', unicode_errors='strict',
-                 use_single_float=False, autoreset=True, use_bin_type=False):
-        self._use_float = use_single_float
-        self._autoreset = autoreset
-        self._use_bin_type = use_bin_type
-        self._encoding = encoding
-        self._unicode_errors = unicode_errors
-        self._buffer = StringIO()
-        if default is not None:
-            if not callable(default):
-                raise TypeError("default must be callable")
-        self._default = default
-
-    def _pack(self, obj, nest_limit=DEFAULT_RECURSE_LIMIT, isinstance=isinstance):
-        default_used = False
-        while True:
-            if nest_limit < 0:
-                raise PackValueError("recursion limit exceeded")
-            if obj is None:
-                return self._buffer.write(b"\xc0")
-            if isinstance(obj, bool):
-                if obj:
-                    return self._buffer.write(b"\xc3")
-                return self._buffer.write(b"\xc2")
-            if isinstance(obj, int_types):
-                if 0 <= obj < 0x80:
-                    return self._buffer.write(struct.pack("B", obj))
-                if -0x20 <= obj < 0:
-                    return self._buffer.write(struct.pack("b", obj))
-                if 0x80 <= obj <= 0xff:
-                    return self._buffer.write(struct.pack("BB", 0xcc, obj))
-                if -0x80 <= obj < 0:
-                    return self._buffer.write(struct.pack(">Bb", 0xd0, obj))
-                if 0xff < obj <= 0xffff:
-                    return self._buffer.write(struct.pack(">BH", 0xcd, obj))
-                if -0x8000 <= obj < -0x80:
-                    return self._buffer.write(struct.pack(">Bh", 0xd1, obj))
-                if 0xffff < obj <= 0xffffffff:
-                    return self._buffer.write(struct.pack(">BI", 0xce, obj))
-                if -0x80000000 <= obj < -0x8000:
-                    return self._buffer.write(struct.pack(">Bi", 0xd2, obj))
-                if 0xffffffff < obj <= 0xffffffffffffffff:
-                    return self._buffer.write(struct.pack(">BQ", 0xcf, obj))
-                if -0x8000000000000000 <= obj < -0x80000000:
-                    return self._buffer.write(struct.pack(">Bq", 0xd3, obj))
-                raise PackValueError("Integer value out of range")
-            if self._use_bin_type and isinstance(obj, bytes):
-                n = len(obj)
-                if n <= 0xff:
-                    self._buffer.write(struct.pack('>BB', 0xc4, n))
-                elif n <= 0xffff:
-                    self._buffer.write(struct.pack(">BH", 0xc5, n))
-                elif n <= 0xffffffff:
-                    self._buffer.write(struct.pack(">BI", 0xc6, n))
-                else:
-                    raise PackValueError("Bytes is too large")
-                return self._buffer.write(obj)
-            if isinstance(obj, (Unicode, bytes)):
-                if isinstance(obj, Unicode):
-                    if self._encoding is None:
-                        raise TypeError(
-                            "Can't encode unicode string: "
-                            "no encoding is specified")
-                    obj = obj.encode(self._encoding, self._unicode_errors)
-                n = len(obj)
-                if n <= 0x1f:
-                    self._buffer.write(struct.pack('B', 0xa0 + n))
-                elif self._use_bin_type and n <= 0xff:
-                    self._buffer.write(struct.pack('>BB', 0xd9, n))
-                elif n <= 0xffff:
-                    self._buffer.write(struct.pack(">BH", 0xda, n))
-                elif n <= 0xffffffff:
-                    self._buffer.write(struct.pack(">BI", 0xdb, n))
-                else:
-                    raise PackValueError("String is too large")
-                return self._buffer.write(obj)
-            if isinstance(obj, float):
-                if self._use_float:
-                    return self._buffer.write(struct.pack(">Bf", 0xca, obj))
-                return self._buffer.write(struct.pack(">Bd", 0xcb, obj))
-            if isinstance(obj, ExtType):
-                code = obj.code
-                data = obj.data
-                assert isinstance(code, int)
-                assert isinstance(data, bytes)
-                L = len(data)
-                if L == 1:
-                    self._buffer.write(b'\xd4')
-                elif L == 2:
-                    self._buffer.write(b'\xd5')
-                elif L == 4:
-                    self._buffer.write(b'\xd6')
-                elif L == 8:
-                    self._buffer.write(b'\xd7')
-                elif L == 16:
-                    self._buffer.write(b'\xd8')
-                elif L <= 0xff:
-                    self._buffer.write(struct.pack(">BB", 0xc7, L))
-                elif L <= 0xffff:
-                    self._buffer.write(struct.pack(">BH", 0xc8, L))
-                else:
-                    self._buffer.write(struct.pack(">BI", 0xc9, L))
-                self._buffer.write(struct.pack("b", code))
-                self._buffer.write(data)
-                return
-            if isinstance(obj, (list, tuple)):
-                n = len(obj)
-                self._fb_pack_array_header(n)
-                for i in xrange(n):
-                    self._pack(obj[i], nest_limit - 1)
-                return
-            if isinstance(obj, dict):
-                return self._fb_pack_map_pairs(len(obj), dict_iteritems(obj),
-                                               nest_limit - 1)
-            if not default_used and self._default is not None:
-                obj = self._default(obj)
-                default_used = 1
-                continue
-            raise TypeError("Cannot serialize %r" % obj)
-
-    def pack(self, obj):
-        self._pack(obj)
-        ret = self._buffer.getvalue()
-        if self._autoreset:
-            self._buffer = StringIO()
-        elif USING_STRINGBUILDER:
-            self._buffer = StringIO(ret)
-        return ret
-
-    def pack_map_pairs(self, pairs):
-        self._fb_pack_map_pairs(len(pairs), pairs)
-        ret = self._buffer.getvalue()
-        if self._autoreset:
-            self._buffer = StringIO()
-        elif USING_STRINGBUILDER:
-            self._buffer = StringIO(ret)
-        return ret
-
-    def pack_array_header(self, n):
-        if n >= 2**32:
-            raise ValueError
-        self._fb_pack_array_header(n)
-        ret = self._buffer.getvalue()
-        if self._autoreset:
-            self._buffer = StringIO()
-        elif USING_STRINGBUILDER:
-            self._buffer = StringIO(ret)
-        return ret
-
-    def pack_map_header(self, n):
-        if n >= 2**32:
-            raise ValueError
-        self._fb_pack_map_header(n)
-        ret = self._buffer.getvalue()
-        if self._autoreset:
-            self._buffer = StringIO()
-        elif USING_STRINGBUILDER:
-            self._buffer = StringIO(ret)
-        return ret
-
-    def pack_ext_type(self, typecode, data):
-        if not isinstance(typecode, int):
-            raise TypeError("typecode must have int type.")
-        if not 0 <= typecode <= 127:
-            raise ValueError("typecode should be 0-127")
-        if not isinstance(data, bytes):
-            raise TypeError("data must have bytes type")
-        L = len(data)
-        if L > 0xffffffff:
-            raise ValueError("Too large data")
-        if L == 1:
-            self._buffer.write(b'\xd4')
-        elif L == 2:
-            self._buffer.write(b'\xd5')
-        elif L == 4:
-            self._buffer.write(b'\xd6')
-        elif L == 8:
-            self._buffer.write(b'\xd7')
-        elif L == 16:
-            self._buffer.write(b'\xd8')
-        elif L <= 0xff:
-            self._buffer.write(b'\xc7' + struct.pack('B', L))
-        elif L <= 0xffff:
-            self._buffer.write(b'\xc8' + struct.pack('>H', L))
-        else:
-            self._buffer.write(b'\xc9' + struct.pack('>I', L))
-        self._buffer.write(struct.pack('B', typecode))
-        self._buffer.write(data)
-
-    def _fb_pack_array_header(self, n):
-        if n <= 0x0f:
-            return self._buffer.write(struct.pack('B', 0x90 + n))
-        if n <= 0xffff:
-            return self._buffer.write(struct.pack(">BH", 0xdc, n))
-        if n <= 0xffffffff:
-            return self._buffer.write(struct.pack(">BI", 0xdd, n))
-        raise PackValueError("Array is too large")
-
-    def _fb_pack_map_header(self, n):
-        if n <= 0x0f:
-            return self._buffer.write(struct.pack('B', 0x80 + n))
-        if n <= 0xffff:
-            return self._buffer.write(struct.pack(">BH", 0xde, n))
-        if n <= 0xffffffff:
-            return self._buffer.write(struct.pack(">BI", 0xdf, n))
-        raise PackValueError("Dict is too large")
-
-    def _fb_pack_map_pairs(self, n, pairs, nest_limit=DEFAULT_RECURSE_LIMIT):
-        self._fb_pack_map_header(n)
-        for (k, v) in pairs:
-            self._pack(k, nest_limit - 1)
-            self._pack(v, nest_limit - 1)
-
-    def bytes(self):
-        return self._buffer.getvalue()
-
-    def reset(self):
-        self._buffer = StringIO()

+ 0 - 103
utils/converters/msgpack/msgpack/pack.h

@@ -1,103 +0,0 @@
-/*
- * MessagePack for Python packing routine
- *
- * Copyright (C) 2009 Naoki INADA
- *
- *    Licensed under the Apache License, Version 2.0 (the "License");
- *    you may not use this file except in compliance with the License.
- *    You may obtain a copy of the License at
- *
- *        http://www.apache.org/licenses/LICENSE-2.0
- *
- *    Unless required by applicable law or agreed to in writing, software
- *    distributed under the License is distributed on an "AS IS" BASIS,
- *    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *    See the License for the specific language governing permissions and
- *    limitations under the License.
- */
-
-#include <stddef.h>
-#include <stdlib.h>
-#include "sysdep.h"
-#include <limits.h>
-#include <string.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#ifdef _MSC_VER
-#define inline __inline
-#endif
-
-typedef struct msgpack_packer {
-    char *buf;
-    size_t length;
-    size_t buf_size;
-    bool use_bin_type;
-} msgpack_packer;
-
-typedef struct Packer Packer;
-
-static inline int msgpack_pack_int(msgpack_packer* pk, int d);
-static inline int msgpack_pack_long(msgpack_packer* pk, long d);
-static inline int msgpack_pack_long_long(msgpack_packer* pk, long long d);
-static inline int msgpack_pack_unsigned_short(msgpack_packer* pk, unsigned short d);
-static inline int msgpack_pack_unsigned_int(msgpack_packer* pk, unsigned int d);
-static inline int msgpack_pack_unsigned_long(msgpack_packer* pk, unsigned long d);
-//static inline int msgpack_pack_unsigned_long_long(msgpack_packer* pk, unsigned long long d);
-
-static inline int msgpack_pack_uint8(msgpack_packer* pk, uint8_t d);
-static inline int msgpack_pack_uint16(msgpack_packer* pk, uint16_t d);
-static inline int msgpack_pack_uint32(msgpack_packer* pk, uint32_t d);
-static inline int msgpack_pack_uint64(msgpack_packer* pk, uint64_t d);
-static inline int msgpack_pack_int8(msgpack_packer* pk, int8_t d);
-static inline int msgpack_pack_int16(msgpack_packer* pk, int16_t d);
-static inline int msgpack_pack_int32(msgpack_packer* pk, int32_t d);
-static inline int msgpack_pack_int64(msgpack_packer* pk, int64_t d);
-
-static inline int msgpack_pack_float(msgpack_packer* pk, float d);
-static inline int msgpack_pack_double(msgpack_packer* pk, double d);
-
-static inline int msgpack_pack_nil(msgpack_packer* pk);
-static inline int msgpack_pack_true(msgpack_packer* pk);
-static inline int msgpack_pack_false(msgpack_packer* pk);
-
-static inline int msgpack_pack_array(msgpack_packer* pk, unsigned int n);
-
-static inline int msgpack_pack_map(msgpack_packer* pk, unsigned int n);
-
-static inline int msgpack_pack_raw(msgpack_packer* pk, size_t l);
-static inline int msgpack_pack_bin(msgpack_packer* pk, size_t l);
-static inline int msgpack_pack_raw_body(msgpack_packer* pk, const void* b, size_t l);
-
-static inline int msgpack_pack_ext(msgpack_packer* pk, int8_t typecode, size_t l);
-
-static inline int msgpack_pack_write(msgpack_packer* pk, const char *data, size_t l)
-{
-    char* buf = pk->buf;
-    size_t bs = pk->buf_size;
-    size_t len = pk->length;
-
-    if (len + l > bs) {
-        bs = (len + l) * 2;
-        buf = (char*)realloc(buf, bs);
-        if (!buf) return -1;
-    }
-    memcpy(buf + len, data, l);
-    len += l;
-
-    pk->buf = buf;
-    pk->buf_size = bs;
-    pk->length = len;
-    return 0;
-}
-
-#define msgpack_pack_append_buffer(user, buf, len) \
-        return msgpack_pack_write(user, (const char*)buf, len)
-
-#include "pack_template.h"
-
-#ifdef __cplusplus
-}
-#endif

+ 0 - 785
utils/converters/msgpack/msgpack/pack_template.h

@@ -1,785 +0,0 @@
-/*
- * MessagePack packing routine template
- *
- * Copyright (C) 2008-2010 FURUHASHI Sadayuki
- *
- *    Licensed under the Apache License, Version 2.0 (the "License");
- *    you may not use this file except in compliance with the License.
- *    You may obtain a copy of the License at
- *
- *        http://www.apache.org/licenses/LICENSE-2.0
- *
- *    Unless required by applicable law or agreed to in writing, software
- *    distributed under the License is distributed on an "AS IS" BASIS,
- *    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *    See the License for the specific language governing permissions and
- *    limitations under the License.
- */
-
-#if defined(__LITTLE_ENDIAN__)
-#define TAKE8_8(d)  ((uint8_t*)&d)[0]
-#define TAKE8_16(d) ((uint8_t*)&d)[0]
-#define TAKE8_32(d) ((uint8_t*)&d)[0]
-#define TAKE8_64(d) ((uint8_t*)&d)[0]
-#elif defined(__BIG_ENDIAN__)
-#define TAKE8_8(d)  ((uint8_t*)&d)[0]
-#define TAKE8_16(d) ((uint8_t*)&d)[1]
-#define TAKE8_32(d) ((uint8_t*)&d)[3]
-#define TAKE8_64(d) ((uint8_t*)&d)[7]
-#endif
-
-#ifndef msgpack_pack_append_buffer
-#error msgpack_pack_append_buffer callback is not defined
-#endif
-
-
-/*
- * Integer
- */
-
-#define msgpack_pack_real_uint8(x, d) \
-do { \
-    if(d < (1<<7)) { \
-        /* fixnum */ \
-        msgpack_pack_append_buffer(x, &TAKE8_8(d), 1); \
-    } else { \
-        /* unsigned 8 */ \
-        unsigned char buf[2] = {0xcc, TAKE8_8(d)}; \
-        msgpack_pack_append_buffer(x, buf, 2); \
-    } \
-} while(0)
-
-#define msgpack_pack_real_uint16(x, d) \
-do { \
-    if(d < (1<<7)) { \
-        /* fixnum */ \
-        msgpack_pack_append_buffer(x, &TAKE8_16(d), 1); \
-    } else if(d < (1<<8)) { \
-        /* unsigned 8 */ \
-        unsigned char buf[2] = {0xcc, TAKE8_16(d)}; \
-        msgpack_pack_append_buffer(x, buf, 2); \
-    } else { \
-        /* unsigned 16 */ \
-        unsigned char buf[3]; \
-        buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \
-        msgpack_pack_append_buffer(x, buf, 3); \
-    } \
-} while(0)
-
-#define msgpack_pack_real_uint32(x, d) \
-do { \
-    if(d < (1<<8)) { \
-        if(d < (1<<7)) { \
-            /* fixnum */ \
-            msgpack_pack_append_buffer(x, &TAKE8_32(d), 1); \
-        } else { \
-            /* unsigned 8 */ \
-            unsigned char buf[2] = {0xcc, TAKE8_32(d)}; \
-            msgpack_pack_append_buffer(x, buf, 2); \
-        } \
-    } else { \
-        if(d < (1<<16)) { \
-            /* unsigned 16 */ \
-            unsigned char buf[3]; \
-            buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \
-            msgpack_pack_append_buffer(x, buf, 3); \
-        } else { \
-            /* unsigned 32 */ \
-            unsigned char buf[5]; \
-            buf[0] = 0xce; _msgpack_store32(&buf[1], (uint32_t)d); \
-            msgpack_pack_append_buffer(x, buf, 5); \
-        } \
-    } \
-} while(0)
-
-#define msgpack_pack_real_uint64(x, d) \
-do { \
-    if(d < (1ULL<<8)) { \
-        if(d < (1ULL<<7)) { \
-            /* fixnum */ \
-            msgpack_pack_append_buffer(x, &TAKE8_64(d), 1); \
-        } else { \
-            /* unsigned 8 */ \
-            unsigned char buf[2] = {0xcc, TAKE8_64(d)}; \
-            msgpack_pack_append_buffer(x, buf, 2); \
-        } \
-    } else { \
-        if(d < (1ULL<<16)) { \
-            /* unsigned 16 */ \
-            unsigned char buf[3]; \
-            buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \
-            msgpack_pack_append_buffer(x, buf, 3); \
-        } else if(d < (1ULL<<32)) { \
-            /* unsigned 32 */ \
-            unsigned char buf[5]; \
-            buf[0] = 0xce; _msgpack_store32(&buf[1], (uint32_t)d); \
-            msgpack_pack_append_buffer(x, buf, 5); \
-        } else { \
-            /* unsigned 64 */ \
-            unsigned char buf[9]; \
-            buf[0] = 0xcf; _msgpack_store64(&buf[1], d); \
-            msgpack_pack_append_buffer(x, buf, 9); \
-        } \
-    } \
-} while(0)
-
-#define msgpack_pack_real_int8(x, d) \
-do { \
-    if(d < -(1<<5)) { \
-        /* signed 8 */ \
-        unsigned char buf[2] = {0xd0, TAKE8_8(d)}; \
-        msgpack_pack_append_buffer(x, buf, 2); \
-    } else { \
-        /* fixnum */ \
-        msgpack_pack_append_buffer(x, &TAKE8_8(d), 1); \
-    } \
-} while(0)
-
-#define msgpack_pack_real_int16(x, d) \
-do { \
-    if(d < -(1<<5)) { \
-        if(d < -(1<<7)) { \
-            /* signed 16 */ \
-            unsigned char buf[3]; \
-            buf[0] = 0xd1; _msgpack_store16(&buf[1], (int16_t)d); \
-            msgpack_pack_append_buffer(x, buf, 3); \
-        } else { \
-            /* signed 8 */ \
-            unsigned char buf[2] = {0xd0, TAKE8_16(d)}; \
-            msgpack_pack_append_buffer(x, buf, 2); \
-        } \
-    } else if(d < (1<<7)) { \
-        /* fixnum */ \
-        msgpack_pack_append_buffer(x, &TAKE8_16(d), 1); \
-    } else { \
-        if(d < (1<<8)) { \
-            /* unsigned 8 */ \
-            unsigned char buf[2] = {0xcc, TAKE8_16(d)}; \
-            msgpack_pack_append_buffer(x, buf, 2); \
-        } else { \
-            /* unsigned 16 */ \
-            unsigned char buf[3]; \
-            buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \
-            msgpack_pack_append_buffer(x, buf, 3); \
-        } \
-    } \
-} while(0)
-
-#define msgpack_pack_real_int32(x, d) \
-do { \
-    if(d < -(1<<5)) { \
-        if(d < -(1<<15)) { \
-            /* signed 32 */ \
-            unsigned char buf[5]; \
-            buf[0] = 0xd2; _msgpack_store32(&buf[1], (int32_t)d); \
-            msgpack_pack_append_buffer(x, buf, 5); \
-        } else if(d < -(1<<7)) { \
-            /* signed 16 */ \
-            unsigned char buf[3]; \
-            buf[0] = 0xd1; _msgpack_store16(&buf[1], (int16_t)d); \
-            msgpack_pack_append_buffer(x, buf, 3); \
-        } else { \
-            /* signed 8 */ \
-            unsigned char buf[2] = {0xd0, TAKE8_32(d)}; \
-            msgpack_pack_append_buffer(x, buf, 2); \
-        } \
-    } else if(d < (1<<7)) { \
-        /* fixnum */ \
-        msgpack_pack_append_buffer(x, &TAKE8_32(d), 1); \
-    } else { \
-        if(d < (1<<8)) { \
-            /* unsigned 8 */ \
-            unsigned char buf[2] = {0xcc, TAKE8_32(d)}; \
-            msgpack_pack_append_buffer(x, buf, 2); \
-        } else if(d < (1<<16)) { \
-            /* unsigned 16 */ \
-            unsigned char buf[3]; \
-            buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \
-            msgpack_pack_append_buffer(x, buf, 3); \
-        } else { \
-            /* unsigned 32 */ \
-            unsigned char buf[5]; \
-            buf[0] = 0xce; _msgpack_store32(&buf[1], (uint32_t)d); \
-            msgpack_pack_append_buffer(x, buf, 5); \
-        } \
-    } \
-} while(0)
-
-#define msgpack_pack_real_int64(x, d) \
-do { \
-    if(d < -(1LL<<5)) { \
-        if(d < -(1LL<<15)) { \
-            if(d < -(1LL<<31)) { \
-                /* signed 64 */ \
-                unsigned char buf[9]; \
-                buf[0] = 0xd3; _msgpack_store64(&buf[1], d); \
-                msgpack_pack_append_buffer(x, buf, 9); \
-            } else { \
-                /* signed 32 */ \
-                unsigned char buf[5]; \
-                buf[0] = 0xd2; _msgpack_store32(&buf[1], (int32_t)d); \
-                msgpack_pack_append_buffer(x, buf, 5); \
-            } \
-        } else { \
-            if(d < -(1<<7)) { \
-                /* signed 16 */ \
-                unsigned char buf[3]; \
-                buf[0] = 0xd1; _msgpack_store16(&buf[1], (int16_t)d); \
-                msgpack_pack_append_buffer(x, buf, 3); \
-            } else { \
-                /* signed 8 */ \
-                unsigned char buf[2] = {0xd0, TAKE8_64(d)}; \
-                msgpack_pack_append_buffer(x, buf, 2); \
-            } \
-        } \
-    } else if(d < (1<<7)) { \
-        /* fixnum */ \
-        msgpack_pack_append_buffer(x, &TAKE8_64(d), 1); \
-    } else { \
-        if(d < (1LL<<16)) { \
-            if(d < (1<<8)) { \
-                /* unsigned 8 */ \
-                unsigned char buf[2] = {0xcc, TAKE8_64(d)}; \
-                msgpack_pack_append_buffer(x, buf, 2); \
-            } else { \
-                /* unsigned 16 */ \
-                unsigned char buf[3]; \
-                buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \
-                msgpack_pack_append_buffer(x, buf, 3); \
-            } \
-        } else { \
-            if(d < (1LL<<32)) { \
-                /* unsigned 32 */ \
-                unsigned char buf[5]; \
-                buf[0] = 0xce; _msgpack_store32(&buf[1], (uint32_t)d); \
-                msgpack_pack_append_buffer(x, buf, 5); \
-            } else { \
-                /* unsigned 64 */ \
-                unsigned char buf[9]; \
-                buf[0] = 0xcf; _msgpack_store64(&buf[1], d); \
-                msgpack_pack_append_buffer(x, buf, 9); \
-            } \
-        } \
-    } \
-} while(0)
-
-
-static inline int msgpack_pack_uint8(msgpack_packer* x, uint8_t d)
-{
-    msgpack_pack_real_uint8(x, d);
-}
-
-static inline int msgpack_pack_uint16(msgpack_packer* x, uint16_t d)
-{
-    msgpack_pack_real_uint16(x, d);
-}
-
-static inline int msgpack_pack_uint32(msgpack_packer* x, uint32_t d)
-{
-    msgpack_pack_real_uint32(x, d);
-}
-
-static inline int msgpack_pack_uint64(msgpack_packer* x, uint64_t d)
-{
-    msgpack_pack_real_uint64(x, d);
-}
-
-static inline int msgpack_pack_int8(msgpack_packer* x, int8_t d)
-{
-    msgpack_pack_real_int8(x, d);
-}
-
-static inline int msgpack_pack_int16(msgpack_packer* x, int16_t d)
-{
-    msgpack_pack_real_int16(x, d);
-}
-
-static inline int msgpack_pack_int32(msgpack_packer* x, int32_t d)
-{
-    msgpack_pack_real_int32(x, d);
-}
-
-static inline int msgpack_pack_int64(msgpack_packer* x, int64_t d)
-{
-    msgpack_pack_real_int64(x, d);
-}
-
-
-//#ifdef msgpack_pack_inline_func_cint
-
-static inline int msgpack_pack_short(msgpack_packer* x, short d)
-{
-#if defined(SIZEOF_SHORT)
-#if SIZEOF_SHORT == 2
-    msgpack_pack_real_int16(x, d);
-#elif SIZEOF_SHORT == 4
-    msgpack_pack_real_int32(x, d);
-#else
-    msgpack_pack_real_int64(x, d);
-#endif
-
-#elif defined(SHRT_MAX)
-#if SHRT_MAX == 0x7fff
-    msgpack_pack_real_int16(x, d);
-#elif SHRT_MAX == 0x7fffffff
-    msgpack_pack_real_int32(x, d);
-#else
-    msgpack_pack_real_int64(x, d);
-#endif
-
-#else
-if(sizeof(short) == 2) {
-    msgpack_pack_real_int16(x, d);
-} else if(sizeof(short) == 4) {
-    msgpack_pack_real_int32(x, d);
-} else {
-    msgpack_pack_real_int64(x, d);
-}
-#endif
-}
-
-static inline int msgpack_pack_int(msgpack_packer* x, int d)
-{
-#if defined(SIZEOF_INT)
-#if SIZEOF_INT == 2
-    msgpack_pack_real_int16(x, d);
-#elif SIZEOF_INT == 4
-    msgpack_pack_real_int32(x, d);
-#else
-    msgpack_pack_real_int64(x, d);
-#endif
-
-#elif defined(INT_MAX)
-#if INT_MAX == 0x7fff
-    msgpack_pack_real_int16(x, d);
-#elif INT_MAX == 0x7fffffff
-    msgpack_pack_real_int32(x, d);
-#else
-    msgpack_pack_real_int64(x, d);
-#endif
-
-#else
-if(sizeof(int) == 2) {
-    msgpack_pack_real_int16(x, d);
-} else if(sizeof(int) == 4) {
-    msgpack_pack_real_int32(x, d);
-} else {
-    msgpack_pack_real_int64(x, d);
-}
-#endif
-}
-
-static inline int msgpack_pack_long(msgpack_packer* x, long d)
-{
-#if defined(SIZEOF_LONG)
-#if SIZEOF_LONG == 2
-    msgpack_pack_real_int16(x, d);
-#elif SIZEOF_LONG == 4
-    msgpack_pack_real_int32(x, d);
-#else
-    msgpack_pack_real_int64(x, d);
-#endif
-
-#elif defined(LONG_MAX)
-#if LONG_MAX == 0x7fffL
-    msgpack_pack_real_int16(x, d);
-#elif LONG_MAX == 0x7fffffffL
-    msgpack_pack_real_int32(x, d);
-#else
-    msgpack_pack_real_int64(x, d);
-#endif
-
-#else
-if(sizeof(long) == 2) {
-    msgpack_pack_real_int16(x, d);
-} else if(sizeof(long) == 4) {
-    msgpack_pack_real_int32(x, d);
-} else {
-    msgpack_pack_real_int64(x, d);
-}
-#endif
-}
-
-static inline int msgpack_pack_long_long(msgpack_packer* x, long long d)
-{
-#if defined(SIZEOF_LONG_LONG)
-#if SIZEOF_LONG_LONG == 2
-    msgpack_pack_real_int16(x, d);
-#elif SIZEOF_LONG_LONG == 4
-    msgpack_pack_real_int32(x, d);
-#else
-    msgpack_pack_real_int64(x, d);
-#endif
-
-#elif defined(LLONG_MAX)
-#if LLONG_MAX == 0x7fffL
-    msgpack_pack_real_int16(x, d);
-#elif LLONG_MAX == 0x7fffffffL
-    msgpack_pack_real_int32(x, d);
-#else
-    msgpack_pack_real_int64(x, d);
-#endif
-
-#else
-if(sizeof(long long) == 2) {
-    msgpack_pack_real_int16(x, d);
-} else if(sizeof(long long) == 4) {
-    msgpack_pack_real_int32(x, d);
-} else {
-    msgpack_pack_real_int64(x, d);
-}
-#endif
-}
-
-static inline int msgpack_pack_unsigned_short(msgpack_packer* x, unsigned short d)
-{
-#if defined(SIZEOF_SHORT)
-#if SIZEOF_SHORT == 2
-    msgpack_pack_real_uint16(x, d);
-#elif SIZEOF_SHORT == 4
-    msgpack_pack_real_uint32(x, d);
-#else
-    msgpack_pack_real_uint64(x, d);
-#endif
-
-#elif defined(USHRT_MAX)
-#if USHRT_MAX == 0xffffU
-    msgpack_pack_real_uint16(x, d);
-#elif USHRT_MAX == 0xffffffffU
-    msgpack_pack_real_uint32(x, d);
-#else
-    msgpack_pack_real_uint64(x, d);
-#endif
-
-#else
-if(sizeof(unsigned short) == 2) {
-    msgpack_pack_real_uint16(x, d);
-} else if(sizeof(unsigned short) == 4) {
-    msgpack_pack_real_uint32(x, d);
-} else {
-    msgpack_pack_real_uint64(x, d);
-}
-#endif
-}
-
-static inline int msgpack_pack_unsigned_int(msgpack_packer* x, unsigned int d)
-{
-#if defined(SIZEOF_INT)
-#if SIZEOF_INT == 2
-    msgpack_pack_real_uint16(x, d);
-#elif SIZEOF_INT == 4
-    msgpack_pack_real_uint32(x, d);
-#else
-    msgpack_pack_real_uint64(x, d);
-#endif
-
-#elif defined(UINT_MAX)
-#if UINT_MAX == 0xffffU
-    msgpack_pack_real_uint16(x, d);
-#elif UINT_MAX == 0xffffffffU
-    msgpack_pack_real_uint32(x, d);
-#else
-    msgpack_pack_real_uint64(x, d);
-#endif
-
-#else
-if(sizeof(unsigned int) == 2) {
-    msgpack_pack_real_uint16(x, d);
-} else if(sizeof(unsigned int) == 4) {
-    msgpack_pack_real_uint32(x, d);
-} else {
-    msgpack_pack_real_uint64(x, d);
-}
-#endif
-}
-
-static inline int msgpack_pack_unsigned_long(msgpack_packer* x, unsigned long d)
-{
-#if defined(SIZEOF_LONG)
-#if SIZEOF_LONG == 2
-    msgpack_pack_real_uint16(x, d);
-#elif SIZEOF_LONG == 4
-    msgpack_pack_real_uint32(x, d);
-#else
-    msgpack_pack_real_uint64(x, d);
-#endif
-
-#elif defined(ULONG_MAX)
-#if ULONG_MAX == 0xffffUL
-    msgpack_pack_real_uint16(x, d);
-#elif ULONG_MAX == 0xffffffffUL
-    msgpack_pack_real_uint32(x, d);
-#else
-    msgpack_pack_real_uint64(x, d);
-#endif
-
-#else
-if(sizeof(unsigned long) == 2) {
-    msgpack_pack_real_uint16(x, d);
-} else if(sizeof(unsigned long) == 4) {
-    msgpack_pack_real_uint32(x, d);
-} else {
-    msgpack_pack_real_uint64(x, d);
-}
-#endif
-}
-
-static inline int msgpack_pack_unsigned_long_long(msgpack_packer* x, unsigned long long d)
-{
-#if defined(SIZEOF_LONG_LONG)
-#if SIZEOF_LONG_LONG == 2
-    msgpack_pack_real_uint16(x, d);
-#elif SIZEOF_LONG_LONG == 4
-    msgpack_pack_real_uint32(x, d);
-#else
-    msgpack_pack_real_uint64(x, d);
-#endif
-
-#elif defined(ULLONG_MAX)
-#if ULLONG_MAX == 0xffffUL
-    msgpack_pack_real_uint16(x, d);
-#elif ULLONG_MAX == 0xffffffffUL
-    msgpack_pack_real_uint32(x, d);
-#else
-    msgpack_pack_real_uint64(x, d);
-#endif
-
-#else
-if(sizeof(unsigned long long) == 2) {
-    msgpack_pack_real_uint16(x, d);
-} else if(sizeof(unsigned long long) == 4) {
-    msgpack_pack_real_uint32(x, d);
-} else {
-    msgpack_pack_real_uint64(x, d);
-}
-#endif
-}
-
-//#undef msgpack_pack_inline_func_cint
-//#endif
-
-
-
-/*
- * Float
- */
-
-static inline int msgpack_pack_float(msgpack_packer* x, float d)
-{
-    union { float f; uint32_t i; } mem;
-    mem.f = d;
-    unsigned char buf[5];
-    buf[0] = 0xca; _msgpack_store32(&buf[1], mem.i);
-    msgpack_pack_append_buffer(x, buf, 5);
-}
-
-static inline int msgpack_pack_double(msgpack_packer* x, double d)
-{
-    union { double f; uint64_t i; } mem;
-    mem.f = d;
-    unsigned char buf[9];
-    buf[0] = 0xcb;
-#if defined(__arm__) && !(__ARM_EABI__) // arm-oabi
-    // https://github.com/msgpack/msgpack-perl/pull/1
-    mem.i = (mem.i & 0xFFFFFFFFUL) << 32UL | (mem.i >> 32UL);
-#endif
-    _msgpack_store64(&buf[1], mem.i);
-    msgpack_pack_append_buffer(x, buf, 9);
-}
-
-
-/*
- * Nil
- */
-
-static inline int msgpack_pack_nil(msgpack_packer* x)
-{
-    static const unsigned char d = 0xc0;
-    msgpack_pack_append_buffer(x, &d, 1);
-}
-
-
-/*
- * Boolean
- */
-
-static inline int msgpack_pack_true(msgpack_packer* x)
-{
-    static const unsigned char d = 0xc3;
-    msgpack_pack_append_buffer(x, &d, 1);
-}
-
-static inline int msgpack_pack_false(msgpack_packer* x)
-{
-    static const unsigned char d = 0xc2;
-    msgpack_pack_append_buffer(x, &d, 1);
-}
-
-
-/*
- * Array
- */
-
-static inline int msgpack_pack_array(msgpack_packer* x, unsigned int n)
-{
-    if(n < 16) {
-        unsigned char d = 0x90 | n;
-        msgpack_pack_append_buffer(x, &d, 1);
-    } else if(n < 65536) {
-        unsigned char buf[3];
-        buf[0] = 0xdc; _msgpack_store16(&buf[1], (uint16_t)n);
-        msgpack_pack_append_buffer(x, buf, 3);
-    } else {
-        unsigned char buf[5];
-        buf[0] = 0xdd; _msgpack_store32(&buf[1], (uint32_t)n);
-        msgpack_pack_append_buffer(x, buf, 5);
-    }
-}
-
-
-/*
- * Map
- */
-
-static inline int msgpack_pack_map(msgpack_packer* x, unsigned int n)
-{
-    if(n < 16) {
-        unsigned char d = 0x80 | n;
-        msgpack_pack_append_buffer(x, &TAKE8_8(d), 1);
-    } else if(n < 65536) {
-        unsigned char buf[3];
-        buf[0] = 0xde; _msgpack_store16(&buf[1], (uint16_t)n);
-        msgpack_pack_append_buffer(x, buf, 3);
-    } else {
-        unsigned char buf[5];
-        buf[0] = 0xdf; _msgpack_store32(&buf[1], (uint32_t)n);
-        msgpack_pack_append_buffer(x, buf, 5);
-    }
-}
-
-
-/*
- * Raw
- */
-
-static inline int msgpack_pack_raw(msgpack_packer* x, size_t l)
-{
-    if (l < 32) {
-        unsigned char d = 0xa0 | (uint8_t)l;
-        msgpack_pack_append_buffer(x, &TAKE8_8(d), 1);
-    } else if (x->use_bin_type && l < 256) {  // str8 is new format introduced with bin.
-        unsigned char buf[2] = {0xd9, (uint8_t)l};
-        msgpack_pack_append_buffer(x, buf, 2);
-    } else if (l < 65536) {
-        unsigned char buf[3];
-        buf[0] = 0xda; _msgpack_store16(&buf[1], (uint16_t)l);
-        msgpack_pack_append_buffer(x, buf, 3);
-    } else {
-        unsigned char buf[5];
-        buf[0] = 0xdb; _msgpack_store32(&buf[1], (uint32_t)l);
-        msgpack_pack_append_buffer(x, buf, 5);
-    }
-}
-
-/*
- * bin
- */
-static inline int msgpack_pack_bin(msgpack_packer *x, size_t l)
-{
-    if (!x->use_bin_type) {
-        return msgpack_pack_raw(x, l);
-    }
-    if (l < 256) {
-        unsigned char buf[2] = {0xc4, (unsigned char)l};
-        msgpack_pack_append_buffer(x, buf, 2);
-    } else if (l < 65536) {
-        unsigned char buf[3] = {0xc5};
-        _msgpack_store16(&buf[1], (uint16_t)l);
-        msgpack_pack_append_buffer(x, buf, 3);
-    } else {
-        unsigned char buf[5] = {0xc6};
-        _msgpack_store32(&buf[1], (uint32_t)l);
-        msgpack_pack_append_buffer(x, buf, 5);
-    }
-}
-
-static inline int msgpack_pack_raw_body(msgpack_packer* x, const void* b, size_t l)
-{
-    if (l > 0) msgpack_pack_append_buffer(x, (const unsigned char*)b, l);
-    return 0;
-}
-
-/*
- * Ext
- */
-static inline int msgpack_pack_ext(msgpack_packer* x, int8_t typecode, size_t l)
-{
-    if (l == 1) {
-        unsigned char buf[2];
-        buf[0] = 0xd4;
-        buf[1] = (unsigned char)typecode;
-        msgpack_pack_append_buffer(x, buf, 2);
-    }
-    else if(l == 2) {
-        unsigned char buf[2];
-        buf[0] = 0xd5;
-        buf[1] = (unsigned char)typecode;
-        msgpack_pack_append_buffer(x, buf, 2);
-    }
-    else if(l == 4) {
-        unsigned char buf[2];
-        buf[0] = 0xd6;
-        buf[1] = (unsigned char)typecode;
-        msgpack_pack_append_buffer(x, buf, 2);
-    }
-    else if(l == 8) {
-        unsigned char buf[2];
-        buf[0] = 0xd7;
-        buf[1] = (unsigned char)typecode;
-        msgpack_pack_append_buffer(x, buf, 2);
-    }
-    else if(l == 16) {
-        unsigned char buf[2];
-        buf[0] = 0xd8;
-        buf[1] = (unsigned char)typecode;
-        msgpack_pack_append_buffer(x, buf, 2);
-    }
-    else if(l < 256) {
-        unsigned char buf[3];
-        buf[0] = 0xc7;
-        buf[1] = l;
-        buf[2] = (unsigned char)typecode;
-        msgpack_pack_append_buffer(x, buf, 3);
-    } else if(l < 65536) {
-        unsigned char buf[4];
-        buf[0] = 0xc8;
-        _msgpack_store16(&buf[1], (uint16_t)l);
-        buf[3] = (unsigned char)typecode;
-        msgpack_pack_append_buffer(x, buf, 4);
-    } else {
-        unsigned char buf[6];
-        buf[0] = 0xc9;
-        _msgpack_store32(&buf[1], (uint32_t)l);
-        buf[5] = (unsigned char)typecode;
-        msgpack_pack_append_buffer(x, buf, 6);
-    }
-
-}
-
-
-
-#undef msgpack_pack_append_buffer
-
-#undef TAKE8_8
-#undef TAKE8_16
-#undef TAKE8_32
-#undef TAKE8_64
-
-#undef msgpack_pack_real_uint8
-#undef msgpack_pack_real_uint16
-#undef msgpack_pack_real_uint32
-#undef msgpack_pack_real_uint64
-#undef msgpack_pack_real_int8
-#undef msgpack_pack_real_int16
-#undef msgpack_pack_real_int32
-#undef msgpack_pack_real_int64

+ 0 - 194
utils/converters/msgpack/msgpack/sysdep.h

@@ -1,194 +0,0 @@
-/*
- * MessagePack system dependencies
- *
- * Copyright (C) 2008-2010 FURUHASHI Sadayuki
- *
- *    Licensed under the Apache License, Version 2.0 (the "License");
- *    you may not use this file except in compliance with the License.
- *    You may obtain a copy of the License at
- *
- *        http://www.apache.org/licenses/LICENSE-2.0
- *
- *    Unless required by applicable law or agreed to in writing, software
- *    distributed under the License is distributed on an "AS IS" BASIS,
- *    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *    See the License for the specific language governing permissions and
- *    limitations under the License.
- */
-#ifndef MSGPACK_SYSDEP_H__
-#define MSGPACK_SYSDEP_H__
-
-#include <stdlib.h>
-#include <stddef.h>
-#if defined(_MSC_VER) && _MSC_VER < 1600
-typedef __int8 int8_t;
-typedef unsigned __int8 uint8_t;
-typedef __int16 int16_t;
-typedef unsigned __int16 uint16_t;
-typedef __int32 int32_t;
-typedef unsigned __int32 uint32_t;
-typedef __int64 int64_t;
-typedef unsigned __int64 uint64_t;
-#elif defined(_MSC_VER)  // && _MSC_VER >= 1600
-#include <stdint.h>
-#else
-#include <stdint.h>
-#include <stdbool.h>
-#endif
-
-#ifdef _WIN32
-#define _msgpack_atomic_counter_header <windows.h>
-typedef long _msgpack_atomic_counter_t;
-#define _msgpack_sync_decr_and_fetch(ptr) InterlockedDecrement(ptr)
-#define _msgpack_sync_incr_and_fetch(ptr) InterlockedIncrement(ptr)
-#elif defined(__GNUC__) && ((__GNUC__*10 + __GNUC_MINOR__) < 41)
-#define _msgpack_atomic_counter_header "gcc_atomic.h"
-#else
-typedef unsigned int _msgpack_atomic_counter_t;
-#define _msgpack_sync_decr_and_fetch(ptr) __sync_sub_and_fetch(ptr, 1)
-#define _msgpack_sync_incr_and_fetch(ptr) __sync_add_and_fetch(ptr, 1)
-#endif
-
-#ifdef _WIN32
-
-#ifdef __cplusplus
-/* numeric_limits<T>::min,max */
-#ifdef max
-#undef max
-#endif
-#ifdef min
-#undef min
-#endif
-#endif
-
-#else
-#include <arpa/inet.h>  /* __BYTE_ORDER */
-#endif
-
-#if !defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__)
-#if __BYTE_ORDER == __LITTLE_ENDIAN
-#define __LITTLE_ENDIAN__
-#elif __BYTE_ORDER == __BIG_ENDIAN
-#define __BIG_ENDIAN__
-#elif _WIN32
-#define __LITTLE_ENDIAN__
-#endif
-#endif
-
-
-#ifdef __LITTLE_ENDIAN__
-
-#ifdef _WIN32
-#  if defined(ntohs)
-#    define _msgpack_be16(x) ntohs(x)
-#  elif defined(_byteswap_ushort) || (defined(_MSC_VER) && _MSC_VER >= 1400)
-#    define _msgpack_be16(x) ((uint16_t)_byteswap_ushort((unsigned short)x))
-#  else
-#    define _msgpack_be16(x) ( \
-        ((((uint16_t)x) <<  8) ) | \
-        ((((uint16_t)x) >>  8) ) )
-#  endif
-#else
-#  define _msgpack_be16(x) ntohs(x)
-#endif
-
-#ifdef _WIN32
-#  if defined(ntohl)
-#    define _msgpack_be32(x) ntohl(x)
-#  elif defined(_byteswap_ulong) || (defined(_MSC_VER) && _MSC_VER >= 1400)
-#    define _msgpack_be32(x) ((uint32_t)_byteswap_ulong((unsigned long)x))
-#  else
-#    define _msgpack_be32(x) \
-        ( ((((uint32_t)x) << 24)               ) | \
-          ((((uint32_t)x) <<  8) & 0x00ff0000U ) | \
-          ((((uint32_t)x) >>  8) & 0x0000ff00U ) | \
-          ((((uint32_t)x) >> 24)               ) )
-#  endif
-#else
-#  define _msgpack_be32(x) ntohl(x)
-#endif
-
-#if defined(_byteswap_uint64) || (defined(_MSC_VER) && _MSC_VER >= 1400)
-#  define _msgpack_be64(x) (_byteswap_uint64(x))
-#elif defined(bswap_64)
-#  define _msgpack_be64(x) bswap_64(x)
-#elif defined(__DARWIN_OSSwapInt64)
-#  define _msgpack_be64(x) __DARWIN_OSSwapInt64(x)
-#else
-#define _msgpack_be64(x) \
-    ( ((((uint64_t)x) << 56)                         ) | \
-      ((((uint64_t)x) << 40) & 0x00ff000000000000ULL ) | \
-      ((((uint64_t)x) << 24) & 0x0000ff0000000000ULL ) | \
-      ((((uint64_t)x) <<  8) & 0x000000ff00000000ULL ) | \
-      ((((uint64_t)x) >>  8) & 0x00000000ff000000ULL ) | \
-      ((((uint64_t)x) >> 24) & 0x0000000000ff0000ULL ) | \
-      ((((uint64_t)x) >> 40) & 0x000000000000ff00ULL ) | \
-      ((((uint64_t)x) >> 56)                         ) )
-#endif
-
-#define _msgpack_load16(cast, from) ((cast)( \
-        (((uint16_t)((uint8_t*)(from))[0]) << 8) | \
-        (((uint16_t)((uint8_t*)(from))[1])     ) ))
-
-#define _msgpack_load32(cast, from) ((cast)( \
-        (((uint32_t)((uint8_t*)(from))[0]) << 24) | \
-        (((uint32_t)((uint8_t*)(from))[1]) << 16) | \
-        (((uint32_t)((uint8_t*)(from))[2]) <<  8) | \
-        (((uint32_t)((uint8_t*)(from))[3])      ) ))
-
-#define _msgpack_load64(cast, from) ((cast)( \
-        (((uint64_t)((uint8_t*)(from))[0]) << 56) | \
-        (((uint64_t)((uint8_t*)(from))[1]) << 48) | \
-        (((uint64_t)((uint8_t*)(from))[2]) << 40) | \
-        (((uint64_t)((uint8_t*)(from))[3]) << 32) | \
-        (((uint64_t)((uint8_t*)(from))[4]) << 24) | \
-        (((uint64_t)((uint8_t*)(from))[5]) << 16) | \
-        (((uint64_t)((uint8_t*)(from))[6]) << 8)  | \
-        (((uint64_t)((uint8_t*)(from))[7])     )  ))
-
-#else
-
-#define _msgpack_be16(x) (x)
-#define _msgpack_be32(x) (x)
-#define _msgpack_be64(x) (x)
-
-#define _msgpack_load16(cast, from) ((cast)( \
-        (((uint16_t)((uint8_t*)from)[0]) << 8) | \
-        (((uint16_t)((uint8_t*)from)[1])     ) ))
-
-#define _msgpack_load32(cast, from) ((cast)( \
-        (((uint32_t)((uint8_t*)from)[0]) << 24) | \
-        (((uint32_t)((uint8_t*)from)[1]) << 16) | \
-        (((uint32_t)((uint8_t*)from)[2]) <<  8) | \
-        (((uint32_t)((uint8_t*)from)[3])      ) ))
-
-#define _msgpack_load64(cast, from) ((cast)( \
-        (((uint64_t)((uint8_t*)from)[0]) << 56) | \
-        (((uint64_t)((uint8_t*)from)[1]) << 48) | \
-        (((uint64_t)((uint8_t*)from)[2]) << 40) | \
-        (((uint64_t)((uint8_t*)from)[3]) << 32) | \
-        (((uint64_t)((uint8_t*)from)[4]) << 24) | \
-        (((uint64_t)((uint8_t*)from)[5]) << 16) | \
-        (((uint64_t)((uint8_t*)from)[6]) << 8)  | \
-        (((uint64_t)((uint8_t*)from)[7])     )  ))
-#endif
-
-
-#define _msgpack_store16(to, num) \
-    do { uint16_t val = _msgpack_be16(num); memcpy(to, &val, 2); } while(0)
-#define _msgpack_store32(to, num) \
-    do { uint32_t val = _msgpack_be32(num); memcpy(to, &val, 4); } while(0)
-#define _msgpack_store64(to, num) \
-    do { uint64_t val = _msgpack_be64(num); memcpy(to, &val, 8); } while(0)
-
-/*
-#define _msgpack_load16(cast, from) \
-    ({ cast val; memcpy(&val, (char*)from, 2); _msgpack_be16(val); })
-#define _msgpack_load32(cast, from) \
-    ({ cast val; memcpy(&val, (char*)from, 4); _msgpack_be32(val); })
-#define _msgpack_load64(cast, from) \
-    ({ cast val; memcpy(&val, (char*)from, 8); _msgpack_be64(val); })
-*/
-
-
-#endif /* msgpack/sysdep.h */

+ 0 - 263
utils/converters/msgpack/msgpack/unpack.h

@@ -1,263 +0,0 @@
-/*
- * MessagePack for Python unpacking routine
- *
- * Copyright (C) 2009 Naoki INADA
- *
- *    Licensed under the Apache License, Version 2.0 (the "License");
- *    you may not use this file except in compliance with the License.
- *    You may obtain a copy of the License at
- *
- *        http://www.apache.org/licenses/LICENSE-2.0
- *
- *    Unless required by applicable law or agreed to in writing, software
- *    distributed under the License is distributed on an "AS IS" BASIS,
- *    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *    See the License for the specific language governing permissions and
- *    limitations under the License.
- */
-
-#define MSGPACK_EMBED_STACK_SIZE  (1024)
-#include "unpack_define.h"
-
-typedef struct unpack_user {
-    int use_list;
-    PyObject *object_hook;
-    bool has_pairs_hook;
-    PyObject *list_hook;
-    PyObject *ext_hook;
-    const char *encoding;
-    const char *unicode_errors;
-} unpack_user;
-
-typedef PyObject* msgpack_unpack_object;
-struct unpack_context;
-typedef struct unpack_context unpack_context;
-typedef int (*execute_fn)(unpack_context *ctx, const char* data, size_t len, size_t* off);
-
-static inline msgpack_unpack_object unpack_callback_root(unpack_user* u)
-{
-    return NULL;
-}
-
-static inline int unpack_callback_uint16(unpack_user* u, uint16_t d, msgpack_unpack_object* o)
-{
-    PyObject *p = PyInt_FromLong((long)d);
-    if (!p)
-        return -1;
-    *o = p;
-    return 0;
-}
-static inline int unpack_callback_uint8(unpack_user* u, uint8_t d, msgpack_unpack_object* o)
-{
-    return unpack_callback_uint16(u, d, o);
-}
-
-
-static inline int unpack_callback_uint32(unpack_user* u, uint32_t d, msgpack_unpack_object* o)
-{
-    PyObject *p;
-#if UINT32_MAX > LONG_MAX
-    if (d > LONG_MAX) {
-        p = PyLong_FromUnsignedLong((unsigned long)d);
-    } else
-#endif
-    {
-        p = PyInt_FromLong((long)d);
-    }
-    if (!p)
-        return -1;
-    *o = p;
-    return 0;
-}
-
-static inline int unpack_callback_uint64(unpack_user* u, uint64_t d, msgpack_unpack_object* o)
-{
-    PyObject *p;
-    if (d > LONG_MAX) {
-        p = PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG)d);
-    } else {
-        p = PyInt_FromLong((long)d);
-    }
-    if (!p)
-        return -1;
-    *o = p;
-    return 0;
-}
-
-static inline int unpack_callback_int32(unpack_user* u, int32_t d, msgpack_unpack_object* o)
-{
-    PyObject *p = PyInt_FromLong(d);
-    if (!p)
-        return -1;
-    *o = p;
-    return 0;
-}
-
-static inline int unpack_callback_int16(unpack_user* u, int16_t d, msgpack_unpack_object* o)
-{
-    return unpack_callback_int32(u, d, o);
-}
-
-static inline int unpack_callback_int8(unpack_user* u, int8_t d, msgpack_unpack_object* o)
-{
-    return unpack_callback_int32(u, d, o);
-}
-
-static inline int unpack_callback_int64(unpack_user* u, int64_t d, msgpack_unpack_object* o)
-{
-    PyObject *p;
-    if (d > LONG_MAX || d < LONG_MIN) {
-        p = PyLong_FromLongLong((unsigned PY_LONG_LONG)d);
-    } else {
-        p = PyInt_FromLong((long)d);
-    }
-    *o = p;
-    return 0;
-}
-
-static inline int unpack_callback_double(unpack_user* u, double d, msgpack_unpack_object* o)
-{
-    PyObject *p = PyFloat_FromDouble(d);
-    if (!p)
-        return -1;
-    *o = p;
-    return 0;
-}
-
-static inline int unpack_callback_float(unpack_user* u, float d, msgpack_unpack_object* o)
-{
-    return unpack_callback_double(u, d, o);
-}
-
-static inline int unpack_callback_nil(unpack_user* u, msgpack_unpack_object* o)
-{ Py_INCREF(Py_None); *o = Py_None; return 0; }
-
-static inline int unpack_callback_true(unpack_user* u, msgpack_unpack_object* o)
-{ Py_INCREF(Py_True); *o = Py_True; return 0; }
-
-static inline int unpack_callback_false(unpack_user* u, msgpack_unpack_object* o)
-{ Py_INCREF(Py_False); *o = Py_False; return 0; }
-
-static inline int unpack_callback_array(unpack_user* u, unsigned int n, msgpack_unpack_object* o)
-{
-    PyObject *p = u->use_list ? PyList_New(n) : PyTuple_New(n);
-
-    if (!p)
-        return -1;
-    *o = p;
-    return 0;
-}
-
-static inline int unpack_callback_array_item(unpack_user* u, unsigned int current, msgpack_unpack_object* c, msgpack_unpack_object o)
-{
-    if (u->use_list)
-        PyList_SET_ITEM(*c, current, o);
-    else
-        PyTuple_SET_ITEM(*c, current, o);
-    return 0;
-}
-
-static inline int unpack_callback_array_end(unpack_user* u, msgpack_unpack_object* c)
-{
-    if (u->list_hook) {
-        PyObject *new_c = PyObject_CallFunctionObjArgs(u->list_hook, *c, NULL);
-        if (!new_c)
-            return -1;
-        Py_DECREF(*c);
-        *c = new_c;
-    }
-    return 0;
-}
-
-static inline int unpack_callback_map(unpack_user* u, unsigned int n, msgpack_unpack_object* o)
-{
-    PyObject *p;
-    if (u->has_pairs_hook) {
-        p = PyList_New(n); // Or use tuple?
-    }
-    else {
-        p = PyDict_New();
-    }
-    if (!p)
-        return -1;
-    *o = p;
-    return 0;
-}
-
-static inline int unpack_callback_map_item(unpack_user* u, unsigned int current, msgpack_unpack_object* c, msgpack_unpack_object k, msgpack_unpack_object v)
-{
-    if (u->has_pairs_hook) {
-        msgpack_unpack_object item = PyTuple_Pack(2, k, v);
-        if (!item)
-            return -1;
-        Py_DECREF(k);
-        Py_DECREF(v);
-        PyList_SET_ITEM(*c, current, item);
-        return 0;
-    }
-    else if (PyDict_SetItem(*c, k, v) == 0) {
-        Py_DECREF(k);
-        Py_DECREF(v);
-        return 0;
-    }
-    return -1;
-}
-
-static inline int unpack_callback_map_end(unpack_user* u, msgpack_unpack_object* c)
-{
-    if (u->object_hook) {
-        PyObject *new_c = PyObject_CallFunctionObjArgs(u->object_hook, *c, NULL);
-        if (!new_c)
-            return -1;
-
-        Py_DECREF(*c);
-        *c = new_c;
-    }
-    return 0;
-}
-
-static inline int unpack_callback_raw(unpack_user* u, const char* b, const char* p, unsigned int l, msgpack_unpack_object* o)
-{
-    PyObject *py;
-    if(u->encoding) {
-        py = PyUnicode_Decode(p, l, u->encoding, u->unicode_errors);
-    } else {
-        py = PyBytes_FromStringAndSize(p, l);
-    }
-    if (!py)
-        return -1;
-    *o = py;
-    return 0;
-}
-
-static inline int unpack_callback_bin(unpack_user* u, const char* b, const char* p, unsigned int l, msgpack_unpack_object* o)
-{
-    PyObject *py = PyBytes_FromStringAndSize(p, l);
-    if (!py)
-        return -1;
-    *o = py;
-    return 0;
-}
-
-static inline int unpack_callback_ext(unpack_user* u, const char* base, const char* pos,
-                                      unsigned int lenght, msgpack_unpack_object* o)
-{
-    PyObject *py;
-    int8_t typecode = (int8_t)*pos++;
-    if (!u->ext_hook) {
-        PyErr_SetString(PyExc_AssertionError, "u->ext_hook cannot be NULL");
-        return -1;
-    }
-    // length also includes the typecode, so the actual data is lenght-1
-#if PY_MAJOR_VERSION == 2
-    py = PyObject_CallFunction(u->ext_hook, "(is#)", typecode, pos, lenght-1);
-#else
-    py = PyObject_CallFunction(u->ext_hook, "(iy#)", typecode, pos, lenght-1);
-#endif
-    if (!py)
-        return -1;
-    *o = py;
-    return 0;
-}
-
-#include "unpack_template.h"

+ 0 - 95
utils/converters/msgpack/msgpack/unpack_define.h

@@ -1,95 +0,0 @@
-/*
- * MessagePack unpacking routine template
- *
- * Copyright (C) 2008-2010 FURUHASHI Sadayuki
- *
- *    Licensed under the Apache License, Version 2.0 (the "License");
- *    you may not use this file except in compliance with the License.
- *    You may obtain a copy of the License at
- *
- *        http://www.apache.org/licenses/LICENSE-2.0
- *
- *    Unless required by applicable law or agreed to in writing, software
- *    distributed under the License is distributed on an "AS IS" BASIS,
- *    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *    See the License for the specific language governing permissions and
- *    limitations under the License.
- */
-#ifndef MSGPACK_UNPACK_DEFINE_H__
-#define MSGPACK_UNPACK_DEFINE_H__
-
-#include "msgpack/sysdep.h"
-#include <stdlib.h>
-#include <string.h>
-#include <assert.h>
-#include <stdio.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-#ifndef MSGPACK_EMBED_STACK_SIZE
-#define MSGPACK_EMBED_STACK_SIZE 32
-#endif
-
-
-// CS is first byte & 0x1f
-typedef enum {
-    CS_HEADER            = 0x00,  // nil
-
-    //CS_                = 0x01,
-    //CS_                = 0x02,  // false
-    //CS_                = 0x03,  // true
-
-    CS_BIN_8             = 0x04,
-    CS_BIN_16            = 0x05,
-    CS_BIN_32            = 0x06,
-
-    CS_EXT_8             = 0x07,
-    CS_EXT_16            = 0x08,
-    CS_EXT_32            = 0x09,
-
-    CS_FLOAT             = 0x0a,
-    CS_DOUBLE            = 0x0b,
-    CS_UINT_8            = 0x0c,
-    CS_UINT_16           = 0x0d,
-    CS_UINT_32           = 0x0e,
-    CS_UINT_64           = 0x0f,
-    CS_INT_8             = 0x10,
-    CS_INT_16            = 0x11,
-    CS_INT_32            = 0x12,
-    CS_INT_64            = 0x13,
-
-    //CS_FIXEXT1           = 0x14,
-    //CS_FIXEXT2           = 0x15,
-    //CS_FIXEXT4           = 0x16,
-    //CS_FIXEXT8           = 0x17,
-    //CS_FIXEXT16          = 0x18,
-
-    CS_RAW_8             = 0x19,
-    CS_RAW_16            = 0x1a,
-    CS_RAW_32            = 0x1b,
-    CS_ARRAY_16          = 0x1c,
-    CS_ARRAY_32          = 0x1d,
-    CS_MAP_16            = 0x1e,
-    CS_MAP_32            = 0x1f,
-
-    ACS_RAW_VALUE,
-    ACS_BIN_VALUE,
-    ACS_EXT_VALUE,
-} msgpack_unpack_state;
-
-
-typedef enum {
-    CT_ARRAY_ITEM,
-    CT_MAP_KEY,
-    CT_MAP_VALUE,
-} msgpack_container_type;
-
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* msgpack/unpack_define.h */

+ 0 - 475
utils/converters/msgpack/msgpack/unpack_template.h

@@ -1,475 +0,0 @@
-/*
- * MessagePack unpacking routine template
- *
- * Copyright (C) 2008-2010 FURUHASHI Sadayuki
- *
- *    Licensed under the Apache License, Version 2.0 (the "License");
- *    you may not use this file except in compliance with the License.
- *    You may obtain a copy of the License at
- *
- *        http://www.apache.org/licenses/LICENSE-2.0
- *
- *    Unless required by applicable law or agreed to in writing, software
- *    distributed under the License is distributed on an "AS IS" BASIS,
- *    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *    See the License for the specific language governing permissions and
- *    limitations under the License.
- */
-
-#ifndef USE_CASE_RANGE
-#if !defined(_MSC_VER)
-#define USE_CASE_RANGE
-#endif
-#endif
-
-typedef struct unpack_stack {
-    PyObject* obj;
-    size_t size;
-    size_t count;
-    unsigned int ct;
-    PyObject* map_key;
-} unpack_stack;
-
-struct unpack_context {
-    unpack_user user;
-    unsigned int cs;
-    unsigned int trail;
-    unsigned int top;
-    /*
-    unpack_stack* stack;
-    unsigned int stack_size;
-    unpack_stack embed_stack[MSGPACK_EMBED_STACK_SIZE];
-    */
-    unpack_stack stack[MSGPACK_EMBED_STACK_SIZE];
-};
-
-
-static inline void unpack_init(unpack_context* ctx)
-{
-    ctx->cs = CS_HEADER;
-    ctx->trail = 0;
-    ctx->top = 0;
-    /*
-    ctx->stack = ctx->embed_stack;
-    ctx->stack_size = MSGPACK_EMBED_STACK_SIZE;
-    */
-    ctx->stack[0].obj = unpack_callback_root(&ctx->user);
-}
-
-/*
-static inline void unpack_destroy(unpack_context* ctx)
-{
-    if(ctx->stack_size != MSGPACK_EMBED_STACK_SIZE) {
-        free(ctx->stack);
-    }
-}
-*/
-
-static inline PyObject* unpack_data(unpack_context* ctx)
-{
-    return (ctx)->stack[0].obj;
-}
-
-
-template <bool construct>
-static inline int unpack_execute(unpack_context* ctx, const char* data, size_t len, size_t* off)
-{
-    assert(len >= *off);
-
-    const unsigned char* p = (unsigned char*)data + *off;
-    const unsigned char* const pe = (unsigned char*)data + len;
-    const void* n = NULL;
-
-    unsigned int trail = ctx->trail;
-    unsigned int cs = ctx->cs;
-    unsigned int top = ctx->top;
-    unpack_stack* stack = ctx->stack;
-    /*
-    unsigned int stack_size = ctx->stack_size;
-    */
-    unpack_user* user = &ctx->user;
-
-    PyObject* obj;
-    unpack_stack* c = NULL;
-
-    int ret;
-
-#define construct_cb(name) \
-    construct && unpack_callback ## name
-
-#define push_simple_value(func) \
-    if(construct_cb(func)(user, &obj) < 0) { goto _failed; } \
-    goto _push
-#define push_fixed_value(func, arg) \
-    if(construct_cb(func)(user, arg, &obj) < 0) { goto _failed; } \
-    goto _push
-#define push_variable_value(func, base, pos, len) \
-    if(construct_cb(func)(user, \
-        (const char*)base, (const char*)pos, len, &obj) < 0) { goto _failed; } \
-    goto _push
-
-#define again_fixed_trail(_cs, trail_len) \
-    trail = trail_len; \
-    cs = _cs; \
-    goto _fixed_trail_again
-#define again_fixed_trail_if_zero(_cs, trail_len, ifzero) \
-    trail = trail_len; \
-    if(trail == 0) { goto ifzero; } \
-    cs = _cs; \
-    goto _fixed_trail_again
-
-#define start_container(func, count_, ct_) \
-    if(top >= MSGPACK_EMBED_STACK_SIZE) { goto _failed; } /* FIXME */ \
-    if(construct_cb(func)(user, count_, &stack[top].obj) < 0) { goto _failed; } \
-    if((count_) == 0) { obj = stack[top].obj; \
-        if (construct_cb(func##_end)(user, &obj) < 0) { goto _failed; } \
-        goto _push; } \
-    stack[top].ct = ct_; \
-    stack[top].size  = count_; \
-    stack[top].count = 0; \
-    ++top; \
-    /*printf("container %d count %d stack %d\n",stack[top].obj,count_,top);*/ \
-    /*printf("stack push %d\n", top);*/ \
-    /* FIXME \
-    if(top >= stack_size) { \
-        if(stack_size == MSGPACK_EMBED_STACK_SIZE) { \
-            size_t csize = sizeof(unpack_stack) * MSGPACK_EMBED_STACK_SIZE; \
-            size_t nsize = csize * 2; \
-            unpack_stack* tmp = (unpack_stack*)malloc(nsize); \
-            if(tmp == NULL) { goto _failed; } \
-            memcpy(tmp, ctx->stack, csize); \
-            ctx->stack = stack = tmp; \
-            ctx->stack_size = stack_size = MSGPACK_EMBED_STACK_SIZE * 2; \
-        } else { \
-            size_t nsize = sizeof(unpack_stack) * ctx->stack_size * 2; \
-            unpack_stack* tmp = (unpack_stack*)realloc(ctx->stack, nsize); \
-            if(tmp == NULL) { goto _failed; } \
-            ctx->stack = stack = tmp; \
-            ctx->stack_size = stack_size = stack_size * 2; \
-        } \
-    } \
-    */ \
-    goto _header_again
-
-#define NEXT_CS(p)  ((unsigned int)*p & 0x1f)
-
-#ifdef USE_CASE_RANGE
-#define SWITCH_RANGE_BEGIN     switch(*p) {
-#define SWITCH_RANGE(FROM, TO) case FROM ... TO:
-#define SWITCH_RANGE_DEFAULT   default:
-#define SWITCH_RANGE_END       }
-#else
-#define SWITCH_RANGE_BEGIN     { if(0) {
-#define SWITCH_RANGE(FROM, TO) } else if(FROM <= *p && *p <= TO) {
-#define SWITCH_RANGE_DEFAULT   } else {
-#define SWITCH_RANGE_END       } }
-#endif
-
-    if(p == pe) { goto _out; }
-    do {
-        switch(cs) {
-        case CS_HEADER:
-            SWITCH_RANGE_BEGIN
-            SWITCH_RANGE(0x00, 0x7f)  // Positive Fixnum
-                push_fixed_value(_uint8, *(uint8_t*)p);
-            SWITCH_RANGE(0xe0, 0xff)  // Negative Fixnum
-                push_fixed_value(_int8, *(int8_t*)p);
-            SWITCH_RANGE(0xc0, 0xdf)  // Variable
-                switch(*p) {
-                case 0xc0:  // nil
-                    push_simple_value(_nil);
-                //case 0xc1:  // never used
-                case 0xc2:  // false
-                    push_simple_value(_false);
-                case 0xc3:  // true
-                    push_simple_value(_true);
-                case 0xc4:  // bin 8
-                    again_fixed_trail(NEXT_CS(p), 1);
-                case 0xc5:  // bin 16
-                    again_fixed_trail(NEXT_CS(p), 2);
-                case 0xc6:  // bin 32
-                    again_fixed_trail(NEXT_CS(p), 4);
-                case 0xc7:  // ext 8
-                    again_fixed_trail(NEXT_CS(p), 1);
-                case 0xc8:  // ext 16
-                    again_fixed_trail(NEXT_CS(p), 2);
-                case 0xc9:  // ext 32
-                    again_fixed_trail(NEXT_CS(p), 4);
-                case 0xca:  // float
-                case 0xcb:  // double
-                case 0xcc:  // unsigned int  8
-                case 0xcd:  // unsigned int 16
-                case 0xce:  // unsigned int 32
-                case 0xcf:  // unsigned int 64
-                case 0xd0:  // signed int  8
-                case 0xd1:  // signed int 16
-                case 0xd2:  // signed int 32
-                case 0xd3:  // signed int 64
-                    again_fixed_trail(NEXT_CS(p), 1 << (((unsigned int)*p) & 0x03));
-                case 0xd4:  // fixext 1
-                case 0xd5:  // fixext 2
-                case 0xd6:  // fixext 4
-                case 0xd7:  // fixext 8
-                    again_fixed_trail_if_zero(ACS_EXT_VALUE, 
-                                              (1 << (((unsigned int)*p) & 0x03))+1,
-                                              _ext_zero);
-                case 0xd8:  // fixext 16
-                    again_fixed_trail_if_zero(ACS_EXT_VALUE, 16+1, _ext_zero);
-                case 0xd9:  // str 8
-                    again_fixed_trail(NEXT_CS(p), 1);
-                case 0xda:  // raw 16
-                case 0xdb:  // raw 32
-                case 0xdc:  // array 16
-                case 0xdd:  // array 32
-                case 0xde:  // map 16
-                case 0xdf:  // map 32
-                    again_fixed_trail(NEXT_CS(p), 2 << (((unsigned int)*p) & 0x01));
-                default:
-                    goto _failed;
-                }
-            SWITCH_RANGE(0xa0, 0xbf)  // FixRaw
-                again_fixed_trail_if_zero(ACS_RAW_VALUE, ((unsigned int)*p & 0x1f), _raw_zero);
-            SWITCH_RANGE(0x90, 0x9f)  // FixArray
-                start_container(_array, ((unsigned int)*p) & 0x0f, CT_ARRAY_ITEM);
-            SWITCH_RANGE(0x80, 0x8f)  // FixMap
-                start_container(_map, ((unsigned int)*p) & 0x0f, CT_MAP_KEY);
-
-            SWITCH_RANGE_DEFAULT
-                goto _failed;
-            SWITCH_RANGE_END
-            // end CS_HEADER
-
-
-        _fixed_trail_again:
-            ++p;
-
-        default:
-            if((size_t)(pe - p) < trail) { goto _out; }
-            n = p;  p += trail - 1;
-            switch(cs) {
-            case CS_EXT_8:
-                again_fixed_trail_if_zero(ACS_EXT_VALUE, *(uint8_t*)n+1, _ext_zero);
-            case CS_EXT_16:
-                again_fixed_trail_if_zero(ACS_EXT_VALUE,
-                                          _msgpack_load16(uint16_t,n)+1,
-                                          _ext_zero);
-            case CS_EXT_32:
-                again_fixed_trail_if_zero(ACS_EXT_VALUE,
-                                          _msgpack_load32(uint32_t,n)+1,
-                                          _ext_zero);
-            case CS_FLOAT: {
-                    union { uint32_t i; float f; } mem;
-                    mem.i = _msgpack_load32(uint32_t,n);
-                    push_fixed_value(_float, mem.f); }
-            case CS_DOUBLE: {
-                    union { uint64_t i; double f; } mem;
-                    mem.i = _msgpack_load64(uint64_t,n);
-#if defined(__arm__) && !(__ARM_EABI__) // arm-oabi
-                    // https://github.com/msgpack/msgpack-perl/pull/1
-                    mem.i = (mem.i & 0xFFFFFFFFUL) << 32UL | (mem.i >> 32UL);
-#endif
-                    push_fixed_value(_double, mem.f); }
-            case CS_UINT_8:
-                push_fixed_value(_uint8, *(uint8_t*)n);
-            case CS_UINT_16:
-                push_fixed_value(_uint16, _msgpack_load16(uint16_t,n));
-            case CS_UINT_32:
-                push_fixed_value(_uint32, _msgpack_load32(uint32_t,n));
-            case CS_UINT_64:
-                push_fixed_value(_uint64, _msgpack_load64(uint64_t,n));
-
-            case CS_INT_8:
-                push_fixed_value(_int8, *(int8_t*)n);
-            case CS_INT_16:
-                push_fixed_value(_int16, _msgpack_load16(int16_t,n));
-            case CS_INT_32:
-                push_fixed_value(_int32, _msgpack_load32(int32_t,n));
-            case CS_INT_64:
-                push_fixed_value(_int64, _msgpack_load64(int64_t,n));
-
-            case CS_BIN_8:
-                again_fixed_trail_if_zero(ACS_BIN_VALUE, *(uint8_t*)n, _bin_zero);
-            case CS_BIN_16:
-                again_fixed_trail_if_zero(ACS_BIN_VALUE, _msgpack_load16(uint16_t,n), _bin_zero);
-            case CS_BIN_32:
-                again_fixed_trail_if_zero(ACS_BIN_VALUE, _msgpack_load32(uint32_t,n), _bin_zero);
-            case ACS_BIN_VALUE:
-            _bin_zero:
-                push_variable_value(_bin, data, n, trail);
-
-            case CS_RAW_8:
-                again_fixed_trail_if_zero(ACS_RAW_VALUE, *(uint8_t*)n, _raw_zero);
-            case CS_RAW_16:
-                again_fixed_trail_if_zero(ACS_RAW_VALUE, _msgpack_load16(uint16_t,n), _raw_zero);
-            case CS_RAW_32:
-                again_fixed_trail_if_zero(ACS_RAW_VALUE, _msgpack_load32(uint32_t,n), _raw_zero);
-            case ACS_RAW_VALUE:
-            _raw_zero:
-                push_variable_value(_raw, data, n, trail);
-
-            case ACS_EXT_VALUE:
-            _ext_zero:
-                push_variable_value(_ext, data, n, trail);
-
-            case CS_ARRAY_16:
-                start_container(_array, _msgpack_load16(uint16_t,n), CT_ARRAY_ITEM);
-            case CS_ARRAY_32:
-                /* FIXME security guard */
-                start_container(_array, _msgpack_load32(uint32_t,n), CT_ARRAY_ITEM);
-
-            case CS_MAP_16:
-                start_container(_map, _msgpack_load16(uint16_t,n), CT_MAP_KEY);
-            case CS_MAP_32:
-                /* FIXME security guard */
-                start_container(_map, _msgpack_load32(uint32_t,n), CT_MAP_KEY);
-
-            default:
-                goto _failed;
-            }
-        }
-
-_push:
-    if(top == 0) { goto _finish; }
-    c = &stack[top-1];
-    switch(c->ct) {
-    case CT_ARRAY_ITEM:
-        if(construct_cb(_array_item)(user, c->count, &c->obj, obj) < 0) { goto _failed; }
-        if(++c->count == c->size) {
-            obj = c->obj;
-            if (construct_cb(_array_end)(user, &obj) < 0) { goto _failed; }
-            --top;
-            /*printf("stack pop %d\n", top);*/
-            goto _push;
-        }
-        goto _header_again;
-    case CT_MAP_KEY:
-        c->map_key = obj;
-        c->ct = CT_MAP_VALUE;
-        goto _header_again;
-    case CT_MAP_VALUE:
-        if(construct_cb(_map_item)(user, c->count, &c->obj, c->map_key, obj) < 0) { goto _failed; }
-        if(++c->count == c->size) {
-            obj = c->obj;
-            if (construct_cb(_map_end)(user, &obj) < 0) { goto _failed; }
-            --top;
-            /*printf("stack pop %d\n", top);*/
-            goto _push;
-        }
-        c->ct = CT_MAP_KEY;
-        goto _header_again;
-
-    default:
-        goto _failed;
-    }
-
-_header_again:
-        cs = CS_HEADER;
-        ++p;
-    } while(p != pe);
-    goto _out;
-
-
-_finish:
-    if (!construct)
-        unpack_callback_nil(user, &obj);
-    stack[0].obj = obj;
-    ++p;
-    ret = 1;
-    /*printf("-- finish --\n"); */
-    goto _end;
-
-_failed:
-    /*printf("** FAILED **\n"); */
-    ret = -1;
-    goto _end;
-
-_out:
-    ret = 0;
-    goto _end;
-
-_end:
-    ctx->cs = cs;
-    ctx->trail = trail;
-    ctx->top = top;
-    *off = p - (const unsigned char*)data;
-
-    return ret;
-#undef construct_cb
-}
-
-#undef SWITCH_RANGE_BEGIN
-#undef SWITCH_RANGE
-#undef SWITCH_RANGE_DEFAULT
-#undef SWITCH_RANGE_END
-#undef push_simple_value
-#undef push_fixed_value
-#undef push_variable_value
-#undef again_fixed_trail
-#undef again_fixed_trail_if_zero
-#undef start_container
-
-template <unsigned int fixed_offset, unsigned int var_offset>
-static inline int unpack_container_header(unpack_context* ctx, const char* data, size_t len, size_t* off)
-{
-    assert(len >= *off);
-    uint32_t size;
-    const unsigned char *const p = (unsigned char*)data + *off;
-
-#define inc_offset(inc) \
-    if (len - *off < inc) \
-        return 0; \
-    *off += inc;
-
-    switch (*p) {
-    case var_offset:
-        inc_offset(3);
-        size = _msgpack_load16(uint16_t, p + 1);
-        break;
-    case var_offset + 1:
-        inc_offset(5);
-        size = _msgpack_load32(uint32_t, p + 1);
-        break;
-#ifdef USE_CASE_RANGE
-    case fixed_offset + 0x0 ... fixed_offset + 0xf:
-#else
-    case fixed_offset + 0x0:
-    case fixed_offset + 0x1:
-    case fixed_offset + 0x2:
-    case fixed_offset + 0x3:
-    case fixed_offset + 0x4:
-    case fixed_offset + 0x5:
-    case fixed_offset + 0x6:
-    case fixed_offset + 0x7:
-    case fixed_offset + 0x8:
-    case fixed_offset + 0x9:
-    case fixed_offset + 0xa:
-    case fixed_offset + 0xb:
-    case fixed_offset + 0xc:
-    case fixed_offset + 0xd:
-    case fixed_offset + 0xe:
-    case fixed_offset + 0xf:
-#endif
-        ++*off;
-        size = ((unsigned int)*p) & 0x0f;
-        break;
-    default:
-        PyErr_SetString(PyExc_ValueError, "Unexpected type header on stream");
-        return -1;
-    }
-    unpack_callback_uint32(&ctx->user, size, &ctx->stack[0].obj);
-    return 1;
-}
-
-#undef SWITCH_RANGE_BEGIN
-#undef SWITCH_RANGE
-#undef SWITCH_RANGE_DEFAULT
-#undef SWITCH_RANGE_END
-
-static const execute_fn unpack_construct = &unpack_execute<true>;
-static const execute_fn unpack_skip = &unpack_execute<false>;
-static const execute_fn read_array_header = &unpack_container_header<0x90, 0xdc>;
-static const execute_fn read_map_header = &unpack_container_header<0x80, 0xde>;
-
-#undef NEXT_CS
-
-/* vim: set ts=4 sw=4 sts=4 expandtab  */

+ 0 - 3
utils/converters/utf8/build.bat

@@ -1,3 +0,0 @@
-g++ ./src/objcompress.cc -O2 -Wall -o objcompress
-g++ ./src/obj2utf8.cc -O2 -Wall -o obj2utf8
-g++ ./src/obj2utf8x.cc -O2 -Wall -o obj2utf8x

BIN
utils/converters/utf8/obj2utf8.exe


BIN
utils/converters/utf8/obj2utf8x.exe


BIN
utils/converters/utf8/objcompress.exe


+ 0 - 20
utils/converters/utf8/src/README

@@ -1,20 +0,0 @@
-Usage: ./objcompress in.obj [out.utf8]
-
-        If 'out' is specified, then attempt to write out a compressed,
-        UTF-8 version to 'out.'
-
-        If not, write a JSON version to STDOUT.
-
-Usage: ./objanalyze in.obj [list of cache sizes]
-
-        Perform vertex cache analysis on in.obj using specified sizes.
-        For example: ./objanalyze in.obj 6 16 24 32
-        Maximum cache size is 32.
-
-Building:
-
-Since there are no external dependences outside of the C/C++ standard
-libraries, you can pretty much build this however you please. I've
-included a cheeky way to do this on POSIX-like systems by including a
-build shell script at the top of the file itself. You can build by
-making the .cc file executable, and running it on the command line.

+ 0 - 188
utils/converters/utf8/src/base.h

@@ -1,188 +0,0 @@
-// Copyright 2011 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you
-// may not use this file except in compliance with the License. You
-// may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-// implied. See the License for the specific language governing
-// permissions and limitations under the License.
-#ifndef WEBGL_LOADER_BASE_H_
-#define WEBGL_LOADER_BASE_H_
-
-#include <ctype.h>
-#include <float.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include <string>
-#include <vector>
-
-// TODO: consider using C99 spellings.
-typedef unsigned char uint8;
-typedef unsigned short uint16;
-typedef short int16;
-typedef unsigned int uint32;
-
-// printf format strings for size_t.
-#ifdef _WIN32
-# define PRIuS "%Iu"
-#else  // Most compilers use the C99 format string.
-# define PRIuS "%zu"
-#endif
-
-#ifndef isfinite
-# define isfinite _finite
-#endif
-
-typedef std::vector<float> AttribList;
-typedef std::vector<int> IndexList;
-typedef std::vector<uint16> QuantizedAttribList;
-typedef std::vector<uint16> OptimizedIndexList;
-
-// TODO: these data structures ought to go elsewhere.
-struct DrawMesh {
-  // Interleaved vertex format:
-  //  3-D Position
-  //  3-D Normal
-  //  2-D TexCoord
-  // Note that these
-  AttribList attribs;
-  // Indices are 0-indexed.
-  IndexList indices;
-};
-
-struct WebGLMesh {
-  QuantizedAttribList attribs;
-  OptimizedIndexList indices;
-};
-
-typedef std::vector<WebGLMesh> WebGLMeshList;
-
-static inline int strtoint(const char* str, const char** endptr) {
-  return static_cast<int>(strtol(str, const_cast<char**>(endptr), 10));
-}
-
-static inline const char* StripLeadingWhitespace(const char* str) {
-  while (isspace(*str)) {
-    ++str;
-  }
-  return str;
-}
-
-static inline char* StripLeadingWhitespace(char* str) {
-  while (isspace(*str)) {
-    ++str;
-  }
-  return str;
-}
-
-// Like basename.
-static inline const char* StripLeadingDir(const char* const str) {
-  const char* last_slash = NULL;
-  const char* pos = str;
-  while (const char ch = *pos) {
-    if (ch == '/' || ch == '\\') {
-      last_slash = pos;
-    }
-    ++pos;
-  }
-  return last_slash ? (last_slash + 1) : str;
-}
-
-static inline void TerminateAtNewlineOrComment(char* str) {
-  char* newline = strpbrk(str, "#\r\n");
-  if (newline) {
-    *newline = '\0';
-  }
-}
-
-static inline const char* ConsumeFirstToken(const char* const line,
-                                            std::string* token) {
-  const char* curr = line;
-  while (char ch = *curr) {
-    if (isspace(ch)) {
-      token->assign(line, curr);
-      return curr + 1;
-    }
-    ++curr;
-  }
-  if (curr == line) {
-    return NULL;
-  }
-  token->assign(line, curr);
-  return curr;
-}
-
-static inline void ToLower(const char* in, std::string* out) {
-  while (char ch = *in) {
-    out->push_back(tolower(ch));
-    ++in;
-  }
-}
-
-static inline void ToLowerInplace(std::string* in) {
-  std::string& s = *in;
-  for (size_t i = 0; i < s.size(); ++i) {
-    s[i] = tolower(s[i]);
-  }
-}
-
-// Jenkin's One-at-a-time Hash. Not the best, but simple and
-// portable.
-uint32 SimpleHash(char *key, size_t len, uint32 seed = 0) {
-  uint32 hash = seed;
-  for(size_t i = 0; i < len; ++i) {
-    hash += static_cast<unsigned char>(key[i]);
-    hash += (hash << 10);
-    hash ^= (hash >> 6);
-  }
-  hash += (hash << 3);
-  hash ^= (hash >> 11);
-  hash += (hash << 15);
-  return hash;
-}
-
-void ToHex(uint32 w, char out[9]) {
-  const char kOffset0 = '0';
-  const char kOffset10 = 'a' - 10;
-  out[8] = '\0';
-  for (size_t i = 8; i > 0;) {
-    uint32 bits = w & 0xF;
-    out[--i] = bits + ((bits < 10) ? kOffset0 : kOffset10);
-    w >>= 4;
-  }
-}
-
-uint16 Quantize(float f, float in_min, float in_scale, uint16 out_max) {
-  return static_cast<uint16>(out_max * ((f-in_min) / in_scale));
-}
-
-// TODO: Visual Studio calls this someting different.
-#ifdef putc_unlocked
-# define PutChar putc_unlocked
-#else
-# define PutChar putc
-#endif  // putc_unlocked
-
-#ifndef CHECK
-# define CHECK(PRED) if (!(PRED)) {                                     \
-    fprintf(stderr, "%s:%d CHECK failed: ", __FILE__, __LINE__);        \
-    fputs(#PRED "\n", stderr);                                          \
-    exit(-1); } else
-#endif  // CHECK
-
-#ifndef DCHECK
-# ifdef DEBUG
-#  define DCHECK(PRED) CHECK(PRED)
-# else
-#  define DCHECK(PRED)
-# endif  // DEBUG
-#endif  // DCHECK
-
-#endif  // WEBGL_LOADER_BASE_H_

+ 0 - 123
utils/converters/utf8/src/bounds.h

@@ -1,123 +0,0 @@
-// Copyright 2012 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you
-// may not use this file except in compliance with the License. You
-// may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-// implied. See the License for the specific language governing
-// permissions and limitations under the License.
-
-#ifndef WEBGL_LOADER_BOUNDS_H_
-#define WEBGL_LOADER_BOUNDS_H_
-
-#include <stdio.h>
-
-#include "base.h"
-
-namespace webgl_loader {
-
-// TODO: arbitrary vertex formats.
-
-struct Bounds {
-  float mins[8];
-  float maxes[8];
-
-  void Clear() {
-    for (size_t i = 0; i < 8; ++i) {
-      mins[i] = FLT_MAX;
-      maxes[i] = -FLT_MAX;
-    }
-  }
-
-  void EncloseAttrib(const float* attribs) {
-    for (size_t i = 0; i < 8; ++i) {
-      const float attrib = attribs[i];
-      if (mins[i] > attrib) {
-        mins[i] = attrib;
-      }
-      if (maxes[i] < attrib) {
-        maxes[i] = attrib;
-      }
-    }
-  }
-
-  void Enclose(const AttribList& attribs) {
-    for (size_t i = 0; i < attribs.size(); i += 8) {
-      EncloseAttrib(&attribs[i]);
-    }
-  }
-
-  float UniformScale() const {
-    const float x = maxes[0] - mins[0];
-    const float y = maxes[1] - mins[1];
-    const float z = maxes[2] - mins[2];
-    return (x > y)  // TODO: max3
-        ? ((x > z) ? x : z)
-        : ((y > z) ? y : z);
-  }
-};
-
-// TODO: make maxPosition et. al. configurable.
-struct BoundsParams {
-  static BoundsParams FromBounds(const Bounds& bounds) {
-    BoundsParams ret;
-    const float scale = bounds.UniformScale();
-    // Position. Use a uniform scale.
-    for (size_t i = 0; i < 3; ++i) {
-      const int maxPosition = (1 << 14) - 1;  // 16383;
-      ret.mins[i] = bounds.mins[i];
-      ret.scales[i] = scale;
-      ret.outputMaxes[i] = maxPosition;
-      ret.decodeOffsets[i] = maxPosition * bounds.mins[i] / scale;
-      ret.decodeScales[i] = scale / maxPosition;
-    }
-    // TexCoord.
-    // TODO: get bounds-dependent texcoords working!
-    for (size_t i = 3; i < 5; ++i) {
-      // const float texScale = bounds.maxes[i] - bounds.mins[i];
-      const int maxTexcoord = (1 << 10) - 1;  // 1023
-      ret.mins[i] = 0;  //bounds.mins[i];
-      ret.scales[i] = 1;  //texScale;
-      ret.outputMaxes[i] = maxTexcoord;
-      ret.decodeOffsets[i] = 0;  //maxTexcoord * bounds.mins[i] / texScale;
-      ret.decodeScales[i] = 1.0f / maxTexcoord;  // texScale / maxTexcoord;
-    }
-    // Normal. Always uniform range.
-    for (size_t i = 5; i < 8; ++i) {
-      ret.mins[i] = -1;
-      ret.scales[i] = 2.f;
-      ret.outputMaxes[i] = (1 << 10) - 1;  // 1023
-      ret.decodeOffsets[i] = 1 - (1 << 9);  // -511
-      ret.decodeScales[i] = 1.0 / 511;
-    }
-    return ret;
-  }
-
-  void DumpJson(FILE* out = stdout) {
-    // TODO: use JsonSink.
-    fputs("{\n", out);
-    fprintf(out, "    \"decodeOffsets\": [%d,%d,%d,%d,%d,%d,%d,%d],\n",
-            decodeOffsets[0], decodeOffsets[1], decodeOffsets[2],
-            decodeOffsets[3], decodeOffsets[4], decodeOffsets[5],
-            decodeOffsets[6], decodeOffsets[7]);
-    fprintf(out, "    \"decodeScales\": [%f,%f,%f,%f,%f,%f,%f,%f]\n",
-            decodeScales[0], decodeScales[1], decodeScales[2], decodeScales[3],
-            decodeScales[4], decodeScales[5], decodeScales[6], decodeScales[7]);
-    fputs("  }", out);
-  }
-
-  float mins[8];
-  float scales[8];
-  int outputMaxes[8];
-  int decodeOffsets[8];
-  float decodeScales[8];
-};
-
-}  // namespace webgl_loader
-
-#endif  // WEBGL_LOADER_BOUNDS_H_

+ 0 - 539
utils/converters/utf8/src/compress.h

@@ -1,539 +0,0 @@
-// Copyright 2012 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you
-// may not use this file except in compliance with the License. You
-// may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-// implied. See the License for the specific language governing
-// permissions and limitations under the License.
-
-#ifndef WEBGL_LOADER_COMPRESS_H_
-#define WEBGL_LOADER_COMPRESS_H_
-
-#include <math.h>
-
-#include "base.h"
-#include "bounds.h"
-#include "stream.h"
-#include "utf8.h"
-
-namespace webgl_loader {
-
-void AttribsToQuantizedAttribs(const AttribList& interleaved_attribs,
-                               const BoundsParams& bounds_params,
-                               QuantizedAttribList* quantized_attribs) {
-  quantized_attribs->resize(interleaved_attribs.size());
-  for (size_t i = 0; i < interleaved_attribs.size(); i += 8) {
-    for (size_t j = 0; j < 8; ++j) {
-      quantized_attribs->at(i + j) = Quantize(interleaved_attribs[i + j],
-                                              bounds_params.mins[j],
-                                              bounds_params.scales[j],
-                                              bounds_params.outputMaxes[j]);
-    }
-  }
-}
-
-uint16 ZigZag(int16 word) {
-  return (word >> 15) ^ (word << 1);
-}
-
-void CompressAABBToUtf8(const Bounds& bounds,
-                        const BoundsParams& total_bounds,
-                        ByteSinkInterface* utf8) {
-  const int maxPosition = (1 << 14) - 1;  // 16383;
-  uint16 mins[3] = { 0 };
-  uint16 maxes[3] = { 0 };
-  for (int i = 0; i < 3; ++i) {
-    float total_min = total_bounds.mins[i];
-    float total_scale = total_bounds.scales[i];
-    mins[i] = Quantize(bounds.mins[i], total_min, total_scale, maxPosition);
-    maxes[i] = Quantize(bounds.maxes[i], total_min, total_scale, maxPosition);
-  }
-  for (int i = 0; i < 3; ++i) {
-    Uint16ToUtf8(mins[i], utf8);
-  }
-  for (int i = 0; i < 3; ++i) {
-    Uint16ToUtf8(maxes[i] - mins[i], utf8);
-  }
-}
-
-void CompressIndicesToUtf8(const OptimizedIndexList& list,
-                           ByteSinkInterface* utf8) {
-  // For indices, we don't do delta from the most recent index, but
-  // from the high water mark. The assumption is that the high water
-  // mark only ever moves by one at a time. Foruntately, the vertex
-  // optimizer does that for us, to optimize for per-transform vertex
-  // fetch order.
-  uint16 index_high_water_mark = 0;
-  for (size_t i = 0; i < list.size(); ++i) {
-    const int index = list[i];
-    CHECK(index >= 0);
-    CHECK(index <= index_high_water_mark);
-    CHECK(Uint16ToUtf8(index_high_water_mark - index, utf8));
-    if (index == index_high_water_mark) {
-      ++index_high_water_mark;
-    }
-  }
-}
-
-void CompressQuantizedAttribsToUtf8(const QuantizedAttribList& attribs,
-                                    ByteSinkInterface* utf8) {
-  for (size_t i = 0; i < 8; ++i) {
-    // Use a transposed representation, and delta compression.
-    uint16 prev = 0;
-    for (size_t j = i; j < attribs.size(); j += 8) {
-      const uint16 word = attribs[j];
-      const uint16 za = ZigZag(static_cast<int16>(word - prev));
-      prev = word;
-      CHECK(Uint16ToUtf8(za, utf8));
-    }
-  }
-}
-
-class EdgeCachingCompressor {
- public:
-  // Assuming that the vertex cache optimizer LRU is 32 vertices, we
-  // expect ~64 triangles, and ~96 edges.
-  static const size_t kMaxLruSize = 96;
-  static const int kLruSentinel = -1;
-
-  EdgeCachingCompressor(const QuantizedAttribList& attribs,
-                        OptimizedIndexList& indices)
-      : attribs_(attribs),
-        indices_(indices),
-        deltas_(attribs.size()),
-        index_high_water_mark_(0),
-        lru_size_(0) {
-    memset(last_attrib_, 0, sizeof(last_attrib_));
-  }
-
-  // Work in progress. Does not remotely work.
-  void CompressWithLRU(ByteSinkInterface* utf8) {
-    size_t match_indices[3];
-    size_t match_winding[3];
-    for (size_t i = 0; i < indices_.size(); i += 3) {
-      const uint16* triangle = &indices_[i];
-      // Try to find edge matches to cheaply encode indices and employ
-      // parallelogram prediction.
-      const size_t num_matches = LruEdge(triangle,
-                                         match_indices, match_winding);
-      switch (num_matches) {
-        case 0: 
-          LruEdgeZero(triangle);
-          // No edges match, so use simple predictor.
-          continue;
-        case 1: 
-          LruEdgeOne(triangle[match_winding[1]], 
-                     triangle[match_winding[2]], match_indices[0]);
-          break;
-        case 2: 
-          LruEdgeTwo(triangle[match_winding[2]], 
-                     match_indices[0], match_indices[1]); 
-          break;
-        case 3: 
-          LruEdgeThree(match_indices[0], match_indices[1], match_indices[2]);
-          break;
-        default:
-          DumpDebug();
-          CHECK(false);
-      }
-    }
-  }
-
-  // Instead of using an LRU cache of edges, simply scan the history
-  // for matching edges.
-  void Compress(ByteSinkInterface* utf8) {
-    // TODO: do this pre-quantization.
-    // Normal prediction.
-    const size_t num_attribs = attribs_.size() / 8;
-    std::vector<int> crosses(3 * num_attribs);
-    for (size_t i = 0; i < indices_.size(); i += 3) {
-      // Compute face cross products.
-      const uint16 i0 = indices_[i + 0];
-      const uint16 i1 = indices_[i + 1];
-      const uint16 i2 = indices_[i + 2];
-      int e1[3], e2[3], cross[3];
-      e1[0] = attribs_[8*i1 + 0] - attribs_[8*i0 + 0];
-      e1[1] = attribs_[8*i1 + 1] - attribs_[8*i0 + 1];
-      e1[2] = attribs_[8*i1 + 2] - attribs_[8*i0 + 2];
-      e2[0] = attribs_[8*i2 + 0] - attribs_[8*i0 + 0];
-      e2[1] = attribs_[8*i2 + 1] - attribs_[8*i0 + 1];
-      e2[2] = attribs_[8*i2 + 2] - attribs_[8*i0 + 2];
-      cross[0] = e1[1] * e2[2] - e1[2] * e2[1];
-      cross[1] = e1[2] * e2[0] - e1[0] * e2[2];
-      cross[2] = e1[0] * e2[1] - e1[1] * e2[0];
-      // Accumulate face cross product into each vertex.
-      for (size_t j = 0; j < 3; ++j) {
-        crosses[3*i0 + j] += cross[j];
-        crosses[3*i1 + j] += cross[j];
-        crosses[3*i2 + j] += cross[j];
-      }
-    }
-    // Compute normal residues.
-    for (size_t idx = 0; idx < num_attribs; ++idx) {
-      float pnx = crosses[3*idx + 0];
-      float pny = crosses[3*idx + 1];
-      float pnz = crosses[3*idx + 2];
-      const float pnorm = 511.0 / sqrt(pnx*pnx + pny*pny + pnz*pnz);
-      pnx *= pnorm;
-      pny *= pnorm;
-      pnz *= pnorm;
-
-      float nx = attribs_[8*idx + 5] - 511;
-      float ny = attribs_[8*idx + 6] - 511;
-      float nz = attribs_[8*idx + 7] - 511;
-      const float norm = 511.0 / sqrt(nx*nx + ny*ny + nz*nz);
-      nx *= norm;
-      ny *= norm;
-      nz *= norm;
-
-      const uint16 dx = ZigZag(nx - pnx);
-      const uint16 dy = ZigZag(ny - pny);
-      const uint16 dz = ZigZag(nz - pnz);
-
-      deltas_[5*num_attribs + idx] = dx;
-      deltas_[6*num_attribs + idx] = dy;
-      deltas_[7*num_attribs + idx] = dz;
-    }
-    for (size_t triangle_start_index = 0; 
-         triangle_start_index < indices_.size(); triangle_start_index += 3) {
-      const uint16 i0 = indices_[triangle_start_index + 0];
-      const uint16 i1 = indices_[triangle_start_index + 1];
-      const uint16 i2 = indices_[triangle_start_index + 2];
-      // To force simple compression, set |max_backref| to 0 here
-      // and in loader.js.
-      // |max_backref| should be configurable and communicated.
-      const uint16 max_backref = triangle_start_index < kMaxLruSize ?
-          triangle_start_index : kMaxLruSize;
-      // Scan the index list for matching edges.
-      uint16 backref = 0;
-      for (; backref < max_backref; backref += 3) {
-        const size_t candidate_start_index = triangle_start_index - backref;
-        const uint16 j0 = indices_[candidate_start_index + 0];
-        const uint16 j1 = indices_[candidate_start_index + 1];
-        const uint16 j2 = indices_[candidate_start_index + 2];
-        // Compare input and candidate triangle edges in a
-        // winding-sensitive order. Matching edges must reference
-        // vertices in opposite order, and the first check sees if the
-        // triangles are in strip order. If necessary, re-order the
-        // triangle in |indices_| so that the matching edge appears
-        // first.
-        if (j1 == i1 && j2 == i0) {
-          ParallelogramPredictor(backref, j0, triangle_start_index);
-          break;
-        } else if (j1 == i0 && j2 == i2) {
-          indices_[triangle_start_index + 0] = i2;
-          indices_[triangle_start_index + 1] = i0;
-          indices_[triangle_start_index + 2] = i1;
-          ParallelogramPredictor(backref, j0, triangle_start_index);
-          break;
-        } else if (j1 == i2 && j2 == i1) {
-          indices_[triangle_start_index + 0] = i1;
-          indices_[triangle_start_index + 1] = i2;
-          indices_[triangle_start_index + 2] = i0;
-          ParallelogramPredictor(backref, j0, triangle_start_index);
-          break;
-        } else if (j2 == i1 && j0 == i0) {
-          ParallelogramPredictor(backref + 1, j1, triangle_start_index);
-          break;
-        } else if (j2 == i0 && j0 == i2) {
-          indices_[triangle_start_index + 0] = i2;
-          indices_[triangle_start_index + 1] = i0;
-          indices_[triangle_start_index + 2] = i1;
-          ParallelogramPredictor(backref + 1, j1, triangle_start_index);
-          break;
-        } else if (j2 == i2 && j0 == i1) {
-          indices_[triangle_start_index + 0] = i1;
-          indices_[triangle_start_index + 1] = i2;
-          indices_[triangle_start_index + 2] = i0;
-          ParallelogramPredictor(backref + 1, j1, triangle_start_index);
-          break;
-        } else if (j0 == i1 && j1 == i0) {
-          ParallelogramPredictor(backref + 2, j2, triangle_start_index);
-          break;
-        } else if (j0 == i0 && j1 == i2) {
-          indices_[triangle_start_index + 0] = i2;
-          indices_[triangle_start_index + 1] = i0;
-          indices_[triangle_start_index + 2] = i1;
-          ParallelogramPredictor(backref + 2, j2, triangle_start_index);
-          break;
-        } else if (j0 == i2 && j1 == i1) {
-          indices_[triangle_start_index + 0] = i1;
-          indices_[triangle_start_index + 1] = i2;
-          indices_[triangle_start_index + 2] = i0;
-          ParallelogramPredictor(backref + 2, j2, triangle_start_index);
-          break;
-        }
-      }
-      if (backref == max_backref) {
-        SimplePredictor(max_backref, triangle_start_index);
-      }
-    }
-    // Emit as UTF-8.
-    for (size_t i = 0; i < deltas_.size(); ++i) {
-      if (!Uint16ToUtf8(deltas_[i], utf8)) {
-        // TODO: bounds-dependent texcoords are still busted :(
-        Uint16ToUtf8(0, utf8);
-      }
-    }
-    for (size_t i = 0; i < codes_.size(); ++i) {
-      CHECK(Uint16ToUtf8(codes_[i], utf8));
-    }
-  }
-
-  const QuantizedAttribList& deltas() const { return deltas_; }
-
-  const OptimizedIndexList& codes() const { return codes_; }
-
-  void DumpDebug(FILE* fp = stdout) {
-    for (size_t i = 0; i < lru_size_; ++i) {
-      fprintf(fp, PRIuS ": %d\n", i, edge_lru_[i]);
-    }
-  }
-
- private:
-  // The simple predictor is slightly (maybe 5%) more effective than
-  // |CompressQuantizedAttribsToUtf8|. Instead of delta encoding in
-  // attribute order, we use the last referenced attribute as the
-  // predictor.
-  void SimplePredictor(size_t max_backref, size_t triangle_start_index) {
-    const uint16 i0 = indices_[triangle_start_index + 0];
-    const uint16 i1 = indices_[triangle_start_index + 1];
-    const uint16 i2 = indices_[triangle_start_index + 2];
-    if (HighWaterMark(i0, max_backref)) {
-      // Would it be faster to do the dumb delta, in this case?
-      EncodeDeltaAttrib(i0, last_attrib_);
-    }
-    if (HighWaterMark(i1)) {
-      EncodeDeltaAttrib(i1, &attribs_[8*i0]);
-    }
-    if (HighWaterMark(i2)) {
-      // We get a little frisky with the third vertex in the triangle.
-      // Instead of simply using the previous vertex, use the average
-      // of the first two.
-      for (size_t j = 0; j < 8; ++j) {
-        int average = attribs_[8*i0 + j];
-        average += attribs_[8*i1 + j];
-        average /= 2;
-        last_attrib_[j] = average;
-      }
-      EncodeDeltaAttrib(i2, last_attrib_);
-      // The above doesn't add much. Consider the simpler:
-      // EncodeDeltaAttrib(i2, &attribs_[8*i1]);
-    }
-  }
-
-  void EncodeDeltaAttrib(size_t index, const uint16* predicted) {
-    const size_t num_attribs = attribs_.size() / 8;
-    for (size_t i = 0; i < 5; ++i) {
-      const int delta = attribs_[8*index + i] - predicted[i];
-      const uint16 code = ZigZag(delta);
-      deltas_[num_attribs*i + index] = code;
-    }
-    UpdateLastAttrib(index);
-  }
-
-  void ParallelogramPredictor(uint16 backref_edge,
-                              size_t backref_vert,
-                              size_t triangle_start_index) {
-    codes_.push_back(backref_edge);  // Encoding matching edge.
-    const uint16 i2 = indices_[triangle_start_index + 2];
-    if (HighWaterMark(i2)) {  // Encode third vertex.
-      // Parallelogram prediction for the new vertex.
-      const uint16 i0 = indices_[triangle_start_index + 0];
-      const uint16 i1 = indices_[triangle_start_index + 1];
-      const size_t num_attribs = attribs_.size() / 8;
-      for (size_t j = 0; j < 5; ++j) {
-        const uint16 orig = attribs_[8*i2 + j]; 
-        int delta = attribs_[8*i0 + j];
-        delta += attribs_[8*i1 + j];
-        delta -= attribs_[8*backref_vert + j];
-        last_attrib_[j] = orig;
-        const uint16 code = ZigZag(orig - delta);
-        deltas_[num_attribs*j + i2] = code;
-      }
-    }
-  }
-
-  // Returns |true| if |index_high_water_mark_| is incremented, otherwise
-  // returns |false| and automatically updates |last_attrib_|. 
-  bool HighWaterMark(uint16 index, uint16 start_code = 0) {
-    codes_.push_back(index_high_water_mark_ - index + start_code);
-    if (index == index_high_water_mark_) {
-      ++index_high_water_mark_;
-      return true;
-    } else {
-      UpdateLastAttrib(index);
-    }
-    return false;
-  }
-
-  void UpdateLastAttrib(uint16 index) {
-    for (size_t i = 0; i < 8; ++i) {
-      last_attrib_[i] = attribs_[8*index + i];
-    }
-  }
-
-  // Find edge matches of |triangle| referenced in |edge_lru_|
-  // |match_indices| stores where the matches occur in |edge_lru_|
-  // |match_winding| stores where the matches occur in |triangle|
-  size_t LruEdge(const uint16* triangle,
-                 size_t* match_indices,
-                 size_t* match_winding) {
-    const uint16 i0 = triangle[0];
-    const uint16 i1 = triangle[1];
-    const uint16 i2 = triangle[2];
-    // The primary thing is to find the first matching edge, if
-    // any. If we assume that our mesh is mostly manifold, then each
-    // edge is shared by at most two triangles (with the indices in
-    // opposite order), so we actually want to eventually remove all
-    // matching edges. However, this means we have to continue
-    // scanning the array to find all matching edges, not just the
-    // first. The edges that don't match will then pushed to the
-    // front.
-    size_t num_matches = 0;
-    for (size_t i = 0; i < lru_size_ && num_matches < 3; ++i) {
-      const int edge_index = edge_lru_[i];
-      // |winding| is a tricky detail used to dereference the edge to
-      // yield |e0| and |e1|, since we must handle the edge that wraps
-      // the last and first vertex. For now, just implement this in a
-      // straightforward way using a switch, but since this code would
-      // actually also need to run in the decompressor, we must
-      // optimize it.
-      const int winding = edge_index % 3;
-      uint16 e0, e1;
-      switch (winding) {
-        case 0:
-          e0 = indices_[edge_index + 1];
-          e1 = indices_[edge_index + 2];
-          break;
-        case 1:
-          e0 = indices_[edge_index + 2];
-          e1 = indices_[edge_index];
-          break;
-        case 2:
-          e0 = indices_[edge_index];
-          e1 = indices_[edge_index + 1];
-          break;
-        default:
-          DumpDebug();
-          CHECK(false);
-      }
-
-      // Check each edge of the input triangle against |e0| and
-      // |e1|. Note that we reverse the winding of the input triangle.
-      // TODO: does this properly handle degenerate input?
-      if (e0 == i1 && e1 == i0) {
-        match_winding[num_matches] = 2;
-        match_indices[++num_matches] = i;
-      } else if (e0 == i2 && e1 == i1) {
-        match_winding[num_matches] = 0;
-        match_indices[++num_matches] = i;
-      } else if (e0 == i0 && e1 == i2) {
-        match_winding[num_matches] = 1;
-        match_indices[++num_matches] = i;
-      }
-    }
-    switch (num_matches) {
-      case 1:
-        match_winding[1] = (match_winding[0] + 1) % 3;  // Fall through.
-      case 2:
-        match_winding[2] = 3 - match_winding[1] - match_winding[0];
-      default: ;  // Do nothing.
-    }
-    return num_matches;
-  }
-
-  // If no edges were found in |triangle|, then simply push the edges
-  // onto |edge_lru_|.
-  void LruEdgeZero(const uint16* triangle) {
-    // Shift |edge_lru_| by three elements. Note that the |edge_lru_|
-    // array has at least three extra elements to make this simple.
-    lru_size_ += 3;
-    if (lru_size_ > kMaxLruSize) lru_size_ = kMaxLruSize;
-    memmove(edge_lru_ + 3, edge_lru_, lru_size_ * sizeof(int));
-    // Push |triangle| to front of |edge_lru_|
-    edge_lru_[0] = triangle[0];
-    edge_lru_[1] = triangle[1];
-    edge_lru_[2] = triangle[2];
-  }
-
-  // Remove one edge and add two.
-  void LruEdgeOne(size_t i0, size_t i1, size_t match_index) {
-    CHECK(match_index < lru_size_);
-    // Shift |edge_lru_| by one element, starting with |match_index| + 1.
-    memmove(edge_lru_ + match_index + 2, edge_lru_ + match_index + 1,
-            (lru_size_ - match_index) * sizeof(int));
-    // Shift |edge_lru_| by two elements until reaching |match_index|.
-    memmove(edge_lru_ + 2, edge_lru_, match_index * sizeof(int));
-    edge_lru_[0] = i0;
-    edge_lru_[1] = i1;
-  }
-
-  // Remove two edges and add one.
-  void LruEdgeTwo(int i0, size_t match_index0, size_t match_index1) {
-    CHECK(match_index0 < lru_size_);
-    CHECK(match_index1 < lru_size_);
-
-    // memmove 1
-    // memmove 2
-    edge_lru_[0] = i0;
-  }
-
-  // All edges were found, so just remove them from |edge_lru_|.
-  void LruEdgeThree(size_t match_index0, 
-                    size_t match_index1, 
-                    size_t match_index2) {
-    const size_t shift_two = match_index1 - 1;
-    for (size_t i = match_index0; i < shift_two; ++i) {
-      edge_lru_[i] = edge_lru_[i + 1];
-    }
-    const size_t shift_three = match_index2 - 2;
-    for (size_t i = shift_two; i < shift_three; ++i) {
-      edge_lru_[i] = edge_lru_[i + 2];
-    }
-    lru_size_ -= 3;
-    for (size_t i = shift_three; i < lru_size_; ++i) {
-      edge_lru_[i] = edge_lru_[i + 3];
-    }
-  }
-
-  // |attribs_| and |indices_| is the input mesh.
-  const QuantizedAttribList& attribs_;
-  // |indices_| are non-const because |Compress| may update triangle
-  // winding order.
-  OptimizedIndexList& indices_;
-  // |deltas_| contains the compressed attributes. They can be
-  // compressed in one of two ways:
-  // (1) with parallelogram prediction, compared with the predicted vertex,
-  // (2) otherwise, compared with the last referenced vertex.
-  // Note that even (2) is different and probably better than what
-  // |CompressQuantizedAttribsToutf8| does, which is comparing with
-  // the last encoded vertex.
-  QuantizedAttribList deltas_;
-  // |codes_| contains the compressed indices. Small values encode an
-  // edge match; that is, the first edge of the next triangle matches
-  // a recently-seen edge.
-  OptimizedIndexList codes_; 
-  // |index_high_water_mark_| is used as it is in |CompressIndicesToUtf8|.
-  uint16 index_high_water_mark_;
-  // |last_attrib_referenced_| is the index of the last referenced
-  // attribute. This is used to delta encode attributes when no edge match
-  // is found.
-  uint16 last_attrib_[8];
-  size_t lru_size_;
-  // |edge_lru_| contains the LRU lits of edge references. It stores
-  // indices to the input |indices_|. By convention, an edge points to
-  // the vertex opposite the edge in question. We pad the array by a
-  // triangle to simplify edge cases.
-  int edge_lru_[kMaxLruSize + 3];
-};
-
-}  // namespace webgl_loader
-
-#endif  // WEBGL_LOADER_COMPRESS_H_

+ 0 - 712
utils/converters/utf8/src/mesh.h

@@ -1,712 +0,0 @@
-// Copyright 2011 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you
-// may not use this file except in compliance with the License. You
-// may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-// implied. See the License for the specific language governing
-// permissions and limitations under the License.
-
-#ifndef WEBGL_LOADER_MESH_H_
-#define WEBGL_LOADER_MESH_H_
-
-#include <float.h>
-#include <limits.h>
-#include <math.h>
-#include <stdio.h>
-#include <stdlib.h>
-
-#include <map>
-#include <string>
-#include <utility>
-#include <vector>
-
-#include "base.h"
-#include "bounds.h"
-#include "stream.h"
-#include "utf8.h"
-
-// A short list of floats, useful for parsing a single vector
-// attribute.
-class ShortFloatList {
- public:
-  // MeshLab can create position attributes with
-  // color coordinates like: v x y z r g b
-  static const size_t kMaxNumFloats = 6;
-  ShortFloatList()
-      : size_(0)
-  {
-    clear();
-  }
-
-  void clear() {
-    for (size_t i = 0; i < kMaxNumFloats; ++i) {
-      a_[i] = 0.f;
-    }
-  }
-
-  // Parse up to kMaxNumFloats from C string.
-  // TODO: this should instead return endptr, since size
-  // is recoverable.
-  size_t ParseLine(const char* line) {
-    for (size_ = 0; size_ != kMaxNumFloats; ++size_) {
-      char* endptr = NULL;
-      a_[size_] = strtof(line, &endptr);
-      if (endptr == NULL || line == endptr) break;
-      line = endptr;
-    }
-    return size_;
-  }
-
-  float operator[](size_t idx) const {
-    return a_[idx];
-  }
-
-  void AppendTo(AttribList* attribs) const {
-    AppendNTo(attribs, size_);
-  }
-
-  void AppendNTo(AttribList* attribs, const size_t sz) const {
-    attribs->insert(attribs->end(), a_, a_ + sz);
-  }
-
-  bool empty() const { return size_ == 0; }
-
-  size_t size() const { return size_; }
- private:
-  float a_[kMaxNumFloats];
-  size_t size_;
-};
-
-class IndexFlattener {
- public:
-  explicit IndexFlattener(size_t num_positions)
-      : count_(0),
-        table_(num_positions) {
-  }
-
-  int count() const { return count_; }
-
-  void reserve(size_t size) {
-    table_.reserve(size);
-  }
-
-  // Returns a pair of: < flattened index, newly inserted >.
-  std::pair<int, bool> GetFlattenedIndex(int position_index,
-                                         int texcoord_index,
-                                         int normal_index) {
-    if (position_index >= static_cast<int>(table_.size())) {
-      table_.resize(position_index + 1);
-    }
-    // First, optimistically look up position_index in the table.
-    IndexType& index = table_[position_index];
-    if (index.position_or_flat == kIndexUnknown) {
-      // This is the first time we've seen this position in the table,
-      // so fill it. Since the table is indexed by position, we can
-      // use the position_or_flat_index field to store the flat index.
-      const int flat_index = count_++;
-      index.position_or_flat = flat_index;
-      index.texcoord = texcoord_index;
-      index.normal = normal_index;
-      return std::make_pair(flat_index, true);
-    } else if (index.position_or_flat == kIndexNotInTable) {
-      // There are multiple flattened indices at this position index,
-      // so resort to the map.
-      return GetFlattenedIndexFromMap(position_index,
-                                      texcoord_index,
-                                      normal_index);
-    } else if (index.texcoord == texcoord_index &&
-               index.normal == normal_index) {
-      // The other indices match, so we can use the value cached in
-      // the table.
-      return std::make_pair(index.position_or_flat, false);
-    }
-    // The other indices don't match, so we mark this table entry,
-    // and insert both the old and new indices into the map.
-    const IndexType old_index(position_index, index.texcoord, index.normal);
-    map_.insert(std::make_pair(old_index, index.position_or_flat));
-    index.position_or_flat = kIndexNotInTable;
-    const IndexType new_index(position_index, texcoord_index, normal_index);
-    const int flat_index = count_++;
-    map_.insert(std::make_pair(new_index, flat_index));
-    return std::make_pair(flat_index, true);
-  }
- private:
-  std::pair<int, bool> GetFlattenedIndexFromMap(int position_index,
-                                                int texcoord_index,
-                                                int normal_index) {
-    IndexType index(position_index, texcoord_index, normal_index);
-    MapType::iterator iter = map_.lower_bound(index);
-    if (iter == map_.end() || iter->first != index) {
-      const int flat_index = count_++;
-      map_.insert(iter, std::make_pair(index, flat_index));
-      return std::make_pair(flat_index, true);
-    } else {
-      return std::make_pair(iter->second, false);
-    }
-  }
-
-  static const int kIndexUnknown = -1;
-  static const int kIndexNotInTable = -2;
-
-  struct IndexType {
-    IndexType()
-        : position_or_flat(kIndexUnknown),
-          texcoord(kIndexUnknown),
-          normal(kIndexUnknown)
-    { }
-
-    IndexType(int position_index, int texcoord_index, int normal_index)
-        : position_or_flat(position_index),
-          texcoord(texcoord_index),
-          normal(normal_index)
-    { }
-
-    // I'm being tricky/lazy here. The table_ stores the flattened
-    // index in the first field, since it is indexed by position. The
-    // map_ stores position and uses this struct as a key to lookup the
-    // flattened index.
-    int position_or_flat;
-    int texcoord;
-    int normal;
-
-    // An ordering for std::map.
-    bool operator<(const IndexType& that) const {
-      if (position_or_flat == that.position_or_flat) {
-        if (texcoord == that.texcoord) {
-          return normal < that.normal;
-        } else {
-          return texcoord < that.texcoord;
-        }
-      } else {
-        return position_or_flat < that.position_or_flat;
-      }
-    }
-
-    bool operator==(const IndexType& that) const {
-      return position_or_flat == that.position_or_flat &&
-          texcoord == that.texcoord && normal == that.normal;
-    }
-
-    bool operator!=(const IndexType& that) const {
-      return !operator==(that);
-    }
-  };
-  typedef std::map<IndexType, int> MapType;
-
-  int count_;
-  std::vector<IndexType> table_;
-  MapType map_;
-};
-
-static inline size_t positionDim() { return 3; }
-static inline size_t texcoordDim() { return 2; }
-static inline size_t normalDim() { return 3; }
-
-// TODO(wonchun): Make a c'tor to properly initialize.
-struct GroupStart {
-  size_t offset;  // offset into draw_mesh_.indices.
-  unsigned int group_line;
-  int min_index, max_index;  // range into attribs.
-  webgl_loader::Bounds bounds;
-};
-
-class DrawBatch {
- public:
-  DrawBatch()
-      : flattener_(0),
-        current_group_line_(0xFFFFFFFF) {
-  }
-
-  const std::vector<GroupStart>& group_starts() const {
-    return group_starts_;
-  }
-
-  void Init(AttribList* positions, AttribList* texcoords, AttribList* normals) {
-    positions_ = positions;
-    texcoords_ = texcoords;
-    normals_ = normals;
-    flattener_.reserve(1024);
-  }
-
-  void AddTriangle(unsigned int group_line, int* indices) {
-    if (group_line != current_group_line_) {
-      current_group_line_ = group_line;
-      GroupStart group_start;
-      group_start.offset = draw_mesh_.indices.size();
-      group_start.group_line = group_line;
-      group_start.min_index = INT_MAX;
-      group_start.max_index = INT_MIN;
-      group_start.bounds.Clear();
-      group_starts_.push_back(group_start);
-    }
-    GroupStart& group = group_starts_.back();
-    for (size_t i = 0; i < 9; i += 3) {
-      // .OBJ files use 1-based indexing.
-      const int position_index = indices[i + 0] - 1;
-      const int texcoord_index = indices[i + 1] - 1;
-      const int normal_index = indices[i + 2] - 1;
-      const std::pair<int, bool> flattened = flattener_.GetFlattenedIndex(
-          position_index, texcoord_index, normal_index);
-      const int flat_index = flattened.first;
-      CHECK(flat_index >= 0);
-      draw_mesh_.indices.push_back(flat_index);
-      if (flattened.second) {
-        // This is a new index. Keep track of index ranges and vertex
-        // bounds.
-        if (flat_index > group.max_index) {
-          group.max_index = flat_index;
-        }
-        if (flat_index < group.min_index) {
-          group.min_index = flat_index;
-        }
-        const size_t new_loc = draw_mesh_.attribs.size();
-        CHECK(8*size_t(flat_index) == new_loc);
-        for (size_t i = 0; i < positionDim(); ++i) {
-          draw_mesh_.attribs.push_back(
-              positions_->at(positionDim() * position_index + i));
-        }
-        if (texcoord_index == -1) {
-          for (size_t i = 0; i < texcoordDim(); ++i) {
-            draw_mesh_.attribs.push_back(0);
-          }
-        } else {
-          for (size_t i = 0; i < texcoordDim(); ++i) {
-            draw_mesh_.attribs.push_back(
-                texcoords_->at(texcoordDim() * texcoord_index + i));
-          }
-        }
-        if (normal_index == -1) {
-          for (size_t i = 0; i < normalDim(); ++i) {
-            draw_mesh_.attribs.push_back(0);
-          }
-        } else {
-          for (size_t i = 0; i < normalDim(); ++i) {
-            draw_mesh_.attribs.push_back(
-                normals_->at(normalDim() * normal_index + i));
-          }
-        }
-        // TODO: is the covariance body useful for anything?
-        group.bounds.EncloseAttrib(&draw_mesh_.attribs[new_loc]);
-      }
-    }
-  }
-
-  const DrawMesh& draw_mesh() const {
-    return draw_mesh_;
-  }
- private:
-  AttribList* positions_, *texcoords_, *normals_;
-  DrawMesh draw_mesh_;
-  IndexFlattener flattener_;
-  unsigned int current_group_line_;
-  std::vector<GroupStart> group_starts_;
-};
-
-struct Material {
-  std::string name;
-  float Kd[3];
-  std::string map_Kd;
-  std::string d;
-  
-
-  void DumpJson(FILE* out = stdout) const {
-    fprintf(out, "    \"%s\": { ", name.c_str());
-    if (!d.empty()) {
-      fprintf(out, "\"d\": %s ,", d.c_str());
-    }    
-    if (map_Kd.empty()) {
-      fprintf(out, "\"Kd\": [%hu, %hu, %hu] }",
-              Quantize(Kd[0], 0, 1, 255),
-              Quantize(Kd[1], 0, 1, 255),
-              Quantize(Kd[2], 0, 1, 255));
-    } else {
-      fprintf(out, "\"map_Kd\": \"%s\" }", map_Kd.c_str());
-    }
-  }
-};
-
-typedef std::vector<Material> MaterialList;
-
-class WavefrontMtlFile {
- public:
-  explicit WavefrontMtlFile(FILE* fp) {
-    ParseFile(fp);
-  }
-
-  const MaterialList& materials() const {
-    return materials_;
-  }
-
- private:
-  // TODO: factor this parsing stuff out.
-  void ParseFile(FILE* fp) {
-    // TODO: don't use a fixed-size buffer.
-    const size_t kLineBufferSize = 256;
-    char buffer[kLineBufferSize];
-    unsigned int line_num = 1;
-    while (fgets(buffer, kLineBufferSize, fp) != NULL) {
-      char* stripped = StripLeadingWhitespace(buffer);
-      TerminateAtNewlineOrComment(stripped);
-      ParseLine(stripped, line_num++);
-    }
-  }
-
-  void ParseLine(const char* line, unsigned int line_num) {
-    switch (*line) {
-      case 'K':
-        ParseColor(line + 1, line_num);
-        break;
-      case 'd':
-        ParseD(line + 1, line_num);
-        break;
-      case 'm':
-        if (0 == strncmp(line + 1, "ap_Kd", 5)) {
-          ParseMapKd(line + 6, line_num);
-        }
-        break;
-      case 'n':
-        if (0 == strncmp(line + 1, "ewmtl", 5)) {
-          ParseNewmtl(line + 6, line_num);
-        }
-      default:
-        break;
-    }
-  }
-
-  void ParseColor(const char* line, unsigned int line_num) {
-    switch (*line) {
-      case 'd': {
-        ShortFloatList floats;
-        floats.ParseLine(line + 1);
-        float* Kd = current_->Kd;
-        Kd[0] = floats[0];
-        Kd[1] = floats[1];
-        Kd[2] = floats[2];
-        break;
-      }
-      default:
-        break;
-    }
-  }
-  void ParseD(const char* line, unsigned int line_num) {
-      current_->d = StripLeadingWhitespace(line);
-  }
-
-  void ParseMapKd(const char* line, unsigned int line_num) {
-    current_->map_Kd = StripLeadingWhitespace(line);
-  }
-
-  void ParseNewmtl(const char* line, unsigned int line_num) {
-    materials_.push_back(Material());
-    current_ = &materials_.back();
-    ToLower(StripLeadingWhitespace(line), &current_->name);
-  }
-
-  Material* current_;
-  MaterialList materials_;
-};
-
-typedef std::map<std::string, DrawBatch> MaterialBatches;
-
-// TODO: consider splitting this into a low-level parser and a high-level
-// object.
-class WavefrontObjFile {
- public:
-  explicit WavefrontObjFile(FILE* fp) {
-    current_batch_ = &material_batches_[""];
-    current_batch_->Init(&positions_, &texcoords_, &normals_);
-    current_group_line_ = 0;
-    line_to_groups_.insert(std::make_pair(0, "default"));
-    ParseFile(fp);
-  }
-
-  const MaterialList& materials() const {
-    return materials_;
-  }
-
-  const MaterialBatches& material_batches() const {
-    return material_batches_;
-  }
-
-  const std::string& LineToGroup(unsigned int line) const {
-    typedef LineToGroups::const_iterator Iterator;
-    typedef std::pair<Iterator, Iterator> EqualRange;
-    EqualRange equal_range = line_to_groups_.equal_range(line);
-    const std::string* best_group = NULL;
-    int best_count = 0;
-    for (Iterator iter = equal_range.first; iter != equal_range.second;
-         ++iter) {
-      const std::string& group = iter->second;
-      const int count = group_counts_.find(group)->second;
-      if (!best_group || (count < best_count)) {
-        best_group = &group;
-        best_count = count;
-      }
-    }
-    if (!best_group) {
-      ErrorLine("no suitable group found", line);
-    }
-    return *best_group;
-  }
-
-  void DumpDebug() const {
-    printf("positions size: " PRIuS "\n"
-	   "texcoords size: " PRIuS "\n"
-	   "normals size: " PRIuS "\n",
-           positions_.size(), texcoords_.size(), normals_.size());
-  }
- private:
-  WavefrontObjFile() { }  // For testing.
-
-  void ParseFile(FILE* fp) {
-    // TODO: don't use a fixed-size buffer.
-    const size_t kLineBufferSize = 256;
-    char buffer[kLineBufferSize] = { 0 };
-    unsigned int line_num = 1;
-    while (fgets(buffer, kLineBufferSize, fp) != NULL) {
-      char* stripped = StripLeadingWhitespace(buffer);
-      TerminateAtNewlineOrComment(stripped);
-      ParseLine(stripped, line_num++);
-    }
-  }
-
-  void ParseLine(const char* line, unsigned int line_num) {
-    switch (*line) {
-      case 'v':
-        ParseAttrib(line + 1, line_num);
-        break;
-      case 'f':
-        ParseFace(line + 1, line_num);
-        break;
-      case 'g':
-        if (isspace(line[1])) {
-          ParseGroup(line + 2, line_num);
-        } else {
-          goto unknown;
-        }
-        break;
-      case '\0':
-      case '#':
-        break;  // Do nothing for comments or blank lines.
-      case 'p':
-        WarnLine("point unsupported", line_num);
-        break;
-      case 'l':
-        WarnLine("line unsupported", line_num);
-        break;
-      case 'u':
-        if (0 == strncmp(line + 1, "semtl", 5)) {
-          ParseUsemtl(line + 6, line_num);
-        } else {
-          goto unknown;
-        }
-        break;
-      case 'm':
-        if (0 == strncmp(line + 1, "tllib", 5)) {
-          ParseMtllib(line + 6, line_num);
-        } else {
-          goto unknown;
-        }
-        break;
-      case 's':
-        ParseSmoothingGroup(line + 1, line_num);
-        break;
-      unknown:
-      default:
-        WarnLine("unknown keyword", line_num);
-        break;
-    }
-  }
-
-  void ParseAttrib(const char* line, unsigned int line_num) {
-    ShortFloatList floats;
-    floats.ParseLine(line + 1);
-    if (isspace(*line)) {
-      ParsePosition(floats, line_num);
-    } else if (*line == 't') {
-      ParseTexCoord(floats, line_num);
-    } else if (*line == 'n') {
-      ParseNormal(floats, line_num);
-    } else {
-      WarnLine("unknown attribute format", line_num);
-    }
-  }
-
-  void ParsePosition(const ShortFloatList& floats, unsigned int line_num) {
-    if (floats.size() != positionDim() &&
-        floats.size() != 6) {  // ignore r g b for now.
-      ErrorLine("bad position", line_num);
-    }
-    floats.AppendNTo(&positions_, positionDim());
-  }
-
-  void ParseTexCoord(const ShortFloatList& floats, unsigned int line_num) {
-    if ((floats.size() < 1) || (floats.size() > 3)) {
-      // TODO: correctly handle 3-D texcoords intead of just
-      // truncating.
-      ErrorLine("bad texcoord", line_num);
-    }
-    floats.AppendNTo(&texcoords_, texcoordDim());
-  }
-
-  void ParseNormal(const ShortFloatList& floats, unsigned int line_num) {
-    if (floats.size() != normalDim()) {
-      ErrorLine("bad normal", line_num);
-    }
-    // Normalize to avoid out-of-bounds quantization. This should be
-    // optional, in case someone wants to be using the normal magnitude as
-    // something meaningful.
-    const float x = floats[0];
-    const float y = floats[1];
-    const float z = floats[2];
-    const float scale = 1.0/sqrt(x*x + y*y + z*z);
-    if (isfinite(scale)) {
-      normals_.push_back(scale * x);
-      normals_.push_back(scale * y);
-      normals_.push_back(scale * z);
-    } else {
-      normals_.push_back(0);
-      normals_.push_back(0);
-      normals_.push_back(0);
-    }
-  }
-
-  // Parses faces and converts to triangle fans. This is not a
-  // particularly good tesselation in general case, but it is really
-  // simple, and is perfectly fine for triangles and quads.
-  void ParseFace(const char* line, unsigned int line_num) {
-    // Also handle face outlines as faces.
-    if (*line == 'o') ++line;
-
-    // TODO: instead of storing these indices as-is, it might make
-    // sense to flatten them right away. This can reduce memory
-    // consumption and improve access locality, especially since .OBJ
-    // face indices are so needlessly large.
-    int indices[9] = { 0 };
-    // The first index acts as the pivot for the triangle fan.
-    line = ParseIndices(line, line_num, indices + 0, indices + 1, indices + 2);
-    if (line == NULL) {
-      ErrorLine("bad first index", line_num);
-    }
-    line = ParseIndices(line, line_num, indices + 3, indices + 4, indices + 5);
-    if (line == NULL) {
-      ErrorLine("bad second index", line_num);
-    }
-    // After the first two indices, each index introduces a new
-    // triangle to the fan.
-    while ((line = ParseIndices(line, line_num,
-                                indices + 6, indices + 7, indices + 8))) {
-      current_batch_->AddTriangle(current_group_line_, indices);
-      // The most recent vertex is reused for the next triangle.
-      indices[3] = indices[6];
-      indices[4] = indices[7];
-      indices[5] = indices[8];
-      indices[6] = indices[7] = indices[8] = 0;
-    }
-  }
-
-  // Parse a single group of indices, separated by slashes ('/').
-  // TODO: convert negative indices (that is, relative to the end of
-  // the current vertex positions) to more conventional positive
-  // indices.
-  const char* ParseIndices(const char* line, unsigned int line_num,
-                           int* position_index, int* texcoord_index,
-                           int* normal_index) {
-    const char* endptr = NULL;
-    *position_index = strtoint(line, &endptr);
-    if (*position_index == 0) {
-      return NULL;
-    }
-    if (endptr != NULL && *endptr == '/') {
-      *texcoord_index = strtoint(endptr + 1, &endptr);
-    } else {
-      *texcoord_index = *normal_index = 0;
-    }
-    if (endptr != NULL && *endptr == '/') {
-      *normal_index = strtoint(endptr + 1, &endptr);
-    } else {
-      *normal_index = 0;
-    }
-    return endptr;
-  }
-
-  // .OBJ files can specify multiple groups for a set of faces. This
-  // implementation finds the "most unique" group for a set of faces
-  // and uses that for the batch. In the first pass, we use the line
-  // number of the "g" command to tag the faces. Afterwards, after we
-  // collect group populations, we can go back and give them real
-  // names.
-  void ParseGroup(const char* line, unsigned int line_num) {
-    std::string token;
-    while ((line = ConsumeFirstToken(line, &token))) {
-      ToLowerInplace(&token);
-      group_counts_[token]++;
-      line_to_groups_.insert(std::make_pair(line_num, token));
-    }
-    current_group_line_ = line_num;
-  }
-
-  void ParseSmoothingGroup(const char* line, unsigned int line_num) {
-    static bool once = true;
-    if (once) {
-      WarnLine("s ignored", line_num);
-      once = false;
-    }
-  }
-
-  void ParseMtllib(const char* line, unsigned int line_num) {
-    FILE* fp = fopen(StripLeadingWhitespace(line), "r");
-    if (!fp) {
-      WarnLine("mtllib not found", line_num);
-      return;
-    }
-    WavefrontMtlFile mtlfile(fp);
-    fclose(fp);
-    materials_ = mtlfile.materials();
-    for (size_t i = 0; i < materials_.size(); ++i) {
-      DrawBatch& draw_batch = material_batches_[materials_[i].name];
-      draw_batch.Init(&positions_, &texcoords_, &normals_);
-    }
-  }
-
-  void ParseUsemtl(const char* line, unsigned int line_num) {
-    std::string usemtl;
-    ToLower(StripLeadingWhitespace(line), &usemtl);
-    MaterialBatches::iterator iter = material_batches_.find(usemtl);
-    if (iter == material_batches_.end()) {
-      ErrorLine("material not found", line_num);
-    }
-    current_batch_ = &iter->second;
-  }
-
-  void WarnLine(const char* why, unsigned int line_num) const {
-    fprintf(stderr, "WARNING: %s at line %u\n", why, line_num);
-  }
-
-  void ErrorLine(const char* why, unsigned int line_num) const {
-    fprintf(stderr, "ERROR: %s at line %u\n", why, line_num);
-    exit(-1);
-  }
-
-  AttribList positions_;
-  AttribList texcoords_;
-  AttribList normals_;
-  MaterialList materials_;
-
-  // Currently, batch by texture (i.e. map_Kd).
-  MaterialBatches material_batches_;
-  DrawBatch* current_batch_;
-
-  typedef std::multimap<unsigned int, std::string> LineToGroups;
-  LineToGroups line_to_groups_;
-  std::map<std::string, int> group_counts_;
-  unsigned int current_group_line_;
-};
-
-#endif  // WEBGL_LOADER_MESH_H_

+ 0 - 135
utils/converters/utf8/src/obj2utf8.cc

@@ -1,135 +0,0 @@
-#if 0  // A cute trick to making this .cc self-building from shell.
-g++ $0 -O2 -Wall -Werror -o `basename $0 .cc`;
-exit;
-#endif
-// Copyright 2011 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you
-// may not use this file except in compliance with the License. You
-// may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-// implied. See the License for the specific language governing
-// permissions and limitations under the License.
-
-#include "bounds.h"
-#include "compress.h"
-#include "mesh.h"
-#include "optimize.h"
-#include "stream.h"
-
-int main(int argc, const char* argv[]) {
-  FILE* json_out = stdout;
-  if (argc != 3 && argc != 4) {
-    fprintf(stderr, "Usage: %s in.obj out.utf8\n\n"
-            "\tCompress in.obj to out.utf8 and writes JS to STDOUT.\n\n",
-            argv[0]);
-    return -1;
-  } else if (argc == 4) {
-    json_out = fopen(argv[3], "w");
-    CHECK(json_out != NULL);
-  }
-
-  FILE* fp = fopen(argv[1], "r");
-  WavefrontObjFile obj(fp);
-  fclose(fp);
-
-  fputs("{\n  \"materials\": {\n", json_out);
-  const MaterialList& materials = obj.materials();
-  for (size_t i = 0; i < materials.size(); ++i) {
-    materials[i].DumpJson(json_out);
-    const bool last = i == materials.size() - 1;
-    fputs(",\n" + last, json_out);
-  }
-  fputs("  },\n", json_out);
-  
-  const MaterialBatches& batches = obj.material_batches();
-
-  // Pass 1: compute bounds.
-  webgl_loader::Bounds bounds;
-  bounds.Clear();
-  for (MaterialBatches::const_iterator iter = batches.begin();
-       iter != batches.end(); ++iter) {
-    const DrawBatch& draw_batch = iter->second;
-    bounds.Enclose(draw_batch.draw_mesh().attribs);
-  }
-  webgl_loader::BoundsParams bounds_params = 
-      webgl_loader::BoundsParams::FromBounds(bounds);
-  fputs("  \"decodeParams\": ", json_out);
-  bounds_params.DumpJson(json_out);
-  fputs(", \"urls\": {\n", json_out);
-  // Pass 2: quantize, optimize, compress, report.
-  FILE* utf8_out_fp = fopen(argv[2], "wb");
-  CHECK(utf8_out_fp != NULL);
-  fprintf(json_out, "    \"%s\": [\n", argv[2]);
-  webgl_loader::FileSink utf8_sink(utf8_out_fp);
-  size_t offset = 0;
-  MaterialBatches::const_iterator iter = batches.begin();
-  while (iter != batches.end()) {
-    const DrawMesh& draw_mesh = iter->second.draw_mesh();
-    if (draw_mesh.indices.empty()) {
-      ++iter;
-      continue;
-    }
-    QuantizedAttribList quantized_attribs;
-    webgl_loader::AttribsToQuantizedAttribs(draw_mesh.attribs, bounds_params,
-					    &quantized_attribs);
-    VertexOptimizer vertex_optimizer(quantized_attribs);
-    const std::vector<GroupStart>& group_starts = iter->second.group_starts();
-    WebGLMeshList webgl_meshes;
-    std::vector<size_t> group_lengths;
-    for (size_t i = 1; i < group_starts.size(); ++i) {
-      const size_t here = group_starts[i-1].offset;
-      const size_t length = group_starts[i].offset - here;
-      group_lengths.push_back(length);
-      vertex_optimizer.AddTriangles(&draw_mesh.indices[here], length,
-                                    &webgl_meshes);
-    }
-    const size_t here = group_starts.back().offset;
-    const size_t length = draw_mesh.indices.size() - here;
-    CHECK(length % 3 == 0);
-    group_lengths.push_back(length);
-    vertex_optimizer.AddTriangles(&draw_mesh.indices[here], length,
-                                  &webgl_meshes);
-
-    std::vector<std::string> material;
-    std::vector<size_t> attrib_start, attrib_length, index_start, index_length;
-    for (size_t i = 0; i < webgl_meshes.size(); ++i) {
-      const size_t num_attribs = webgl_meshes[i].attribs.size();
-      const size_t num_indices = webgl_meshes[i].indices.size();
-      CHECK(num_attribs % 8 == 0);
-      CHECK(num_indices % 3 == 0);
-      webgl_loader::CompressQuantizedAttribsToUtf8(webgl_meshes[i].attribs, 
-						   &utf8_sink);
-      webgl_loader::CompressIndicesToUtf8(webgl_meshes[i].indices, &utf8_sink);
-      material.push_back(iter->first);
-      attrib_start.push_back(offset);
-      attrib_length.push_back(num_attribs / 8);
-      index_start.push_back(offset + num_attribs);
-      index_length.push_back(num_indices / 3);
-      offset += num_attribs + num_indices;
-    }
-    for (size_t i = 0; i < webgl_meshes.size(); ++i) {
-      fprintf(json_out,
-              "      { \"material\": \"%s\",\n"
-              "        \"attribRange\": [" PRIuS ", " PRIuS "],\n"
-              "        \"indexRange\": [" PRIuS ", " PRIuS "]\n"
-              "      }",
-              material[i].c_str(),
-              attrib_start[i], attrib_length[i],
-              index_start[i], index_length[i]);
-      if (i != webgl_meshes.size() - 1) {
-        fputs(",\n", json_out);
-      }
-    }
-    const bool last = (++iter == batches.end());
-    fputs(",\n" + last, json_out);
-  }
-  fputs("    ]\n", json_out);
-  fputs("  }\n}", json_out);
-  return 0;
-}

+ 0 - 138
utils/converters/utf8/src/obj2utf8x.cc

@@ -1,138 +0,0 @@
-#if 0  // A cute trick to making this .cc self-building from shell.
-g++ $0 -O2 -Wall -Werror -o `basename $0 .cc`;
-exit;
-#endif
-// Copyright 2011 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you
-// may not use this file except in compliance with the License. You
-// may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-// implied. See the License for the specific language governing
-// permissions and limitations under the License.
-
-#include "bounds.h"
-#include "compress.h"
-#include "mesh.h"
-#include "optimize.h"
-#include "stream.h"
-
-int main(int argc, const char* argv[]) {
-  FILE* json_out = stdout;
-  if (argc != 3 && argc != 4) {
-    fprintf(stderr, "Usage: %s in.obj out.utf8\n\n"
-            "\tCompress in.obj to out.utf8 and writes JS to STDOUT.\n\n",
-            argv[0]);
-    return -1;
-  } else if (argc == 4) {
-    json_out = fopen(argv[3], "w");
-    CHECK(json_out != NULL);
-  }
-
-  FILE* fp = fopen(argv[1], "r");
-  WavefrontObjFile obj(fp);
-  fclose(fp);
-
-  fputs("{\n  \"materials\": {\n", json_out);
-  const MaterialList& materials = obj.materials();
-  for (size_t i = 0; i < materials.size(); ++i) {
-    materials[i].DumpJson(json_out);
-    const bool last = i == materials.size() - 1;
-    fputs(",\n" + last, json_out);
-  }
-  fputs("  },\n", json_out);
-  
-  const MaterialBatches& batches = obj.material_batches();
-
-  // Pass 1: compute bounds.
-  webgl_loader::Bounds bounds;
-  bounds.Clear();
-  for (MaterialBatches::const_iterator iter = batches.begin();
-       iter != batches.end(); ++iter) {
-    const DrawBatch& draw_batch = iter->second;
-    bounds.Enclose(draw_batch.draw_mesh().attribs);
-  }
-  webgl_loader::BoundsParams bounds_params = 
-      webgl_loader::BoundsParams::FromBounds(bounds);
-  fputs("  \"decodeParams\": ", json_out);
-  bounds_params.DumpJson(json_out);
-  fputs(",\n  \"urls\": {\n", json_out);
-  // Pass 2: quantize, optimize, compress, report.
-  FILE* utf8_out_fp = fopen(argv[2], "wb");
-  CHECK(utf8_out_fp != NULL);
-  fprintf(json_out, "    \"%s\": [\n", argv[2]);
-  webgl_loader::FileSink utf8_sink(utf8_out_fp);
-  size_t offset = 0;
-  MaterialBatches::const_iterator iter = batches.begin();
-  while (iter != batches.end()) {
-    const DrawMesh& draw_mesh = iter->second.draw_mesh();
-    if (draw_mesh.indices.empty()) {
-      ++iter;
-      continue;
-    }
-    QuantizedAttribList quantized_attribs;
-    webgl_loader::AttribsToQuantizedAttribs(draw_mesh.attribs, bounds_params,
-					    &quantized_attribs);
-    VertexOptimizer vertex_optimizer(quantized_attribs);
-    const std::vector<GroupStart>& group_starts = iter->second.group_starts();
-    WebGLMeshList webgl_meshes;
-    std::vector<size_t> group_lengths;
-    for (size_t i = 1; i < group_starts.size(); ++i) {
-      const size_t here = group_starts[i-1].offset;
-      const size_t length = group_starts[i].offset - here;
-      group_lengths.push_back(length);
-      vertex_optimizer.AddTriangles(&draw_mesh.indices[here], length,
-                                    &webgl_meshes);
-    }
-    const size_t here = group_starts.back().offset;
-    const size_t length = draw_mesh.indices.size() - here;
-    CHECK(length % 3 == 0);
-    group_lengths.push_back(length);
-    vertex_optimizer.AddTriangles(&draw_mesh.indices[here], length,
-                                  &webgl_meshes);
-
-    std::vector<std::string> material;
-    // TODO: is this buffering still necessary?
-    std::vector<size_t> attrib_start, attrib_length, 
-        code_start, code_length, num_tris;
-    for (size_t i = 0; i < webgl_meshes.size(); ++i) {
-      const size_t num_attribs = webgl_meshes[i].attribs.size();
-      const size_t num_indices = webgl_meshes[i].indices.size();
-      CHECK(num_attribs % 8 == 0);
-      CHECK(num_indices % 3 == 0);
-      webgl_loader::EdgeCachingCompressor compressor(webgl_meshes[i].attribs,
-                                                     webgl_meshes[i].indices);
-      compressor.Compress(&utf8_sink);
-      material.push_back(iter->first);
-      attrib_start.push_back(offset);
-      attrib_length.push_back(num_attribs / 8);
-      code_start.push_back(offset + num_attribs);
-      code_length.push_back(compressor.codes().size());
-      num_tris.push_back(num_indices / 3);
-      offset += num_attribs + compressor.codes().size();
-    }
-    for (size_t i = 0; i < webgl_meshes.size(); ++i) {
-      fprintf(json_out,
-              "      { \"material\": \"%s\",\n"
-              "        \"attribRange\": [" PRIuS ", " PRIuS "],\n"
-              "        \"codeRange\": [" PRIuS ", " PRIuS ", " PRIuS "]\n"
-              "      }",
-              material[i].c_str(),
-              attrib_start[i], attrib_length[i],
-              code_start[i], code_length[i], num_tris[i]);
-      if (i != webgl_meshes.size() - 1) {
-        fputs(",\n", json_out);
-      }
-    }
-    const bool last = (++iter == batches.end());
-    fputs(",\n" + last, json_out);
-  }
-  fputs("    ]\n", json_out);
-  fputs("  }\n}", json_out);
-  return 0;
-}

+ 0 - 165
utils/converters/utf8/src/objcompress.cc

@@ -1,165 +0,0 @@
-#if 0  // A cute trick to making this .cc self-building from shell.
-g++ $0 -O2 -Wall -Werror -o `basename $0 .cc`;
-exit;
-#endif
-// Copyright 2011 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you
-// may not use this file except in compliance with the License. You
-// may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-// implied. See the License for the specific language governing
-// permissions and limitations under the License.
-
-#include "bounds.h"
-#include "compress.h"
-#include "mesh.h"
-#include "optimize.h"
-#include "stream.h"
-
-int main(int argc, const char* argv[]) {
-  if (argc != 3) {
-    fprintf(stderr, "Usage: %s in.obj out.utf8\n\n"
-            "\tCompress in.obj to out.utf8 and writes JS to STDOUT.\n\n",
-            argv[0]);
-    return -1;
-  }
-  FILE* fp = fopen(argv[1], "r");
-  WavefrontObjFile obj(fp);
-  fclose(fp);
-
-  printf("MODELS[\'%s\'] = {\n", StripLeadingDir(argv[1]));
-  puts("  materials: {");
-  const MaterialList& materials = obj.materials();
-  for (size_t i = 0; i < materials.size(); ++i) {
-    materials[i].DumpJson();
-  }
-  puts("  },");
-  
-  const MaterialBatches& batches = obj.material_batches();
-
-  // Pass 1: compute bounds.
-  webgl_loader::Bounds bounds;
-  bounds.Clear();
-  for (MaterialBatches::const_iterator iter = batches.begin();
-       iter != batches.end(); ++iter) {
-    const DrawBatch& draw_batch = iter->second;
-    bounds.Enclose(draw_batch.draw_mesh().attribs);
-  }
-  webgl_loader::BoundsParams bounds_params = 
-    webgl_loader::BoundsParams::FromBounds(bounds);
-  printf("  decodeParams: ");
-  bounds_params.DumpJson();
-
-  puts("  urls: {");
-  std::vector<char> utf8;
-  webgl_loader::VectorSink sink(&utf8);
-  // Pass 2: quantize, optimize, compress, report.
-  for (MaterialBatches::const_iterator iter = batches.begin();
-       iter != batches.end(); ++iter) {
-    size_t offset = 0;
-    utf8.clear();
-    const DrawMesh& draw_mesh = iter->second.draw_mesh();
-    if (draw_mesh.indices.empty()) continue;
-    
-    QuantizedAttribList quantized_attribs;
-    webgl_loader::AttribsToQuantizedAttribs(draw_mesh.attribs, bounds_params,
-					    &quantized_attribs);
-    VertexOptimizer vertex_optimizer(quantized_attribs);
-    const std::vector<GroupStart>& group_starts = iter->second.group_starts();
-    WebGLMeshList webgl_meshes;
-    std::vector<size_t> group_lengths;
-    for (size_t i = 1; i < group_starts.size(); ++i) {
-      const size_t here = group_starts[i-1].offset;
-      const size_t length = group_starts[i].offset - here;
-      group_lengths.push_back(length);
-      vertex_optimizer.AddTriangles(&draw_mesh.indices[here], length,
-                                    &webgl_meshes);
-    }
-    const size_t here = group_starts.back().offset;
-    const size_t length = draw_mesh.indices.size() - here;
-    const bool divisible_by_3 = length % 3 == 0;
-    CHECK(divisible_by_3);
-    group_lengths.push_back(length);
-    vertex_optimizer.AddTriangles(&draw_mesh.indices[here], length,
-                                  &webgl_meshes);
-
-    std::vector<std::string> material;
-    std::vector<size_t> attrib_start, attrib_length, index_start, index_length;
-    for (size_t i = 0; i < webgl_meshes.size(); ++i) {
-      const size_t num_attribs = webgl_meshes[i].attribs.size();
-      const size_t num_indices = webgl_meshes[i].indices.size();
-      const bool kBadSizes = num_attribs % 8 || num_indices % 3;
-      CHECK(!kBadSizes);
-      webgl_loader::CompressQuantizedAttribsToUtf8(webgl_meshes[i].attribs, 
-						   &sink);
-      webgl_loader::CompressIndicesToUtf8(webgl_meshes[i].indices, &sink);
-      material.push_back(iter->first);
-      attrib_start.push_back(offset);
-      attrib_length.push_back(num_attribs / 8);
-      index_start.push_back(offset + num_attribs);
-      index_length.push_back(num_indices / 3);
-      offset += num_attribs + num_indices;
-    }
-    const uint32 hash = SimpleHash(&utf8[0], utf8.size());
-    char buf[9] = { '\0' };
-    ToHex(hash, buf);
-    // TODO: this needs to handle paths.
-    std::string out_fn = std::string(buf) + "." + argv[2];
-    FILE* out_fp = fopen(out_fn.c_str(), "wb");
-    printf("    \'%s\': [\n", out_fn.c_str());
-    size_t group_index = 0;
-    for (size_t i = 0; i < webgl_meshes.size(); ++i) {
-      printf("      { material: \'%s\',\n"
-             "        attribRange: [" PRIuS ", " PRIuS "],\n"
-             "        indexRange: [" PRIuS ", " PRIuS "],\n"
-             "        bboxes: " PRIuS ",\n"
-             "        names: [",
-             material[i].c_str(),
-             attrib_start[i], attrib_length[i],
-             index_start[i], index_length[i],
-             offset);
-      std::vector<size_t> buffered_lengths;
-      size_t group_start = 0;
-      while (group_index < group_lengths.size()) {
-        printf("\'%s\', ",
-               obj.LineToGroup(group_starts[group_index].group_line).c_str());
-        const size_t group_length = group_lengths[group_index];
-        const size_t next_start = group_start + group_length;
-        const size_t webgl_index_length = webgl_meshes[i].indices.size();
-        // TODO: bbox info is better placed at the head of the file,
-        // perhaps transposed. Also, when a group gets split between
-        // batches, the bbox gets stored twice.
-	webgl_loader::CompressAABBToUtf8(group_starts[group_index].bounds,
-					 bounds_params, &sink);
-        offset += 6;
-        if (next_start < webgl_index_length) {
-          buffered_lengths.push_back(group_length);
-          group_start = next_start;
-          ++group_index;
-        } else {
-          const size_t fits = webgl_index_length - group_start;
-          buffered_lengths.push_back(fits);
-          group_start = 0;
-          group_lengths[group_index] -= fits;
-          break;
-        }
-      }
-      printf("],\n        lengths: [");
-      for (size_t k = 0; k < buffered_lengths.size(); ++k) {
-        printf(PRIuS ", ", buffered_lengths[k]);
-      }
-      puts("],\n      },");
-    }
-    fwrite(&utf8[0], 1, utf8.size(), out_fp);
-    fclose(out_fp);
-    puts("    ],");
-  }
-  puts("  }\n};");
-  return 0;
-}

+ 0 - 273
utils/converters/utf8/src/optimize.h

@@ -1,273 +0,0 @@
-// Copyright 2011 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you
-// may not use this file except in compliance with the License. You
-// may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-// implied. See the License for the specific language governing
-// permissions and limitations under the License.
-
-#ifndef WEBGL_LOADER_OPTIMIZE_H_
-#define WEBGL_LOADER_OPTIMIZE_H_
-
-#include <math.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include "base.h"
-
-// TODO: since most vertices are part of 6 faces, you can optimize
-// this by using a small inline buffer.
-typedef std::vector<int> FaceList;
-
-// Linear-Speed Vertex Cache Optimisation, via:
-// http://home.comcast.net/~tom_forsyth/papers/fast_vert_cache_opt.html
-class VertexOptimizer {
- public:
-  struct TriangleData {
-    bool active;  // true iff triangle has not been optimized and emitted.
-    // TODO: eliminate some wasted computation by using this cache.
-    // float score;
-  };
-
-  VertexOptimizer(const QuantizedAttribList& attribs)
-      : attribs_(attribs),
-        per_vertex_(attribs_.size() / 8),
-        next_unused_index_(0)
-  {
-    // The cache has an extra slot allocated to simplify the logic in
-    // InsertIndexToCache.
-    for (unsigned int i = 0; i < kCacheSize + 1; ++i) {
-      cache_[i] = kUnknownIndex;
-    }
-
-    // Initialize per-vertex state.
-    for (size_t i = 0; i < per_vertex_.size(); ++i) {
-      VertexData& vertex_data = per_vertex_[i];
-      vertex_data.cache_tag = kCacheSize;
-      vertex_data.output_index = kMaxOutputIndex;
-    }
-  }
-
-  void AddTriangles(const int* indices, size_t length,
-                    WebGLMeshList* meshes) {
-    std::vector<TriangleData> per_tri(length / 3);
-
-    // Loop through the triangles, updating vertex->face lists.
-    for (size_t i = 0; i < per_tri.size(); ++i) {
-      per_tri[i].active = true;
-      per_vertex_[indices[3*i + 0]].faces.push_back(i);
-      per_vertex_[indices[3*i + 1]].faces.push_back(i);
-      per_vertex_[indices[3*i + 2]].faces.push_back(i);
-    }
-
-    // TODO: with index bounds, no need to recompute everything.
-    // Compute initial vertex scores.
-    for (size_t i = 0; i < per_vertex_.size(); ++i) {
-      VertexData& vertex_data = per_vertex_[i];
-      vertex_data.cache_tag = kCacheSize;
-      vertex_data.output_index = kMaxOutputIndex;
-      vertex_data.UpdateScore();
-    }
-
-    // Prepare output.
-    if (meshes->empty()) {
-      meshes->push_back(WebGLMesh());
-    }
-    WebGLMesh* mesh = &meshes->back();
-
-    // Consume indices, one triangle at a time.
-    for (size_t c = 0; c < per_tri.size(); ++c) {
-      const int best_triangle = FindBestTriangle(indices, per_tri);
-      per_tri[best_triangle].active = false;
-
-      // Iterate through triangle indices.
-      for (size_t i = 0; i < 3; ++i) {
-        const int index = indices[3*best_triangle + i];
-        VertexData& vertex_data = per_vertex_[index];
-        vertex_data.RemoveFace(best_triangle);
-      
-        InsertIndexToCache(index);
-        const int cached_output_index = per_vertex_[index].output_index;
-        // Have we seen this index before?
-        if (cached_output_index != kMaxOutputIndex) {
-          mesh->indices.push_back(cached_output_index);
-          continue;
-        }
-        // The first time we see an index, not only do we increment
-        // next_unused_index_ counter, but we must also copy the
-        // corresponding attributes.  TODO: do quantization here?
-        per_vertex_[index].output_index = next_unused_index_;
-        for (size_t j = 0; j < 8; ++j) {
-          mesh->attribs.push_back(attribs_[8*index + j]);
-        }
-        mesh->indices.push_back(next_unused_index_++);
-      }
-      // Check if there is room for another triangle.
-      if (next_unused_index_ > kMaxOutputIndex - 3) {
-        // Is it worth figuring out which other triangles can be added
-        // given the verties already added? Then, perhaps
-        // re-optimizing?
-        next_unused_index_ = 0;
-        meshes->push_back(WebGLMesh());
-        mesh = &meshes->back();
-        for (size_t i = 0; i <= kCacheSize; ++i) {
-          cache_[i] = kUnknownIndex;
-        }
-        for (size_t i = 0; i < per_vertex_.size(); ++i) {
-          per_vertex_[i].output_index = kMaxOutputIndex;
-        }
-      }
-    }
-  }
- private:
-  static const int kUnknownIndex = -1;
-  static const uint16 kMaxOutputIndex = 0xD800;
-  static const size_t kCacheSize = 32;  // Does larger improve compression?
-
-  struct VertexData {
-    // Should this also update scores for incident triangles?
-    void UpdateScore() {
-      const size_t active_tris = faces.size();
-      if (active_tris <= 0) {
-        score = -1.f;
-        return;
-      }
-      // TODO: build initial score table.
-      if (cache_tag < 3) {
-        // The most recent triangle should has a fixed score to
-        // discourage generating nothing but really long strips. If we
-        // want strips, we should use a different optimizer.
-        const float kLastTriScore = 0.75f;
-        score = kLastTriScore;
-      } else if (cache_tag < kCacheSize) {
-        // Points for being recently used.
-        const float kScale = 1.f / (kCacheSize - 3);
-        const float kCacheDecayPower = 1.5f;
-        score = powf(1.f - kScale * (cache_tag - 3), kCacheDecayPower);
-      } else {
-        // Not in cache.
-        score = 0.f;
-      }
-
-      // Bonus points for having a low number of tris still to use the
-      // vert, so we get rid of lone verts quickly.
-      const float kValenceBoostScale = 2.0f;
-      const float kValenceBoostPower = 0.5f;
-      // rsqrt?
-      const float valence_boost = powf(active_tris, -kValenceBoostPower);
-      score += valence_boost * kValenceBoostScale;
-    }
-
-    // TODO: this assumes that "tri" is in the list!
-    void RemoveFace(int tri) {
-      FaceList::iterator face = faces.begin();
-      while (*face != tri) ++face;
-      *face = faces.back();
-      faces.pop_back();
-    }
-
-    FaceList faces;
-    unsigned int cache_tag;  // kCacheSize means not in cache.
-    float score;
-    uint16 output_index;
-  };
-
-  int FindBestTriangle(const int* indices,
-                       const std::vector<TriangleData>& per_tri) {
-    float best_score = -HUGE_VALF;
-    int best_triangle = -1;
-
-    // The trick to making this algorithm run in linear time (with
-    // respect to the vertices) is to only scan the triangles incident
-    // on the simulated cache for the next triangle. It is an
-    // approximation, but the score is heuristic. Anyway, most of the
-    // time the best triangle will be found this way.
-    for (size_t i = 0; i < kCacheSize; ++i) {
-      if (cache_[i] == kUnknownIndex) {
-        break;
-      }
-      const VertexData& vertex_data = per_vertex_[cache_[i]];
-      for (size_t j = 0; j < vertex_data.faces.size(); ++j) {
-        const int tri_index = vertex_data.faces[j];
-        if (per_tri[tri_index].active) {
-          const float score =
-              per_vertex_[indices[3*tri_index + 0]].score +
-              per_vertex_[indices[3*tri_index + 1]].score +
-              per_vertex_[indices[3*tri_index + 2]].score;
-          if (score > best_score) {
-            best_score = score;
-            best_triangle = tri_index;
-          }
-        }
-      }
-    }
-    // TODO: keep a range of active triangles to make the slow scan a
-    // little faster. Does this ever happen?
-    if (best_triangle == -1) {
-      // If no triangles can be found through the cache (e.g. for the
-      // first triangle) go through all the active triangles and find
-      // the best one.
-      for (size_t i = 0; i < per_tri.size(); ++i) {
-        if (per_tri[i].active) {
-          const float score =
-              per_vertex_[indices[3*i + 0]].score +
-              per_vertex_[indices[3*i + 1]].score +
-              per_vertex_[indices[3*i + 2]].score;
-          if (score > best_score) {
-            best_score = score;
-            best_triangle = i;
-          }
-        }
-      }
-      CHECK(-1 != best_triangle);
-    }
-    return best_triangle;
-  }
-
-  // TODO: faster to update an entire triangle.
-  // This also updates the vertex scores!
-  void InsertIndexToCache(int index) {
-    // Find how recently the vertex was used.
-    const unsigned int cache_tag = per_vertex_[index].cache_tag;
-
-    // Don't do anything if the vertex is already at the head of the
-    // LRU list.
-    if (cache_tag == 0) return;
-
-    // Loop through the cache, inserting the index at the front, and
-    // bubbling down to where the index was originally found. If the
-    // index was not originally in the cache, then it claims to be at
-    // the (kCacheSize + 1)th entry, and we use an extra slot to make
-    // that case simpler.
-    int to_insert = index;
-    for (unsigned int i = 0; i <= cache_tag; ++i) {
-      const int current_index = cache_[i];
-
-      // Update cross references between the entry of the cache and
-      // the per-vertex data.
-      cache_[i] = to_insert;
-      per_vertex_[to_insert].cache_tag = i;
-      per_vertex_[to_insert].UpdateScore();
-      
-      // No need to continue if we find an empty entry.
-      if (current_index == kUnknownIndex) {
-        break;
-      }
-      
-      to_insert = current_index;
-    }
-  }
-
-  const QuantizedAttribList& attribs_;
-  std::vector<VertexData> per_vertex_;
-  int cache_[kCacheSize + 1];
-  uint16 next_unused_index_;
-};
-
-#endif  // WEBGL_LOADER_OPTIMIZE_H_

+ 0 - 272
utils/converters/utf8/src/stream.h

@@ -1,272 +0,0 @@
-// Copyright 2012 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you
-// may not use this file except in compliance with the License. You
-// may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-// implied. See the License for the specific language governing
-// permissions and limitations under the License.
-
-#ifndef WEBGL_LOADER_STREAM_H_
-#define WEBGL_LOADER_STREAM_H_
-
-#include <stdio.h>
-#include <string>
-#include <vector>
-
-#include "base.h"
-
-namespace webgl_loader {
-
-// An abstract interface to allow appending bytes to various streams.
-class ByteSinkInterface {
- public:
-  virtual void Put(char c) = 0;
-  virtual size_t PutN(const char* data, size_t len) = 0;
-  virtual ~ByteSinkInterface() { }
-
- protected:
-  ByteSinkInterface() { }
-
- private:
-  // Disallow copy and assignment.
-  ByteSinkInterface(const ByteSinkInterface&);
-  void operator=(const ByteSinkInterface&);
-};
-
-// None of the concrete implementations actually own the backing data.
-// They should be safe to copy.
-
-class NullSink : public ByteSinkInterface {
- public:
-  NullSink() { }
-
-  virtual void Put(char) { }
-  
-  virtual size_t PutN(const char*, size_t len) { return len; }
-};
-
-class FileSink : public ByteSinkInterface {
- public:
-  // |fp| is unowned and must not be NULL.
-  explicit FileSink(FILE* fp)
-    : fp_(fp) {
-  }
-
-  virtual void Put(char c) {
-    PutChar(c, fp_);
-  }
-
-  virtual size_t PutN(const char* data, size_t len) {
-    return fwrite(data, 1, len, fp_);
-  }
-
- private:
-  FILE *fp_;  // unowned.
-};
-
-class VectorSink : public ByteSinkInterface {
- public:
-  // |vec| is unowned and must not be NULL.
-  explicit VectorSink(std::vector<char>* vec)
-    : vec_(vec) {
-  }
-  
-  virtual void Put(char c) {
-    vec_->push_back(c);
-  }
-
-  virtual size_t PutN(const char* data, size_t len) {
-    vec_->insert(vec_->end(), data, data + len);
-    return len;
-  }
-
- private:
-  std::vector<char>* vec_;  // unowned.
-};
-
-class StringSink : public ByteSinkInterface {
- public:
-  // |str| is unowned and must not be NULL.
-  explicit StringSink(std::string* str)
-    : str_(str) {
-    DCHECK(str != NULL);
-  }
-
-  virtual void Put(char c) {
-    str_->push_back(c);
-  }
-
-  virtual size_t PutN(const char* data, size_t len) {
-    str_->append(data, len);
-    return len;
-  }
-
- private:
-  std::string* str_;  // unowned.
-};
-
-class ByteHistogramSink : public ByteSinkInterface {
- public:
-  // |sink| in unowned and must not be NULL.
-  explicit ByteHistogramSink(ByteSinkInterface* sink)
-      : sink_(sink) {
-    memset(histo_, 0, sizeof(histo_));
-  }
-
-  virtual void Put(char c) {
-    histo_[static_cast<uint8>(c)]++;
-    sink_->Put(c);
-  }
-
-  virtual size_t PutN(const char* data, size_t len) {
-    const char* const end = data + len;
-    for (const char* iter = data; iter != end; ++iter) {
-      histo_[static_cast<uint8>(*iter)]++;
-    }
-    return sink_->PutN(data, len);
-  }
-
-  const size_t* histo() const {
-    return histo_;
-  }
-
- private:
-  size_t histo_[256];
-  ByteSinkInterface* sink_;  // unowned.
-};
-
-// TODO: does it make sense to have a global enum? How should 
-// new BufferedInput implementations define new error codes? 
-enum ErrorCode {
-  kNoError = 0, 
-  kEndOfFile = 1,
-  kFileError = 2,  // TODO: translate errno.
-};
-
-// Adapted from ryg's BufferedStream abstraction:
-// http://fgiesen.wordpress.com/2011/11/21/buffer-centric-io/
-class BufferedInput {
- public:
-  typedef ErrorCode (*Refiller)(BufferedInput*);
-
-  BufferedInput(Refiller refiller = RefillZeroes)
-      : cursor(NULL),
-        begin_(NULL),
-        end_(NULL),
-        refiller_(refiller),
-        error_(kNoError) {
-  }
-
-  // InitFromMemory.
-  BufferedInput(const char* data, size_t length)
-      : cursor(data),
-        begin_(data),
-        end_(data + length),
-        refiller_(RefillEndOfFile),
-        error_(kNoError) {
-  }
-
-  const char* begin() const {
-    return begin_;
-  }
-
-  const char* end() const {
-    return end_;
-  }
-
-  const char* cursor;
-
-  ErrorCode error() const {
-    DCHECK(begin() <= cursor);
-    DCHECK(cursor <= end());
-    return error_;
-  }
-
-  ErrorCode Refill() {
-    DCHECK(begin() <= cursor);
-    DCHECK(cursor <= end());
-    if (cursor == end()) {
-      error_ = refiller_(this);
-    }
-    return error_;
-  }
-
- protected:
-  static ErrorCode RefillZeroes(BufferedInput* bi) {
-    static const char kZeroes[64] = { 0 };
-    bi->cursor = kZeroes;
-    bi->begin_ = kZeroes;
-    bi->end_ = kZeroes + sizeof(kZeroes);
-    return bi->error_;
-  }
-
-  static ErrorCode RefillEndOfFile(BufferedInput* bi) {
-    return bi->fail(kEndOfFile);
-  }
-
-  ErrorCode fail(ErrorCode why) {
-    error_ = why;
-    refiller_ = RefillZeroes;
-    return Refill();
-  }
-
-  const char* begin_;
-  const char* end_;
-  Refiller refiller_;
-  ErrorCode error_;
-
- private:
-  // Disallow copy and assign.
-  BufferedInput(const BufferedInput&);
-  void operator=(const BufferedInput&);
-};
-
-class BufferedInputStream : public BufferedInput {
- public:
-  BufferedInputStream(FILE* fp, char* buf, size_t size)
-      : BufferedInput(RefillFread),
-        fp_(fp),
-        buf_(buf),
-        size_(size) {
-    DCHECK(buf != NULL);
-    // Disable buffering since we're doing it ourselves.
-    // TODO check error.
-    setvbuf(fp_, NULL, _IONBF, 0);
-    cursor = buf;
-    begin_ = buf;
-    end_ = buf;
-  }
- protected:
-  // TODO: figure out how to automate this casting pattern.
-  static ErrorCode RefillFread(BufferedInput* bi) {
-    return static_cast<BufferedInputStream*>(bi)->DoRefillFread();
-  }
- private:
-  ErrorCode DoRefillFread() {
-    const size_t bytes_read = fread(buf_, 1, size_, fp_);
-    cursor = begin_;
-    end_ = begin_ + bytes_read;
-    if (bytes_read < size_) {
-      if (feof(fp_)) {
-        refiller_ = RefillEndOfFile;
-      } else if (ferror(fp_)) {
-        return fail(kFileError);
-      }
-    }
-    return kNoError;
-  }
-
-  FILE* fp_;
-  char* buf_;
-  size_t size_;
-};
-
-}  // namespace webgl_loader
-
-#endif  // WEBGL_LOADER_STREAM_H_

+ 0 - 61
utils/converters/utf8/src/utf8.h

@@ -1,61 +0,0 @@
-// Copyright 2011 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you
-// may not use this file except in compliance with the License. You
-// may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-// implied. See the License for the specific language governing
-// permissions and limitations under the License.
-
-#ifndef WEBGL_LOADER_UTF8_H_
-#define WEBGL_LOADER_UTF8_H_
-
-#include "base.h"
-#include "stream.h"
-
-namespace webgl_loader {
-
-const uint8 kUtf8MoreBytesPrefix = 0x80;
-const uint8 kUtf8TwoBytePrefix = 0xC0;
-const uint8 kUtf8ThreeBytePrefix = 0xE0;
-
-const uint16 kUtf8TwoByteLimit = 0x0800;
-const uint16 kUtf8SurrogatePairStart = 0xD800;
-const uint16 kUtf8SurrogatePairNum = 0x0800;
-const uint16 kUtf8EncodableEnd = 0x10000 - kUtf8SurrogatePairNum;
-
-const uint16 kUtf8MoreBytesMask = 0x3F;
-
-bool Uint16ToUtf8(uint16 word, ByteSinkInterface* sink) {
-  if (word < 0x80) {
-    sink->Put(static_cast<char>(word));
-  } else if (word < kUtf8TwoByteLimit) {
-    sink->Put(static_cast<char>(kUtf8TwoBytePrefix + (word >> 6)));
-    sink->Put(static_cast<char>(kUtf8MoreBytesPrefix +
-				(word & kUtf8MoreBytesMask)));
-  } else if (word < kUtf8EncodableEnd) {
-    // We can only encode 65535 - 2048 values because of illegal UTF-8
-    // characters, such as surrogate pairs in [0xD800, 0xDFFF].
-    if (word >= kUtf8SurrogatePairStart) {
-      // Shift the result to avoid the surrogate pair range.
-      word += kUtf8SurrogatePairNum;
-    }
-    sink->Put(static_cast<char>(kUtf8ThreeBytePrefix + (word >> 12)));
-    sink->Put(static_cast<char>(kUtf8MoreBytesPrefix +
-				((word >> 6) & kUtf8MoreBytesMask)));
-    sink->Put(static_cast<char>(kUtf8MoreBytesPrefix +
-				(word & kUtf8MoreBytesMask)));
-  } else {
-    return false;
-  }
-  return true;
-}
-
-}  // namespace webgl_loader
-
-#endif  // WEBGL_LOADER_UTF8_H_