Diffstat (limited to 'src/3rdparty/assimp/code/FBXConverter.cpp')
-rw-r--r--   src/3rdparty/assimp/code/FBXConverter.cpp   414
1 file changed, 229 insertions, 185 deletions
diff --git a/src/3rdparty/assimp/code/FBXConverter.cpp b/src/3rdparty/assimp/code/FBXConverter.cpp
index 692cda836..24bdfdd11 100644
--- a/src/3rdparty/assimp/code/FBXConverter.cpp
+++ b/src/3rdparty/assimp/code/FBXConverter.cpp
@@ -2,7 +2,8 @@
Open Asset Import Library (assimp)
----------------------------------------------------------------------
-Copyright (c) 2006-2016, assimp team
+Copyright (c) 2006-2017, assimp team
+
All rights reserved.
Redistribution and use of this software in source and binary forms,
@@ -54,11 +55,10 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "StringComparison.h"
#include <assimp/scene.h>
+
#include <tuple>
#include <memory>
-
#include <iterator>
-#include <sstream>
#include <vector>
namespace Assimp {
@@ -71,7 +71,7 @@ using namespace Util;
#define CONVERT_FBX_TIME(time) static_cast<double>(time) / 46186158000L
- // XXX vc9's debugger won't step into anonymous namespaces
+// XXX vc9's debugger won't step into anonymous namespaces
//namespace {
/** Dummy class to encapsulate the conversion process */
@@ -114,11 +114,9 @@ private:
// collect and assign child nodes
void ConvertNodes( uint64_t id, aiNode& parent, const aiMatrix4x4& parent_transform = aiMatrix4x4() );
-
// ------------------------------------------------------------------------------------------------
void ConvertLights( const Model& model );
-
// ------------------------------------------------------------------------------------------------
void ConvertCameras( const Model& model );
@@ -189,7 +187,6 @@ private:
static const unsigned int NO_MATERIAL_SEPARATION = /* std::numeric_limits<unsigned int>::max() */
static_cast<unsigned int>(-1);
-
// ------------------------------------------------------------------------------------------------
/**
* - if materialIndex == NO_MATERIAL_SEPARATION, materials are not taken into
@@ -341,8 +338,6 @@ private:
typedef std::tuple<std::shared_ptr<KeyTimeList>, std::shared_ptr<KeyValueList>, unsigned int > KeyFrameList;
typedef std::vector<KeyFrameList> KeyFrameListList;
-
-
// ------------------------------------------------------------------------------------------------
KeyFrameListList GetKeyframeList( const std::vector<const AnimationCurveNode*>& nodes, int64_t start, int64_t stop );
@@ -441,6 +436,18 @@ private:
aiScene* const out;
const FBX::Document& doc;
+
+ bool FindTextureIndexByFilename(const Video& video, unsigned int& index) {
+ index = 0;
+ const char* videoFileName = video.FileName().c_str();
+ for (auto texture = textures_converted.begin(); texture != textures_converted.end(); ++texture) {
+ if (!strcmp(texture->first->FileName().c_str(), videoFileName)) {
+ index = texture->second;
+ return true;
+ }
+ }
+ return false;
+ }
};
Converter::Converter( aiScene* out, const Document& doc )
@@ -558,7 +565,6 @@ void Converter::ConvertNodes( uint64_t id, aiNode& parent, const aiMatrix4x4& pa
if ( !name_carrier ) {
nodes_chain.push_back( new aiNode( original_name ) );
- name_carrier = nodes_chain.back();
}
//setup metadata on newest node
@@ -637,7 +643,6 @@ void Converter::ConvertCameras( const Model& model )
}
}
-
void Converter::ConvertLight( const Model& model, const Light& light )
{
lights.push_back( new aiLight() );
@@ -645,7 +650,7 @@ void Converter::ConvertLight( const Model& model, const Light& light )
out_light->mName.Set( FixNodeName( model.Name() ) );
- const float intensity = light.Intensity();
+ const float intensity = light.Intensity() / 100.0f;
const aiVector3D& col = light.Color();
out_light->mColorDiffuse = aiColor3D( col.x, col.y, col.z );
@@ -655,6 +660,11 @@ void Converter::ConvertLight( const Model& model, const Light& light )
out_light->mColorSpecular = out_light->mColorDiffuse;
+ //lights are defined along negative y direction
+ out_light->mPosition = aiVector3D(0.0f);
+ out_light->mDirection = aiVector3D(0.0f, -1.0f, 0.0f);
+ out_light->mUp = aiVector3D(0.0f, 0.0f, -1.0f);
+
switch ( light.LightType() )
{
case Light::Type_Point:
@@ -684,17 +694,23 @@ void Converter::ConvertLight( const Model& model, const Light& light )
ai_assert( false );
}
- // XXX: how to best convert the near and far decay ranges?
+ float decay = light.DecayStart();
switch ( light.DecayType() )
{
case Light::Decay_None:
- out_light->mAttenuationConstant = 1.0f;
+ out_light->mAttenuationConstant = decay;
+ out_light->mAttenuationLinear = 0.0f;
+ out_light->mAttenuationQuadratic = 0.0f;
break;
case Light::Decay_Linear:
- out_light->mAttenuationLinear = 1.0f;
+ out_light->mAttenuationConstant = 0.0f;
+ out_light->mAttenuationLinear = 2.0f / decay;
+ out_light->mAttenuationQuadratic = 0.0f;
break;
case Light::Decay_Quadratic:
- out_light->mAttenuationQuadratic = 1.0f;
+ out_light->mAttenuationConstant = 0.0f;
+ out_light->mAttenuationLinear = 0.0f;
+ out_light->mAttenuationQuadratic = 2.0f / (decay * decay);
break;
case Light::Decay_Cubic:
FBXImporter::LogWarn( "cannot represent cubic attenuation, set to Quadratic" );
@@ -713,10 +729,13 @@ void Converter::ConvertCamera( const Model& model, const Camera& cam )
out_camera->mName.Set( FixNodeName( model.Name() ) );
out_camera->mAspect = cam.AspectWidth() / cam.AspectHeight();
- out_camera->mPosition = cam.Position();
- out_camera->mUp = cam.UpVector();
- out_camera->mLookAt = cam.InterestPosition() - out_camera->mPosition;
+ //cameras are defined along positive x direction
+ out_camera->mPosition = aiVector3D(0.0f);
+ out_camera->mLookAt = aiVector3D(1.0f, 0.0f, 0.0f);
+ out_camera->mUp = aiVector3D(0.0f, 1.0f, 0.0f);
out_camera->mHorizontalFOV = AI_DEG_TO_RAD( cam.FieldOfView() );
+ out_camera->mClipPlaneNear = cam.NearPlane();
+ out_camera->mClipPlaneFar = cam.FarPlane();
}
@@ -761,7 +780,6 @@ const char* Converter::NameTransformationComp( TransformationComp comp )
return NULL;
}
-
const char* Converter::NameTransformationCompProperty( TransformationComp comp )
{
switch ( comp )
@@ -899,7 +917,6 @@ void Converter::GetRotationMatrix( Model::RotOrder mode, const aiVector3D& rotat
}
}
-
bool Converter::NeedsComplexTransformationChain( const Model& model )
{
const PropertyTable& props = model.Props();
@@ -910,7 +927,7 @@ bool Converter::NeedsComplexTransformationChain( const Model& model )
const TransformationComp comp = static_cast< TransformationComp >( i );
if ( comp == TransformationComp_Rotation || comp == TransformationComp_Scaling || comp == TransformationComp_Translation ||
- comp == TransformationComp_GeometricScaling || comp == TransformationComp_GeometricRotation || comp == TransformationComp_GeometricTranslation ) {
+ comp == TransformationComp_GeometricScaling || comp == TransformationComp_GeometricRotation || comp == TransformationComp_GeometricTranslation ) {
continue;
}
@@ -923,14 +940,12 @@ bool Converter::NeedsComplexTransformationChain( const Model& model )
return false;
}
-
std::string Converter::NameTransformationChainNode( const std::string& name, TransformationComp comp )
{
return name + std::string( MAGIC_NODE_TAG ) + "_" + NameTransformationComp( comp );
}
-void Converter::GenerateTransformationNodeChain( const Model& model,
- std::vector<aiNode*>& output_nodes )
+void Converter::GenerateTransformationNodeChain( const Model& model, std::vector<aiNode*>& output_nodes )
{
const PropertyTable& props = model.Props();
const Model::RotOrder rot = model.RotationOrder();
@@ -1045,6 +1060,10 @@ void Converter::GenerateTransformationNodeChain( const Model& model,
continue;
}
+ if ( comp == TransformationComp_PostRotation ) {
+ chain[ i ] = chain[ i ].Inverse();
+ }
+
aiNode* nd = new aiNode();
output_nodes.push_back( nd );
@@ -1067,18 +1086,14 @@ void Converter::GenerateTransformationNodeChain( const Model& model,
}
}
-
void Converter::SetupNodeMetadata( const Model& model, aiNode& nd )
{
const PropertyTable& props = model.Props();
DirectPropertyMap unparsedProperties = props.GetUnparsedProperties();
// create metadata on node
- std::size_t numStaticMetaData = 2;
- aiMetadata* data = new aiMetadata();
- data->mNumProperties = unparsedProperties.size() + numStaticMetaData;
- data->mKeys = new aiString[ data->mNumProperties ]();
- data->mValues = new aiMetadataEntry[ data->mNumProperties ]();
+ const std::size_t numStaticMetaData = 2;
+ aiMetadata* data = aiMetadata::Alloc( static_cast<unsigned int>(unparsedProperties.size() + numStaticMetaData) );
nd.mMetaData = data;
int index = 0;
@@ -1089,22 +1104,22 @@ void Converter::SetupNodeMetadata( const Model& model, aiNode& nd )
// add unparsed properties to the node's metadata
for( const DirectPropertyMap::value_type& prop : unparsedProperties ) {
-
// Interpret the property as a concrete type
- if ( const TypedProperty<bool>* interpreted = prop.second->As<TypedProperty<bool> >() )
+ if ( const TypedProperty<bool>* interpreted = prop.second->As<TypedProperty<bool> >() ) {
data->Set( index++, prop.first, interpreted->Value() );
- else if ( const TypedProperty<int>* interpreted = prop.second->As<TypedProperty<int> >() )
+ } else if ( const TypedProperty<int>* interpreted = prop.second->As<TypedProperty<int> >() ) {
data->Set( index++, prop.first, interpreted->Value() );
- else if ( const TypedProperty<uint64_t>* interpreted = prop.second->As<TypedProperty<uint64_t> >() )
+ } else if ( const TypedProperty<uint64_t>* interpreted = prop.second->As<TypedProperty<uint64_t> >() ) {
data->Set( index++, prop.first, interpreted->Value() );
- else if ( const TypedProperty<float>* interpreted = prop.second->As<TypedProperty<float> >() )
+ } else if ( const TypedProperty<float>* interpreted = prop.second->As<TypedProperty<float> >() ) {
data->Set( index++, prop.first, interpreted->Value() );
- else if ( const TypedProperty<std::string>* interpreted = prop.second->As<TypedProperty<std::string> >() )
+ } else if ( const TypedProperty<std::string>* interpreted = prop.second->As<TypedProperty<std::string> >() ) {
data->Set( index++, prop.first, aiString( interpreted->Value() ) );
- else if ( const TypedProperty<aiVector3D>* interpreted = prop.second->As<TypedProperty<aiVector3D> >() )
+ } else if ( const TypedProperty<aiVector3D>* interpreted = prop.second->As<TypedProperty<aiVector3D> >() ) {
data->Set( index++, prop.first, interpreted->Value() );
- else
- assert( false );
+ } else {
+ ai_assert( false );
+ }
}
}
@@ -1135,7 +1150,6 @@ void Converter::ConvertModel( const Model& model, aiNode& nd, const aiMatrix4x4&
}
}
-
std::vector<unsigned int> Converter::ConvertMesh( const MeshGeometry& mesh, const Model& model,
const aiMatrix4x4& node_global_transform )
{
@@ -1171,7 +1185,6 @@ std::vector<unsigned int> Converter::ConvertMesh( const MeshGeometry& mesh, cons
return temp;
}
-
aiMesh* Converter::SetupEmptyMesh( const MeshGeometry& mesh )
{
aiMesh* const out_mesh = new aiMesh();
@@ -1191,7 +1204,6 @@ aiMesh* Converter::SetupEmptyMesh( const MeshGeometry& mesh )
return out_mesh;
}
-
unsigned int Converter::ConvertMeshSingleMaterial( const MeshGeometry& mesh, const Model& model,
const aiMatrix4x4& node_global_transform )
{
@@ -1394,9 +1406,9 @@ unsigned int Converter::ConvertMeshMultiMaterial( const MeshGeometry& mesh, cons
// allocate tangents, binormals.
const std::vector<aiVector3D>& tangents = mesh.GetTangents();
const std::vector<aiVector3D>* binormals = &mesh.GetBinormals();
+ std::vector<aiVector3D> tempBinormals;
if ( tangents.size() ) {
- std::vector<aiVector3D> tempBinormals;
if ( !binormals->size() ) {
if ( normals.size() ) {
// XXX this computes the binormals for the entire mesh, not only
@@ -1514,7 +1526,6 @@ unsigned int Converter::ConvertMeshMultiMaterial( const MeshGeometry& mesh, cons
return static_cast<unsigned int>( meshes.size() - 1 );
}
-
void Converter::ConvertWeights( aiMesh* out, const Model& model, const MeshGeometry& geo,
const aiMatrix4x4& node_global_transform ,
unsigned int materialIndex,
@@ -1661,7 +1672,6 @@ void Converter::ConvertCluster( std::vector<aiBone*>& bones, const Model& /*mode
}
}
-
void Converter::ConvertMaterialForMesh( aiMesh* out, const Model& model, const MeshGeometry& geo,
MatIndexArray::value_type materialIndex )
{
@@ -1751,7 +1761,7 @@ unsigned int Converter::ConvertVideo( const Video& video )
out_tex->mWidth = static_cast<unsigned int>( video.ContentLength() ); // total data size
out_tex->mHeight = 0; // fixed to 0
- // steal the data from the Video to avoid an additional copy
+ // steal the data from the Video to avoid an additional copy
out_tex->pcData = reinterpret_cast<aiTexel*>( const_cast<Video&>( video ).RelinquishContent() );
// try to extract a hint from the file extension
@@ -1785,22 +1795,32 @@ void Converter::TrySetTextureProperties( aiMaterial* out_mat, const TextureMap&
path.Set( tex->RelativeFilename() );
const Video* media = tex->Media();
- if ( media != 0 && media->ContentLength() > 0 ) {
- unsigned int index;
-
- VideoMap::const_iterator it = textures_converted.find( media );
- if ( it != textures_converted.end() ) {
- index = ( *it ).second;
- }
- else {
- index = ConvertVideo( *media );
- textures_converted[ media ] = index;
- }
-
- // setup texture reference string (copied from ColladaLoader::FindFilenameForEffectTexture)
- path.data[ 0 ] = '*';
- path.length = 1 + ASSIMP_itoa10( path.data + 1, MAXLEN - 1, index );
- }
+ if (media != 0) {
+ bool textureReady = false; //tells if our texture is ready (if it was loaded or if it was found)
+ unsigned int index;
+
+ VideoMap::const_iterator it = textures_converted.find(media);
+ if (it != textures_converted.end()) {
+ index = (*it).second;
+ textureReady = true;
+ }
+ else {
+ if (media->ContentLength() > 0) {
+ index = ConvertVideo(*media);
+ textures_converted[media] = index;
+ textureReady = true;
+ }
+ else if (doc.Settings().searchEmbeddedTextures) { //try to find the texture on the already-loaded textures by the filename, if the flag is on
+ textureReady = FindTextureIndexByFilename(*media, index);
+ }
+ }
+
+ // setup texture reference string (copied from ColladaLoader::FindFilenameForEffectTexture), if the texture is ready
+ if (textureReady) {
+ path.data[0] = '*';
+ path.length = 1 + ASSIMP_itoa10(path.data + 1, MAXLEN - 1, index);
+ }
+ }
out_mat->AddProperty( &path, _AI_MATKEY_TEXTURE_BASE, target, 0 );
@@ -1916,57 +1936,92 @@ void Converter::TrySetTextureProperties( aiMaterial* out_mat, const LayeredTextu
return;
}
- const Texture* const tex = ( *it ).second->getTexture();
+ int texCount = (*it).second->textureCount();
+
+ // Set the blend mode for layered textures
+ int blendmode= (*it).second->GetBlendMode();
+ out_mat->AddProperty(&blendmode,1,_AI_MATKEY_TEXOP_BASE,target,0);
+
+ for(int texIndex = 0; texIndex < texCount; texIndex++){
+
+ const Texture* const tex = ( *it ).second->getTexture(texIndex);
+
+ aiString path;
+ path.Set( tex->RelativeFilename() );
+
+ out_mat->AddProperty( &path, _AI_MATKEY_TEXTURE_BASE, target, texIndex );
- aiString path;
- path.Set( tex->RelativeFilename() );
+ aiUVTransform uvTrafo;
+ // XXX handle all kinds of UV transformations
+ uvTrafo.mScaling = tex->UVScaling();
+ uvTrafo.mTranslation = tex->UVTranslation();
+ out_mat->AddProperty( &uvTrafo, 1, _AI_MATKEY_UVTRANSFORM_BASE, target, texIndex );
- out_mat->AddProperty( &path, _AI_MATKEY_TEXTURE_BASE, target, 0 );
+ const PropertyTable& props = tex->Props();
- aiUVTransform uvTrafo;
- // XXX handle all kinds of UV transformations
- uvTrafo.mScaling = tex->UVScaling();
- uvTrafo.mTranslation = tex->UVTranslation();
- out_mat->AddProperty( &uvTrafo, 1, _AI_MATKEY_UVTRANSFORM_BASE, target, 0 );
+ int uvIndex = 0;
- const PropertyTable& props = tex->Props();
+ bool ok;
+ const std::string& uvSet = PropertyGet<std::string>( props, "UVSet", ok );
+ if ( ok ) {
+ // "default" is the name which usually appears in the FbxFileTexture template
+ if ( uvSet != "default" && uvSet.length() ) {
+ // this is a bit awkward - we need to find a mesh that uses this
+ // material and scan its UV channels for the given UV name because
+ // assimp references UV channels by index, not by name.
- int uvIndex = 0;
+ // XXX: the case that UV channels may appear in different orders
+ // in meshes is unhandled. A possible solution would be to sort
+ // the UV channels alphabetically, but this would have the side
+ // effect that the primary (first) UV channel would sometimes
+ // be moved, causing trouble when users read only the first
+ // UV channel and ignore UV channel assignments altogether.
- bool ok;
- const std::string& uvSet = PropertyGet<std::string>( props, "UVSet", ok );
- if ( ok ) {
- // "default" is the name which usually appears in the FbxFileTexture template
- if ( uvSet != "default" && uvSet.length() ) {
- // this is a bit awkward - we need to find a mesh that uses this
- // material and scan its UV channels for the given UV name because
- // assimp references UV channels by index, not by name.
-
- // XXX: the case that UV channels may appear in different orders
- // in meshes is unhandled. A possible solution would be to sort
- // the UV channels alphabetically, but this would have the side
- // effect that the primary (first) UV channel would sometimes
- // be moved, causing trouble when users read only the first
- // UV channel and ignore UV channel assignments altogether.
-
- const unsigned int matIndex = static_cast<unsigned int>( std::distance( materials.begin(),
- std::find( materials.begin(), materials.end(), out_mat )
- ) );
-
- uvIndex = -1;
- if ( !mesh )
- {
- for( const MeshMap::value_type& v : meshes_converted ) {
- const MeshGeometry* const mesh = dynamic_cast<const MeshGeometry*> ( v.first );
- if ( !mesh ) {
- continue;
- }
+ const unsigned int matIndex = static_cast<unsigned int>( std::distance( materials.begin(),
+ std::find( materials.begin(), materials.end(), out_mat )
+ ) );
- const MatIndexArray& mats = mesh->GetMaterialIndices();
- if ( std::find( mats.begin(), mats.end(), matIndex ) == mats.end() ) {
- continue;
- }
+ uvIndex = -1;
+ if ( !mesh )
+ {
+ for( const MeshMap::value_type& v : meshes_converted ) {
+ const MeshGeometry* const mesh = dynamic_cast<const MeshGeometry*> ( v.first );
+ if ( !mesh ) {
+ continue;
+ }
+
+ const MatIndexArray& mats = mesh->GetMaterialIndices();
+ if ( std::find( mats.begin(), mats.end(), matIndex ) == mats.end() ) {
+ continue;
+ }
+
+ int index = -1;
+ for ( unsigned int i = 0; i < AI_MAX_NUMBER_OF_TEXTURECOORDS; ++i ) {
+ if ( mesh->GetTextureCoords( i ).empty() ) {
+ break;
+ }
+ const std::string& name = mesh->GetTextureCoordChannelName( i );
+ if ( name == uvSet ) {
+ index = static_cast<int>( i );
+ break;
+ }
+ }
+ if ( index == -1 ) {
+ FBXImporter::LogWarn( "did not find UV channel named " + uvSet + " in a mesh using this material" );
+ continue;
+ }
+ if ( uvIndex == -1 ) {
+ uvIndex = index;
+ }
+ else {
+ FBXImporter::LogWarn( "the UV channel named " + uvSet +
+ " appears at different positions in meshes, results will be wrong" );
+ }
+ }
+ }
+ else
+ {
int index = -1;
for ( unsigned int i = 0; i < AI_MAX_NUMBER_OF_TEXTURECOORDS; ++i ) {
if ( mesh->GetTextureCoords( i ).empty() ) {
@@ -1980,48 +2035,22 @@ void Converter::TrySetTextureProperties( aiMaterial* out_mat, const LayeredTextu
}
if ( index == -1 ) {
FBXImporter::LogWarn( "did not find UV channel named " + uvSet + " in a mesh using this material" );
- continue;
}
if ( uvIndex == -1 ) {
uvIndex = index;
}
- else {
- FBXImporter::LogWarn( "the UV channel named " + uvSet +
- " appears at different positions in meshes, results will be wrong" );
- }
- }
- }
- else
- {
- int index = -1;
- for ( unsigned int i = 0; i < AI_MAX_NUMBER_OF_TEXTURECOORDS; ++i ) {
- if ( mesh->GetTextureCoords( i ).empty() ) {
- break;
- }
- const std::string& name = mesh->GetTextureCoordChannelName( i );
- if ( name == uvSet ) {
- index = static_cast<int>( i );
- break;
- }
- }
- if ( index == -1 ) {
- FBXImporter::LogWarn( "did not find UV channel named " + uvSet + " in a mesh using this material" );
}
if ( uvIndex == -1 ) {
- uvIndex = index;
+ FBXImporter::LogWarn( "failed to resolve UV channel " + uvSet + ", using first UV channel" );
+ uvIndex = 0;
}
}
-
- if ( uvIndex == -1 ) {
- FBXImporter::LogWarn( "failed to resolve UV channel " + uvSet + ", using first UV channel" );
- uvIndex = 0;
- }
}
- }
- out_mat->AddProperty( &uvIndex, 1, _AI_MATKEY_UVWSRC_BASE, target, 0 );
+ out_mat->AddProperty( &uvIndex, 1, _AI_MATKEY_UVWSRC_BASE, target, texIndex );
+ }
}
void Converter::SetTextureProperties( aiMaterial* out_mat, const TextureMap& textures, const MeshGeometry* const mesh )
@@ -2030,6 +2059,7 @@ void Converter::SetTextureProperties( aiMaterial* out_mat, const TextureMap& tex
TrySetTextureProperties( out_mat, textures, "AmbientColor", aiTextureType_AMBIENT, mesh );
TrySetTextureProperties( out_mat, textures, "EmissiveColor", aiTextureType_EMISSIVE, mesh );
TrySetTextureProperties( out_mat, textures, "SpecularColor", aiTextureType_SPECULAR, mesh );
+ TrySetTextureProperties( out_mat, textures, "SpecularFactor", aiTextureType_SPECULAR, mesh);
TrySetTextureProperties( out_mat, textures, "TransparentColor", aiTextureType_OPACITY, mesh );
TrySetTextureProperties( out_mat, textures, "ReflectionColor", aiTextureType_REFLECTION, mesh );
TrySetTextureProperties( out_mat, textures, "DisplacementColor", aiTextureType_DISPLACEMENT, mesh );
@@ -2044,6 +2074,7 @@ void Converter::SetTextureProperties( aiMaterial* out_mat, const LayeredTextureM
TrySetTextureProperties( out_mat, layeredTextures, "AmbientColor", aiTextureType_AMBIENT, mesh );
TrySetTextureProperties( out_mat, layeredTextures, "EmissiveColor", aiTextureType_EMISSIVE, mesh );
TrySetTextureProperties( out_mat, layeredTextures, "SpecularColor", aiTextureType_SPECULAR, mesh );
+ TrySetTextureProperties( out_mat, layeredTextures, "SpecularFactor", aiTextureType_SPECULAR, mesh);
TrySetTextureProperties( out_mat, layeredTextures, "TransparentColor", aiTextureType_OPACITY, mesh );
TrySetTextureProperties( out_mat, layeredTextures, "ReflectionColor", aiTextureType_REFLECTION, mesh );
TrySetTextureProperties( out_mat, layeredTextures, "DisplacementColor", aiTextureType_DISPLACEMENT, mesh );
@@ -2125,6 +2156,16 @@ void Converter::SetShadingPropertiesCommon( aiMaterial* out_mat, const PropertyT
if ( ok ) {
out_mat->AddProperty( &ShininessExponent, 1, AI_MATKEY_SHININESS );
}
+
+ const float BumpFactor = PropertyGet<float>(props, "BumpFactor", ok);
+ if (ok) {
+ out_mat->AddProperty(&BumpFactor, 1, AI_MATKEY_BUMPSCALING);
+ }
+
+ const float DispFactor = PropertyGet<float>(props, "DisplacementFactor", ok);
+ if (ok) {
+ out_mat->AddProperty(&DispFactor, 1, "$mat.displacementscaling", 0, 0);
+ }
}
@@ -2194,9 +2235,17 @@ void Converter::ConvertAnimations()
}
}
+void Converter::RenameNode( const std::string& fixed_name, const std::string& new_name ) {
+ if ( node_names.find( fixed_name ) == node_names.end() ) {
+ FBXImporter::LogError( "Cannot rename node " + fixed_name + ", not existing.");
+ return;
+ }
+
+ if ( node_names.find( new_name ) != node_names.end() ) {
+ FBXImporter::LogError( "Cannot rename node " + fixed_name + " to " + new_name +", name already existing." );
+ return;
+ }
-void Converter::RenameNode( const std::string& fixed_name, const std::string& new_name )
-{
ai_assert( node_names.find( fixed_name ) != node_names.end() );
ai_assert( node_names.find( new_name ) == node_names.end() );
@@ -2326,8 +2375,13 @@ void Converter::ConvertAnimationStack( const AnimationStack& st )
int64_t start_time = st.LocalStart();
int64_t stop_time = st.LocalStop();
- double start_timeF = CONVERT_FBX_TIME( start_time );
- double stop_timeF = CONVERT_FBX_TIME( stop_time );
+ bool has_local_startstop = start_time != 0 || stop_time != 0;
+ if ( !has_local_startstop ) {
+ // no time range given, so accept every keyframe and use the actual min/max time
+ // the numbers are INT64_MIN/MAX, the 20000 is for safety because GenerateNodeAnimations uses an epsilon of 10000
+ start_time = -9223372036854775807ll + 20000;
+ stop_time = 9223372036854775807ll - 20000;
+ }
try {
for( const NodeMap::value_type& kv : node_map ) {
@@ -2359,30 +2413,27 @@ void Converter::ConvertAnimationStack( const AnimationStack& st )
return;
}
- //adjust relative timing for animation
- {
- double start_fps = start_timeF * anim_fps;
-
- for ( unsigned int c = 0; c < anim->mNumChannels; c++ )
- {
- aiNodeAnim* channel = anim->mChannels[ c ];
- for ( uint32_t i = 0; i < channel->mNumPositionKeys; i++ )
- channel->mPositionKeys[ i ].mTime -= start_fps;
- for ( uint32_t i = 0; i < channel->mNumRotationKeys; i++ )
- channel->mRotationKeys[ i ].mTime -= start_fps;
- for ( uint32_t i = 0; i < channel->mNumScalingKeys; i++ )
- channel->mScalingKeys[ i ].mTime -= start_fps;
- }
+ double start_time_fps = has_local_startstop ? (CONVERT_FBX_TIME(start_time) * anim_fps) : min_time;
+ double stop_time_fps = has_local_startstop ? (CONVERT_FBX_TIME(stop_time) * anim_fps) : max_time;
- max_time -= min_time;
+ // adjust relative timing for animation
+ for ( unsigned int c = 0; c < anim->mNumChannels; c++ ) {
+ aiNodeAnim* channel = anim->mChannels[ c ];
+ for ( uint32_t i = 0; i < channel->mNumPositionKeys; i++ )
+ channel->mPositionKeys[ i ].mTime -= start_time_fps;
+ for ( uint32_t i = 0; i < channel->mNumRotationKeys; i++ )
+ channel->mRotationKeys[ i ].mTime -= start_time_fps;
+ for ( uint32_t i = 0; i < channel->mNumScalingKeys; i++ )
+ channel->mScalingKeys[ i ].mTime -= start_time_fps;
}
// for some mysterious reason, mDuration is simply the maximum key -- the
// validator always assumes animations to start at zero.
- anim->mDuration = ( stop_timeF - start_timeF ) * anim_fps;
+ anim->mDuration = stop_time_fps - start_time_fps;
anim->mTicksPerSecond = anim_fps;
}
+#ifdef ASSIMP_BUILD_DEBUG
// ------------------------------------------------------------------------------------------------
// sanity check whether the input is ok
static void validateAnimCurveNodes( const std::vector<const AnimationCurveNode*>& curves,
@@ -2400,6 +2451,7 @@ static void validateAnimCurveNodes( const std::vector<const AnimationCurveNode*>
}
}
}
+#endif // ASSIMP_BUILD_DEBUG
// ------------------------------------------------------------------------------------------------
void Converter::GenerateNodeAnimations( std::vector<aiNodeAnim*>& node_anims,
@@ -2691,10 +2743,10 @@ aiNodeAnim* Converter::GenerateRotationNodeAnim( const std::string& name,
double& max_time,
double& min_time )
{
- ScopeGuard<aiNodeAnim> na( new aiNodeAnim() );
+ std::unique_ptr<aiNodeAnim> na( new aiNodeAnim() );
na->mNodeName.Set( name );
- ConvertRotationKeys( na, curves, layer_map, start, stop, max_time, min_time, target.RotationOrder() );
+ ConvertRotationKeys( na.get(), curves, layer_map, start, stop, max_time, min_time, target.RotationOrder() );
// dummy scaling key
na->mScalingKeys = new aiVectorKey[ 1 ];
@@ -2710,7 +2762,7 @@ aiNodeAnim* Converter::GenerateRotationNodeAnim( const std::string& name,
na->mPositionKeys[ 0 ].mTime = 0.;
na->mPositionKeys[ 0 ].mValue = aiVector3D();
- return na.dismiss();
+ return na.release();
}
aiNodeAnim* Converter::GenerateScalingNodeAnim( const std::string& name,
@@ -2721,10 +2773,10 @@ aiNodeAnim* Converter::GenerateScalingNodeAnim( const std::string& name,
double& max_time,
double& min_time )
{
- ScopeGuard<aiNodeAnim> na( new aiNodeAnim() );
+ std::unique_ptr<aiNodeAnim> na( new aiNodeAnim() );
na->mNodeName.Set( name );
- ConvertScaleKeys( na, curves, layer_map, start, stop, max_time, min_time );
+ ConvertScaleKeys( na.get(), curves, layer_map, start, stop, max_time, min_time );
// dummy rotation key
na->mRotationKeys = new aiQuatKey[ 1 ];
@@ -2740,7 +2792,7 @@ aiNodeAnim* Converter::GenerateScalingNodeAnim( const std::string& name,
na->mPositionKeys[ 0 ].mTime = 0.;
na->mPositionKeys[ 0 ].mValue = aiVector3D();
- return na.dismiss();
+ return na.release();
}
@@ -2753,10 +2805,10 @@ aiNodeAnim* Converter::GenerateTranslationNodeAnim( const std::string& name,
double& min_time,
bool inverse )
{
- ScopeGuard<aiNodeAnim> na( new aiNodeAnim() );
+ std::unique_ptr<aiNodeAnim> na( new aiNodeAnim() );
na->mNodeName.Set( name );
- ConvertTranslationKeys( na, curves, layer_map, start, stop, max_time, min_time );
+ ConvertTranslationKeys( na.get(), curves, layer_map, start, stop, max_time, min_time );
if ( inverse ) {
for ( unsigned int i = 0; i < na->mNumPositionKeys; ++i ) {
@@ -2778,7 +2830,7 @@ aiNodeAnim* Converter::GenerateTranslationNodeAnim( const std::string& name,
na->mRotationKeys[ 0 ].mTime = 0.;
na->mRotationKeys[ 0 ].mValue = aiQuaternion();
- return na.dismiss();
+ return na.release();
}
aiNodeAnim* Converter::GenerateSimpleNodeAnim( const std::string& name,
@@ -2792,7 +2844,7 @@ aiNodeAnim* Converter::GenerateSimpleNodeAnim( const std::string& name,
bool reverse_order )
{
- ScopeGuard<aiNodeAnim> na( new aiNodeAnim() );
+ std::unique_ptr<aiNodeAnim> na( new aiNodeAnim() );
na->mNodeName.Set( name );
const PropertyTable& props = target.Props();
@@ -2864,7 +2916,7 @@ aiNodeAnim* Converter::GenerateSimpleNodeAnim( const std::string& name,
// which requires all of rotation, scaling and translation
// to be set.
if ( chain[ TransformationComp_Scaling ] != iter_end ) {
- ConvertScaleKeys( na, ( *chain[ TransformationComp_Scaling ] ).second,
+ ConvertScaleKeys( na.get(), ( *chain[ TransformationComp_Scaling ] ).second,
layer_map,
start, stop,
max_time,
@@ -2880,7 +2932,7 @@ aiNodeAnim* Converter::GenerateSimpleNodeAnim( const std::string& name,
}
if ( chain[ TransformationComp_Rotation ] != iter_end ) {
- ConvertRotationKeys( na, ( *chain[ TransformationComp_Rotation ] ).second,
+ ConvertRotationKeys( na.get(), ( *chain[ TransformationComp_Rotation ] ).second,
layer_map,
start, stop,
max_time,
@@ -2898,7 +2950,7 @@ aiNodeAnim* Converter::GenerateSimpleNodeAnim( const std::string& name,
}
if ( chain[ TransformationComp_Translation ] != iter_end ) {
- ConvertTranslationKeys( na, ( *chain[ TransformationComp_Translation ] ).second,
+ ConvertTranslationKeys( na.get(), ( *chain[ TransformationComp_Translation ] ).second,
layer_map,
start, stop,
max_time,
@@ -2914,7 +2966,7 @@ aiNodeAnim* Converter::GenerateSimpleNodeAnim( const std::string& name,
}
}
- return na.dismiss();
+ return na.release();
}
Converter::KeyFrameListList Converter::GetKeyframeList( const std::vector<const AnimationCurveNode*>& nodes, int64_t start, int64_t stop )
@@ -2953,10 +3005,10 @@ Converter::KeyFrameListList Converter::GetKeyframeList( const std::vector<const
//get values within the start/stop time window
std::shared_ptr<KeyTimeList> Keys( new KeyTimeList() );
std::shared_ptr<KeyValueList> Values( new KeyValueList() );
- const int count = curve->GetKeys().size();
+ const size_t count = curve->GetKeys().size();
Keys->reserve( count );
Values->reserve( count );
- for ( int n = 0; n < count; n++ )
+ for (size_t n = 0; n < count; n++ )
{
int64_t k = curve->GetKeys().at( n );
if ( k >= adj_start && k <= adj_stop )
@@ -3037,7 +3089,7 @@ void Converter::InterpolateKeys( aiVectorKey* valOut, const KeyTimeList& keys, c
next_pos.resize( inputs.size(), 0 );
for( KeyTimeList::value_type time : keys ) {
- float result[ 3 ] = { def_value.x, def_value.y, def_value.z };
+ ai_real result[ 3 ] = { def_value.x, def_value.y, def_value.z };
for ( size_t i = 0; i < count; ++i ) {
const KeyFrameList& kfl = inputs[ i ];
@@ -3057,10 +3109,8 @@ void Converter::InterpolateKeys( aiVectorKey* valOut, const KeyTimeList& keys, c
const KeyTimeList::value_type timeA = std::get<0>(kfl)->at( id0 );
const KeyTimeList::value_type timeB = std::get<0>(kfl)->at( id1 );
- // do the actual interpolation in double-precision arithmetics
- // because it is a bit sensitive to rounding errors.
- const double factor = timeB == timeA ? 0. : static_cast<double>( ( time - timeA ) / ( timeB - timeA ) );
- const float interpValue = static_cast<float>( valueA + ( valueB - valueA ) * factor );
+ const ai_real factor = timeB == timeA ? ai_real(0.) : static_cast<ai_real>( ( time - timeA ) ) / ( timeB - timeA );
+ const ai_real interpValue = static_cast<ai_real>( valueA + ( valueB - valueA ) * factor );
result[ std::get<2>(kfl) ] = interpValue;
}
@@ -3079,7 +3129,6 @@ void Converter::InterpolateKeys( aiVectorKey* valOut, const KeyTimeList& keys, c
}
}
-
void Converter::InterpolateKeys( aiQuatKey* valOut, const KeyTimeList& keys, const KeyFrameListList& inputs,
const aiVector3D& def_value,
double& maxTime,
@@ -3100,7 +3149,6 @@ void Converter::InterpolateKeys( aiQuatKey* valOut, const KeyTimeList& keys, con
valOut[ i ].mTime = temp[ i ].mTime;
-
GetRotationMatrix( order, temp[ i ].mValue, m );
aiQuaternion quat = aiQuaternion( aiMatrix3x3( m ) );
@@ -3119,7 +3167,6 @@ void Converter::InterpolateKeys( aiQuatKey* valOut, const KeyTimeList& keys, con
}
}
-
void Converter::ConvertTransformOrder_TRStoSRT( aiQuatKey* out_quat, aiVectorKey* out_scale,
aiVectorKey* out_translation,
const KeyFrameListList& scaling,
@@ -3178,7 +3225,6 @@ void Converter::ConvertTransformOrder_TRStoSRT( aiQuatKey* out_quat, aiVectorKey
}
}
-
aiQuaternion Converter::EulerToQuaternion( const aiVector3D& rot, Model::RotOrder order )
{
aiMatrix4x4 m;
@@ -3187,7 +3233,6 @@ aiQuaternion Converter::EulerToQuaternion( const aiVector3D& rot, Model::RotOrde
return aiQuaternion( aiMatrix3x3( m ) );
}
-
void Converter::ConvertScaleKeys( aiNodeAnim* na, const std::vector<const AnimationCurveNode*>& nodes, const LayerMap& /*layers*/,
int64_t start, int64_t stop,
double& maxTime,
@@ -3208,7 +3253,6 @@ void Converter::ConvertScaleKeys( aiNodeAnim* na, const std::vector<const Animat
InterpolateKeys( na->mScalingKeys, keys, inputs, aiVector3D( 1.0f, 1.0f, 1.0f ), maxTime, minTime );
}
-
void Converter::ConvertTranslationKeys( aiNodeAnim* na, const std::vector<const AnimationCurveNode*>& nodes,
const LayerMap& /*layers*/,
int64_t start, int64_t stop,
@@ -3227,7 +3271,6 @@ void Converter::ConvertTranslationKeys( aiNodeAnim* na, const std::vector<const
InterpolateKeys( na->mPositionKeys, keys, inputs, aiVector3D( 0.0f, 0.0f, 0.0f ), maxTime, minTime );
}
-
void Converter::ConvertRotationKeys( aiNodeAnim* na, const std::vector<const AnimationCurveNode*>& nodes,
const LayerMap& /*layers*/,
int64_t start, int64_t stop,
@@ -3249,7 +3292,8 @@ void Converter::ConvertRotationKeys( aiNodeAnim* na, const std::vector<const Ani
void Converter::TransferDataToScene()
{
- ai_assert( !out->mMeshes && !out->mNumMeshes );
+ ai_assert( !out->mMeshes );
+ ai_assert( !out->mNumMeshes );
// note: the trailing () ensures initialization with NULL - not
// many C++ users seem to know this, so pointing it out to avoid