WIP change BFS to DFS for rig building and writing
This commit is contained in:
parent d590a3894f
commit 89338acbea
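The pattern of the change is the same in all three tree walks below: the BFS std::queue becomes a DFS std::stack, front() becomes top(), and the child-iteration order is adjusted to compensate for the stack reversing push order. A minimal sketch of the idea on a hypothetical Node type (names illustrative, not from this repo):

    #include <stack>
    #include <vector>

    struct Node { std::vector<Node*> children; };  // stand-in for the rig node types

    // Iterative depth-first (preorder) traversal with an explicit stack.
    // Children are pushed in reverse so they pop in their original order.
    void VisitDepthFirst(Node* root)
    {
      std::stack<Node*> nodeStack;
      nodeStack.push(root);
      while (!nodeStack.empty())
      {
        Node* node = nodeStack.top();
        nodeStack.pop();
        // ... process node here ...
        for (auto it = node->children.rbegin(); it != node->children.rend(); ++it)
          nodeStack.push(*it);
      }
    }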
@@ -17,7 +17,7 @@
 #include <fstream>
 #include <iostream>
 
-#include <queue>
+#include <stack>
 
 namespace SH_COMP
 {
@@ -138,7 +138,7 @@ namespace SH_COMP
     }
   }
 
-  void MeshCompiler::BuildHeaders(ModelAsset& asset) noexcept
+  void MeshCompiler::BuildHeaders(ModelRef asset) noexcept
   {
     // Mesh Headers
     asset.meshHeaders.resize(asset.meshes.size());
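BuildHeaders (and LoadFromFile below) now takes ModelRef instead of ModelAsset&. The alias itself is not part of this diff; presumably it is declared elsewhere along the lines of:

    using ModelRef = ModelAsset&;  // assumed definition, not shown in this commit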
@@ -254,19 +254,19 @@ namespace SH_COMP
     return new RigNodeData(inNode->mName.C_Str(), aiTransformToMat4(inNode->mTransformation));
   }
 
-  void MeshCompiler::LoadFromFile(AssetPath path, ModelAsset& asset) noexcept
+  void MeshCompiler::LoadFromFile(AssetPath path, ModelRef asset) noexcept
   {
     const aiScene* scene = aiImporter.ReadFile(path.string().c_str(),
-      aiProcess_Triangulate               // Make sure we get triangles rather than nvert polygons
+      aiProcess_Triangulate               // Make sure we get triangles rather than nvert polygons
       | aiProcess_GenUVCoords             // Convert any type of mapping to uv mapping
       | aiProcess_TransformUVCoords       // preprocess UV transformations (scaling, translation ...)
       | aiProcess_FindInstances           // search for instanced meshes and remove them by references to one master
       | aiProcess_CalcTangentSpace        // calculate tangents and bitangents if possible
       | aiProcess_JoinIdenticalVertices   // join identical vertices/ optimize indexing
-      | aiProcess_FindInvalidData         // detect invalid model data, such as invalid normal vectors
+      | aiProcess_FindInvalidData         // detect invalid model data, such as invalid normal vector
       | aiProcess_FlipUVs                 // flip the V to match the Vulkans way of doing UVs
       | aiProcess_ValidateDataStructure   // checks all bones, animations and vertices are linked correctly
-      | aiProcess_LimitBoneWeights        // Limit number of bones effect vertices to 4
+      //| aiProcess_LimitBoneWeights      // Limit number of bones effect vertices to 4
     );
 
     if (!scene || !scene->HasMeshes())
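If ReadFile fails, or succeeds without meshes, Assimp records a human-readable reason that can be queried from the importer. A hedged sketch of what the failure branch might log (the message wording is illustrative; Importer::GetErrorString() is real Assimp API):

    if (!scene || !scene->HasMeshes())
    {
      std::cerr << "Failed to load model from " << path.string()
                << ": " << aiImporter.GetErrorString() << std::endl;
      return;
    }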
@@ -286,25 +286,26 @@ namespace SH_COMP
   {
     // Build implementation copy of armature tree
    // node collection write done later when writing to file
-    std::queue<std::pair<RigNodeData*, AiNodeConstPtr>> nodeQueue;
+    std::stack<std::pair<RigNodeData*, AiNodeConstPtr>> nodeStack;
 
-    nodeQueue.emplace(PairHelper(baseNode));
-    rig.root = nodeQueue.front().first;
+    nodeStack.emplace(PairHelper(baseNode));
+    rig.root = nodeStack.top().first;
     rig.header.nodeCount++;
     rig.header.charCounts.push_back(rig.root->name.length());
 
-    while(!nodeQueue.empty())
+    while(!nodeStack.empty())
     {
-      auto currPair = nodeQueue.front();
-      nodeQueue.pop();
+      auto currPair = nodeStack.top();
+      nodeStack.pop();
       auto currNode = currPair.first;
       auto const& currAiNode = currPair.second;
 
-      for (auto i {0}; i < currAiNode->mNumChildren; ++i)
+      int const iStart {static_cast<int>(currAiNode->mNumChildren - 1)};
+      for (int i {iStart}; i >= 0; --i)
       {
         auto newPair = PairHelper(currAiNode->mChildren[i]);
         currNode->children.push_back(newPair.first);
-        nodeQueue.push(newPair);
+        nodeStack.push(newPair);
 
         rig.header.nodeCount++;
         rig.header.charCounts.push_back(newPair.first->name.length());
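Why the new loop runs backwards: a stack reverses push order, so pushing children from the last index down to 0 makes them pop in their original order, and the rig is visited in conventional preorder. A short trace, assuming a node with three children [A, B, C]:

    // mNumChildren == 3, so iStart == 2:
    //   pushes children[2] (C), then children[1] (B), then children[0] (A)
    //   A is on top, so subtrees are visited A, B, C -- i.e. preorder
    // Note: currNode->children itself is filled in push order (C, B, A).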
@@ -11,7 +11,7 @@
 #include "MeshWriter.h"
 #include <fstream>
 #include <iostream>
-#include <queue>
+#include <stack>
 
 namespace SH_COMP
 {
@@ -184,26 +184,27 @@ namespace SH_COMP
     std::vector<RigNodeDataWrite> dataToWrite;
     dataToWrite.reserve(rig.header.nodeCount);
 
-    std::queue<std::pair<RigWriteNode*, RigNodeData*>> nodeQueue;
+    std::stack<std::pair<RigWriteNode*, RigNodeData*>> nodeStack;
     treeRoot = new RigWriteNode;
     treeRoot->id = 0;
     treeRoot->children.clear();
-    nodeQueue.emplace(std::make_pair(treeRoot, rig.root));
+    nodeStack.emplace(std::make_pair(treeRoot, rig.root));
     dataToWrite.emplace_back(rig.root->name, rig.root->transform);
 
-    while(!nodeQueue.empty())
+    while(!nodeStack.empty())
     {
-      auto currPair = nodeQueue.front();
-      nodeQueue.pop();
+      auto currPair = nodeStack.top();
+      nodeStack.pop();
       auto currWriteNode = currPair.first;
       auto currDataNode = currPair.second;
 
-      for (auto child : currDataNode->children)
+      for (int i {static_cast<int>(currDataNode->children.size() - 1)}; i >= 0; --i)
       {
+        auto child = currDataNode->children[i];
         auto newPair = std::make_pair(new RigWriteNode(), child);
         newPair.first->id = dataToWrite.size();
         currWriteNode->children.push_back(newPair.first);
-        nodeQueue.push(newPair);
+        nodeStack.push(newPair);
 
         dataToWrite.emplace_back(child->name, child->transform);
       }
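The id/index invariant survives the switch: each new RigWriteNode's id is taken from dataToWrite.size() right before the matching emplace_back, so dataToWrite[node->id] always holds that node's name and transform; only the numbering order changes from level order to the DFS enumeration order. A couple of hypothetical post-conditions that should hold if the traversal is right (illustrative, not from the commit):

    #include <cassert>
    assert(treeRoot->id == 0);                           // root was seeded with id 0
    assert(dataToWrite.size() == rig.header.nodeCount);  // one entry per rig node

Since the commit is WIP, sibling ordering deserves a second look: BuildRig stores children reversed (C, B, A), this loop enumerates them reversed again (back to A, B, C), but pushing in that order means their subtrees pop in reverse (C's subtree first).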
@@ -223,13 +224,13 @@ namespace SH_COMP
 
   void MeshWriter::WriteRigTree(FileReference file, RigWriteNode const* root)
   {
-    std::queue<RigWriteNode const*> nodeQueue;
-    nodeQueue.push(root);
+    std::stack<RigWriteNode const*> nodeStack;
+    nodeStack.push(root);
 
-    while(!nodeQueue.empty())
+    while(!nodeStack.empty())
     {
-      auto node = nodeQueue.front();
-      nodeQueue.pop();
+      auto node = nodeStack.top();
+      nodeStack.pop();
 
       file.write(
         reinterpret_cast<char const*>(&node->id),
@@ -245,7 +246,7 @@ namespace SH_COMP
 
       for (auto child : node->children)
       {
-        nodeQueue.push(child);
+        nodeStack.push(child);
       }
     }
   }
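A final asymmetry to flag in this WIP state: unlike the two loops above, WriteRigTree pushes children in forward order, so siblings' records are emitted in reverse. If the on-disk order is expected to match the order used to number the nodes, this loop would presumably need reversing as well, e.g.:

    // Hypothetical adjustment mirroring the reversed loops above:
    for (auto it = node->children.rbegin(); it != node->children.rend(); ++it)
    {
      nodeStack.push(*it);
    }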