WIP: change BFS to DFS for rig building and writing

This commit is contained in:
Xiao Qi 2023-01-17 19:45:20 +08:00
parent d590a3894f
commit 89338acbea
2 changed files with 30 additions and 29 deletions

View File

@ -17,7 +17,7 @@
#include <fstream> #include <fstream>
#include <iostream> #include <iostream>
#include <queue> #include <stack>
namespace SH_COMP namespace SH_COMP
{ {
@ -138,7 +138,7 @@ namespace SH_COMP
} }
} }
void MeshCompiler::BuildHeaders(ModelAsset& asset) noexcept void MeshCompiler::BuildHeaders(ModelRef asset) noexcept
{ {
// Mesh Headers // Mesh Headers
asset.meshHeaders.resize(asset.meshes.size()); asset.meshHeaders.resize(asset.meshes.size());
@ -254,19 +254,19 @@ namespace SH_COMP
return new RigNodeData(inNode->mName.C_Str(), aiTransformToMat4(inNode->mTransformation)); return new RigNodeData(inNode->mName.C_Str(), aiTransformToMat4(inNode->mTransformation));
} }
void MeshCompiler::LoadFromFile(AssetPath path, ModelAsset& asset) noexcept void MeshCompiler::LoadFromFile(AssetPath path, ModelRef asset) noexcept
{ {
const aiScene* scene = aiImporter.ReadFile(path.string().c_str(), const aiScene* scene = aiImporter.ReadFile(path.string().c_str(),
aiProcess_Triangulate // Make sure we get triangles rather than nvert polygons aiProcess_Triangulate // Make sure we get triangles rather than nvert polygons
| aiProcess_GenUVCoords // Convert any type of mapping to uv mapping | aiProcess_GenUVCoords // Convert any type of mapping to uv mapping
| aiProcess_TransformUVCoords // preprocess UV transformations (scaling, translation ...) | aiProcess_TransformUVCoords // preprocess UV transformations (scaling, translation ...)
| aiProcess_FindInstances // search for instanced meshes and remove them by references to one master | aiProcess_FindInstances // search for instanced meshes and remove them by references to one master
| aiProcess_CalcTangentSpace // calculate tangents and bitangents if possible | aiProcess_CalcTangentSpace // calculate tangents and bitangents if possible
| aiProcess_JoinIdenticalVertices // join identical vertices/ optimize indexing | aiProcess_JoinIdenticalVertices // join identical vertices/ optimize indexing
| aiProcess_FindInvalidData // detect invalid model data, such as invalid normal vectors | aiProcess_FindInvalidData // detect invalid model data, such as invalid normal vector
| aiProcess_FlipUVs // flip the V to match the Vulkans way of doing UVs | aiProcess_FlipUVs // flip the V to match the Vulkans way of doing UVs
| aiProcess_ValidateDataStructure // checks all bones, animations and vertices are linked correctly | aiProcess_ValidateDataStructure // checks all bones, animations and vertices are linked correctly
| aiProcess_LimitBoneWeights // Limit number of bones effect vertices to 4 //| aiProcess_LimitBoneWeights // Limit number of bones effect vertices to 4
); );
if (!scene || !scene->HasMeshes()) if (!scene || !scene->HasMeshes())
@ -286,25 +286,25 @@ namespace SH_COMP
{ {
// Build implementation copy of armature tree // Build implementation copy of armature tree
// node collection write done later when writing to file // node collection write done later when writing to file
std::queue<std::pair<RigNodeData*, AiNodeConstPtr>> nodeQueue; std::stack<std::pair<RigNodeData*, AiNodeConstPtr>> nodeStack;
nodeQueue.emplace(PairHelper(baseNode)); nodeStack.emplace(PairHelper(baseNode));
rig.root = nodeQueue.front().first; rig.root = nodeStack.top().first;
rig.header.nodeCount++; rig.header.nodeCount++;
rig.header.charCounts.push_back(rig.root->name.length()); rig.header.charCounts.push_back(rig.root->name.length());
while(!nodeQueue.empty()) while(!nodeStack.empty())
{ {
auto currPair = nodeQueue.front(); auto currPair = nodeStack.top();
nodeQueue.pop(); nodeStack.pop();
auto currNode = currPair.first; auto currNode = currPair.first;
auto const& currAiNode = currPair.second; auto const& currAiNode = currPair.second;
int const iStart {static_cast<int>(currAiNode->mNumChildren - 1)};
for (auto i {0}; i < currAiNode->mNumChildren; ++i) for (int i {iStart}; i >= 0 ; --i)
{ {
auto newPair = PairHelper(currAiNode->mChildren[i]); auto newPair = PairHelper(currAiNode->mChildren[i]);
currNode->children.push_back(newPair.first); currNode->children.push_back(newPair.first);
nodeQueue.push(newPair); nodeStack.push(newPair);
rig.header.nodeCount++; rig.header.nodeCount++;
rig.header.charCounts.push_back(newPair.first->name.length()); rig.header.charCounts.push_back(newPair.first->name.length());

View File

@ -11,7 +11,7 @@
#include "MeshWriter.h" #include "MeshWriter.h"
#include <fstream> #include <fstream>
#include <iostream> #include <iostream>
#include <queue> #include <stack>
namespace SH_COMP namespace SH_COMP
{ {
@ -184,26 +184,27 @@ namespace SH_COMP
std::vector<RigNodeDataWrite> dataToWrite; std::vector<RigNodeDataWrite> dataToWrite;
dataToWrite.reserve(rig.header.nodeCount); dataToWrite.reserve(rig.header.nodeCount);
std::queue<std::pair<RigWriteNode*, RigNodeData*>> nodeQueue; std::stack<std::pair<RigWriteNode*, RigNodeData*>> nodeStack;
treeRoot = new RigWriteNode; treeRoot = new RigWriteNode;
treeRoot->id = 0; treeRoot->id = 0;
treeRoot->children.clear(); treeRoot->children.clear();
nodeQueue.emplace(std::make_pair(treeRoot, rig.root)); nodeStack.emplace(std::make_pair(treeRoot, rig.root));
dataToWrite.emplace_back(rig.root->name, rig.root->transform); dataToWrite.emplace_back(rig.root->name, rig.root->transform);
while(!nodeQueue.empty()) while(!nodeStack.empty())
{ {
auto currPair = nodeQueue.front(); auto currPair = nodeStack.top();
nodeQueue.pop(); nodeStack.pop();
auto currWriteNode = currPair.first; auto currWriteNode = currPair.first;
auto currDataNode = currPair.second; auto currDataNode = currPair.second;
for (auto child : currDataNode->children) for (int i {static_cast<int>(currDataNode->children.size() - 1)}; i >= 0; --i)
{ {
auto child = currDataNode->children[i];
auto newPair = std::make_pair(new RigWriteNode(), child); auto newPair = std::make_pair(new RigWriteNode(), child);
newPair.first->id = dataToWrite.size(); newPair.first->id = dataToWrite.size();
currWriteNode->children.push_back(newPair.first); currWriteNode->children.push_back(newPair.first);
nodeQueue.push(newPair); nodeStack.push(newPair);
dataToWrite.emplace_back(child->name, child->transform); dataToWrite.emplace_back(child->name, child->transform);
} }
@ -223,13 +224,13 @@ namespace SH_COMP
void MeshWriter::WriteRigTree(FileReference file, RigWriteNode const* root) void MeshWriter::WriteRigTree(FileReference file, RigWriteNode const* root)
{ {
std::queue<RigWriteNode const*> nodeQueue; std::stack<RigWriteNode const*> nodeStack;
nodeQueue.push(root); nodeStack.push(root);
while(!nodeQueue.empty()) while(!nodeStack.empty())
{ {
auto node = nodeQueue.front(); auto node = nodeStack.top();
nodeQueue.pop(); nodeStack.pop();
file.write( file.write(
reinterpret_cast<char const*>(&node->id), reinterpret_cast<char const*>(&node->id),
@ -245,7 +246,7 @@ namespace SH_COMP
for (auto child : node->children) for (auto child : node->children)
{ {
nodeQueue.push(child); nodeStack.push(child);
} }
} }
} }