Mirror of https://github.com/godotengine/godot.git (synced 2025-10-19 07:53:26 +00:00)

Merge pull request #106465 from Chubercik/manifold-3.1.0

    manifold: Update to 3.1.1

Commit b6b8c32673: 41 changed files with 3224 additions and 2725 deletions
@@ -391,7 +391,7 @@ License: BSD-3-clause
 
 Files: thirdparty/manifold/*
 Comment: Manifold
-Copyright: 2020-2024, The Manifold Authors
+Copyright: 2020-2025, The Manifold Authors
 License: Apache-2.0
 
 Files: thirdparty/mbedtls/*
|
@@ -25,9 +25,11 @@ thirdparty_sources = [
     "src/polygon.cpp",
     "src/properties.cpp",
     "src/quickhull.cpp",
     "src/sdf.cpp",
     "src/smoothing.cpp",
     "src/sort.cpp",
     "src/subdivision.cpp",
+    "src/tree2d.cpp",
 ]

 thirdparty_sources = [thirdparty_dir + file for file in thirdparty_sources]
|
thirdparty/README.md (4 changes, vendored)

@@ -624,12 +624,12 @@ See `linuxbsd_headers/README.md`.
 ## manifold

 - Upstream: https://github.com/elalish/manifold
-- Version: 3.0.1 (98b8142519d35c13e0e25cfa9fd6e3a271403be6, 2024)
+- Version: 3.1.1 (2f4741e0b1de44d6d461b869e481351335340b44, 2025)
 - License: Apache 2.0

 File extracted from upstream source:

-- `src/` and `include/`, except from `CMakeLists.txt`, `cross_section.cpp` and `meshIO.{cpp,h}`
+- `src/` and `include/`, except from `CMakeLists.txt`, `cross_section.h` and `meshIO.{cpp,h}`
 - `AUTHORS`, `LICENSE`

|
thirdparty/manifold/include/manifold/common.h (59 changes, vendored)

@@ -20,7 +20,8 @@
 #include <chrono>
 #endif

-#include "manifold/linalg.h"
+#include "linalg.h"
+#include "optional_assert.h"

 namespace manifold {
 /** @addtogroup Math
@@ -548,7 +549,7 @@ class Quality {
     int nSegL = 2.0 * radius * kPi / circularEdgeLength_;
     int nSeg = fmin(nSegA, nSegL) + 3;
     nSeg -= nSeg % 4;
-    return std::max(nSeg, 3);
+    return std::max(nSeg, 4);
   }

  /**
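The Quality hunk above raises the floor on the circular segment count from 3 to 4, so the result stays a positive multiple of 4 after the `nSeg -= nSeg % 4` rounding. Below is a minimal standalone sketch of that selection logic; the surrounding quality settings (`circularAngle_`, `circularEdgeLength_`) are stand-in values for illustration, not Manifold's defaults, and the enclosing accessor is assumed rather than taken from this diff.

```cpp
#include <algorithm>
#include <cmath>

// Sketch of the segment-count selection shown in the hunk above, with the
// quality parameters inlined as illustrative constants (assumed values).
int CircularSegments(double radius) {
  const double kPi = 3.14159265358979323846;
  const double circularAngle_ = 10.0;      // max degrees per segment (assumed)
  const double circularEdgeLength_ = 1.0;  // max edge length (assumed)

  int nSegA = 360.0 / circularAngle_;
  int nSegL = 2.0 * radius * kPi / circularEdgeLength_;
  int nSeg = std::fmin(nSegA, nSegL) + 3;
  nSeg -= nSeg % 4;          // quantize to a multiple of 4
  return std::max(nSeg, 4);  // the floor was 3 before this change
}
```

With these stand-in settings a radius of 1.0 gives nSegA = 36 and nSegL = 6, so nSeg = 9 rounds down to 8, comfortably above the new floor of 4.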
@@ -577,21 +578,62 @@ struct ExecutionParams {
   /// Perform extra sanity checks and assertions on the intermediate data
   /// structures.
   bool intermediateChecks = false;
-  /// Verbose output primarily of the Boolean, including timing info and vector
-  /// sizes.
-  bool verbose = false;
+  /// Perform 3D mesh self-intersection test on intermediate boolean results to
+  /// test for ϵ-validity. For debug purposes only.
+  bool selfIntersectionChecks = false;
   /// If processOverlaps is false, a geometric check will be performed to assert
   /// all triangles are CCW.
   bool processOverlaps = true;
   /// Suppresses printed errors regarding CW triangles. Has no effect if
   /// processOverlaps is true.
   bool suppressErrors = false;
-  /// Perform optional but recommended triangle cleanups in SimplifyTopology()
+  /// Deprecated! This value no longer has any effect, as cleanup now only
+  /// occurs on intersected triangles.
   bool cleanupTriangles = true;
+  /// Verbose level:
+  /// - 0 for no verbose output
+  /// - 1 for verbose output for the Boolean, including timing info and vector
+  /// sizes.
+  /// - 2 for verbose output with triangulator action as well.
+  int verbose = 0;
 };
 /** @} */

+#ifdef MANIFOLD_DEBUG
+template <class T>
+std::ostream& operator<<(std::ostream& out, const la::vec<T, 1>& v) {
+  return out << '{' << v[0] << '}';
+}
+template <class T>
+std::ostream& operator<<(std::ostream& out, const la::vec<T, 2>& v) {
+  return out << '{' << v[0] << ',' << v[1] << '}';
+}
+template <class T>
+std::ostream& operator<<(std::ostream& out, const la::vec<T, 3>& v) {
+  return out << '{' << v[0] << ',' << v[1] << ',' << v[2] << '}';
+}
+template <class T>
+std::ostream& operator<<(std::ostream& out, const la::vec<T, 4>& v) {
+  return out << '{' << v[0] << ',' << v[1] << ',' << v[2] << ',' << v[3] << '}';
+}
+
+template <class T, int M>
+std::ostream& operator<<(std::ostream& out, const la::mat<T, M, 1>& m) {
+  return out << '{' << m[0] << '}';
+}
+template <class T, int M>
+std::ostream& operator<<(std::ostream& out, const la::mat<T, M, 2>& m) {
+  return out << '{' << m[0] << ',' << m[1] << '}';
+}
+template <class T, int M>
+std::ostream& operator<<(std::ostream& out, const la::mat<T, M, 3>& m) {
+  return out << '{' << m[0] << ',' << m[1] << ',' << m[2] << '}';
+}
+template <class T, int M>
+std::ostream& operator<<(std::ostream& out, const la::mat<T, M, 4>& m) {
+  return out << '{' << m[0] << ',' << m[1] << ',' << m[2] << ',' << m[3] << '}';
+}
+
 inline std::ostream& operator<<(std::ostream& stream, const Box& box) {
   return stream << "min: " << box.min << ", "
                 << "max: " << box.max;
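The ExecutionParams hunk above replaces the old `bool verbose` flag with an integer verbosity level (0 = silent, 1 = Boolean timing and vector sizes, 2 = triangulator tracing as well) and adds `selfIntersectionChecks`. A hedged usage sketch follows; it assumes the `ManifoldParams()` accessor that the `boolean3.h` hunk further down also calls, and that it returns a mutable `ExecutionParams&`.

```cpp
#include "manifold/manifold.h"

// Illustrative only: bump the Boolean verbosity for one operation, then
// restore the previous level. Output is only produced in MANIFOLD_DEBUG
// builds, where the PRINT macro is active.
void RunWithVerboseBoolean(manifold::Manifold& a, const manifold::Manifold& b) {
  manifold::ExecutionParams& params = manifold::ManifoldParams();
  const int oldLevel = params.verbose;
  params.verbose = 1;  // 2 would also trace the triangulator
  a += b;              // Boolean union
  params.verbose = oldLevel;
}
```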
@@ -602,6 +644,11 @@ inline std::ostream& operator<<(std::ostream& stream, const Rect& box) {
                 << "max: " << box.max;
 }

+inline std::ostream& operator<<(std::ostream& stream, const Smoothness& s) {
+  return stream << "halfedge: " << s.halfedge << ", "
+                << "smoothness: " << s.smoothness;
+}
+
 /**
  * Print the contents of this vector to standard output. Only exists if compiled
  * with MANIFOLD_DEBUG flag.
|
thirdparty/manifold/include/manifold/linalg.h (741 changes, vendored)

(Diff not shown: file diff suppressed because it is too large.)
thirdparty/manifold/include/manifold/manifold.h (148 changes, vendored)

@@ -13,12 +13,9 @@
 // limitations under the License.

 #pragma once
-#include <cstdint> // uint32_t, uint64_t
 #include <functional>
-#include <memory>

 #ifdef MANIFOLD_EXPORT
 #include <iostream>
 #endif
+#include <memory>  // needed for shared_ptr

 #include "manifold/common.h"
 #include "manifold/vec_view.h"
@@ -46,11 +43,71 @@ class CsgLeafNode;
  * @brief Mesh input/output suitable for pushing directly into graphics
  * libraries.
  *
- * This may not be manifold since the verts are duplicated along property
- * boundaries that do not match. The additional merge vectors store this missing
- * information, allowing the manifold to be reconstructed. MeshGL is an alias
- * for the standard single-precision version. Use MeshGL64 to output the full
- * double precision that Manifold uses internally.
+ * The core (non-optional) parts of MeshGL are the triVerts indices buffer and
+ * the vertProperties interleaved vertex buffer, which follow the conventions of
+ * OpenGL (and other graphic libraries') buffers and are therefore generally
+ * easy to map directly to other applications' data structures.
+ *
+ * The triVerts vector has a stride of 3 and specifies triangles as
+ * vertex indices. For triVerts = [2, 4, 5, 3, 1, 6, ...], the triangles are [2,
+ * 4, 5], [3, 1, 6], etc. and likewise the halfedges are [2, 4], [4, 5], [5, 2],
+ * [3, 1], [1, 6], [6, 3], etc.
+ *
+ * The triVerts indices should form a manifold mesh: each of the 3 halfedges of
+ * each triangle should have exactly one paired halfedge in the list, defined as
+ * having the first index of one equal to the second index of the other and
+ * vice-versa. However, this is not always possible - consider e.g. a cube with
+ * normal-vector properties. Shared vertices would turn the cube into a ball by
+ * interpolating normals - the common solution is to duplicate each corner
+ * vertex into 3, each with the same position, but different normals
+ * corresponding to each face. This is exactly what should be done in MeshGL,
+ * however we request two additional vectors in this case: mergeFromVert and
+ * mergeToVert. Each vertex mergeFromVert[i] is merged into vertex
+ * mergeToVert[i], avoiding unreliable floating-point comparisons to recover the
+ * manifold topology. These merges are simply a union, so which is from and to
+ * doesn't matter.
+ *
+ * If you don't have merge vectors, you can create them with the Merge() method,
+ * however this will fail if the mesh is not already manifold within the set
+ * tolerance. For maximum reliablility, always store the merge vectors with the
+ * mesh, e.g. using the EXT_mesh_manifold extension in glTF.
+ *
+ * You can have any number of arbitrary floating-point properties per vertex,
+ * and they will all be interpolated as necessary during operations. It is up to
+ * you to keep track of which channel represents what type of data. A few of
+ * Manifold's methods allow you to specify the channel where normals data
+ * starts, in order to update it automatically for transforms and such. This
+ * will be easier if your meshes all use the same channels for properties, but
+ * this is not a requirement. Operations between meshes with different numbers
+ * of peroperties will simply use the larger numProp and pad the smaller one
+ * with zeroes.
+ *
+ * On output, the triangles are sorted into runs (runIndex, runOriginalID,
+ * runTransform) that correspond to different mesh inputs. Other 3D libraries
+ * may refer to these runs as primitives of a mesh (as in glTF) or draw calls,
+ * as they often represent different materials on different parts of the mesh.
+ * It is generally a good idea to maintain a map of OriginalIDs to materials to
+ * make it easy to reapply them after a set of Boolean operations. These runs
+ * can also be used as input, and thus also ensure a lossless roundtrip of data
+ * through MeshGL.
+ *
+ * As an example, with runIndex = [0, 6, 18, 21] and runOriginalID = [1, 3, 3],
+ * there are 7 triangles, where the first two are from the input mesh with ID 1,
+ * the next 4 are from an input mesh with ID 3, and the last triangle is from a
+ * different copy (instance) of the input mesh with ID 3. These two instances
+ * can be distinguished by their different runTransform matrices.
+ *
+ * You can reconstruct polygonal faces by assembling all the triangles that are
+ * from the same run and share the same faceID. These faces will be planar
+ * within the output tolerance.
+ *
+ * The halfedgeTangent vector is used to specify the weighted tangent vectors of
+ * each halfedge for the purpose of using the Refine methods to create a
+ * smoothly-interpolated surface. They can also be output when calculated
+ * automatically by the Smooth functions.
+ *
+ * MeshGL is an alias for the standard single-precision version. Use MeshGL64 to
+ * output the full double precision that Manifold uses internally.
  */
 template <typename Precision, typename I = uint32_t>
 struct MeshGLP {
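The expanded MeshGL documentation above explains how output triangles are grouped into runs by `runIndex` and `runOriginalID`. The short sketch below just replays that indexing convention on the example values from the comment (runIndex = [0, 6, 18, 21], runOriginalID = [1, 3, 3]); it does not touch the real MeshGL struct.

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

// runIndex holds offsets into triVerts (stride 3) and has one more entry than
// runOriginalID, so run r covers triangles [runIndex[r]/3, runIndex[r+1]/3).
int main() {
  std::vector<uint32_t> runIndex = {0, 6, 18, 21};
  std::vector<uint32_t> runOriginalID = {1, 3, 3};

  for (size_t run = 0; run + 1 < runIndex.size(); ++run) {
    uint32_t firstTri = runIndex[run] / 3;
    uint32_t endTri = runIndex[run + 1] / 3;  // one past the end
    std::printf("run %zu: originalID %u, triangles [%u, %u)\n", run,
                runOriginalID[run], firstTri, endTri);
  }
  return 0;  // prints runs of 2, 4, and 1 triangles: 7 in total
}
```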
@@ -62,7 +119,7 @@ struct MeshGLP {
   I numProp = 3;
   /// Flat, GL-style interleaved list of all vertex properties: propVal =
   /// vertProperties[vert * numProp + propIdx]. The first three properties are
-  /// always the position x, y, z.
+  /// always the position x, y, z. The stride of the array is numProp.
   std::vector<Precision> vertProperties;
   /// The vertex indices of the three triangle corners in CCW (from the outside)
   /// order, for each triangle.
@@ -93,10 +150,11 @@ struct MeshGLP {
   /// This matrix is stored in column-major order and the length of the overall
   /// vector is 12 * runOriginalID.size().
   std::vector<Precision> runTransform;
-  /// Optional: Length NumTri, contains the source face ID this
-  /// triangle comes from. When auto-generated, this ID will be a triangle index
-  /// into the original mesh. This index/ID is purely for external use (e.g.
-  /// recreating polygonal faces) and will not affect Manifold's algorithms.
+  /// Optional: Length NumTri, contains the source face ID this triangle comes
+  /// from. Simplification will maintain all edges between triangles with
+  /// different faceIDs. Input faceIDs will be maintained to the outputs, but if
+  /// none are given, they will be filled in with Manifold's coplanar face
+  /// calculation based on mesh tolerance.
   std::vector<I> faceID;
   /// Optional: The X-Y-Z-W weighted tangent vectors for smooth Refine(). If
   /// non-empty, must be exactly four times as long as Mesh.triVerts. Indexed
@@ -263,6 +321,7 @@ class Manifold {
     RunIndexWrongLength,
     FaceIDWrongLength,
     InvalidConstruction,
+    ResultTooLarge,
   };

   /** @name Information
@@ -311,6 +370,7 @@ class Manifold {
   Manifold Warp(std::function<void(vec3&)>) const;
   Manifold WarpBatch(std::function<void(VecView<vec3>)>) const;
   Manifold SetTolerance(double) const;
+  Manifold Simplify(double tolerance = 0) const;
   ///@}

   /** @name Boolean
@@ -368,22 +428,68 @@ class Manifold {
   static Manifold Hull(const std::vector<vec3>& pts);
   ///@}

+  /** @name Debugging I/O
+   * Self-contained mechanism for reading and writing high precision Manifold
+   * data. Write function creates special-purpose OBJ files, and Read function
+   * reads them in. Be warned these are not (and not intended to be)
+   * full-featured OBJ importers/exporters. Their primary use is to extract
+   * accurate Manifold data for debugging purposes - writing out any info
+   * needed to accurately reproduce a problem case's state. Consequently, they
+   * may store and process additional data in comments that other OBJ parsing
+   * programs won't understand.
+   *
+   * The "format" read and written by these functions is not guaranteed to be
+   * stable from release to release - it will be modified as needed to ensure
+   * it captures information needed for debugging. The only API guarantee is
+   * that the ReadOBJ method in a given build/release will read in the output
+   * of the WriteOBJ method produced by that release.
+   *
+   * To work with a file, the caller should prepare the ifstream/ostream
+   * themselves, as follows:
+   *
+   * Reading:
+   * @code
+   * std::ifstream ifile;
+   * ifile.open(filename);
+   * if (ifile.is_open()) {
+   *   Manifold obj_m = Manifold::ReadOBJ(ifile);
+   *   ifile.close();
+   *   if (obj_m.Status() != Manifold::Error::NoError) {
+   *     std::cerr << "Failed reading " << filename << ":\n";
+   *     std::cerr << Manifold::ToString(ob_m.Status()) << "\n";
+   *   }
+   *   ifile.close();
+   * }
+   * @endcode
+   *
+   * Writing:
+   * @code
+   * std::ofstream ofile;
+   * ofile.open(filename);
+   * if (ofile.is_open()) {
+   *   if (!m.WriteOBJ(ofile)) {
+   *     std::cerr << "Failed writing to " << filename << "\n";
+   *   }
+   * }
+   * ofile.close();
+   * @endcode
+   */
+#ifdef MANIFOLD_DEBUG
+  static Manifold ReadOBJ(std::istream& stream);
+  bool WriteOBJ(std::ostream& stream) const;
+#endif
+
   /** @name Testing Hooks
    * These are just for internal testing.
    */
   ///@{
   bool MatchesTriNormals() const;
   size_t NumDegenerateTris() const;
   size_t NumOverlaps(const Manifold& second) const;
   double GetEpsilon() const;
   ///@}

   struct Impl;

 #ifdef MANIFOLD_EXPORT
   static Manifold ImportMeshGL64(std::istream& stream);
 #endif

  private:
   Manifold(std::shared_ptr<CsgNode> pNode_);
   Manifold(std::shared_ptr<Impl> pImpl_);
@@ -429,6 +535,8 @@ inline std::string ToString(const Manifold::Error& error) {
       return "Face ID Wrong Length";
     case Manifold::Error::InvalidConstruction:
       return "Invalid Construction";
+    case Manifold::Error::ResultTooLarge:
+      return "Result Too Large";
     default:
       return "Unknown Error";
   };

@@ -15,6 +15,7 @@
 #pragma once

 #ifdef MANIFOLD_DEBUG
+#include <iomanip>
 #include <iostream>
 #include <sstream>
 #include <stdexcept>
@@ -33,6 +34,9 @@ struct geometryErr : public virtual std::runtime_error {
   using std::runtime_error::runtime_error;
 };
 using logicErr = std::logic_error;
 #endif

+#if defined(MANIFOLD_ASSERT) && defined(MANIFOLD_DEBUG)
+
 template <typename Ex>
 void AssertFail(const char* file, int line, const char* cond, const char* msg) {

@@ -52,11 +52,10 @@ using PolygonsIdx = std::vector<SimplePolygonIdx>;
  * @brief Polygon triangulation
  * @{
  */
-std::vector<ivec3> TriangulateIdx(const PolygonsIdx &polys,
-                                  double epsilon = -1);
+std::vector<ivec3> TriangulateIdx(const PolygonsIdx& polys, double epsilon = -1,
+                                  bool allowConvex = true);

-std::vector<ivec3> Triangulate(const Polygons &polygons, double epsilon = -1);
-
-ExecutionParams &PolygonParams();
+std::vector<ivec3> Triangulate(const Polygons& polygons, double epsilon = -1,
+                               bool allowConvex = true);
 /** @} */
 } // namespace manifold
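The last hunk above (from the polygon triangulation header) extends `Triangulate` and `TriangulateIdx` with an `allowConvex` flag and drops the old `PolygonParams()` accessor. A minimal usage sketch follows; it assumes the usual `Polygons` alias (a vector of loops of `vec2` points), and passes `false` for the new argument purely to illustrate the signature, since it defaults to `true`.

```cpp
#include <vector>

#include "manifold/polygon.h"

// Triangulate a single square loop. epsilon = -1 keeps the automatic
// tolerance; the third argument is the new allowConvex flag.
std::vector<manifold::ivec3> TriangulateSquare() {
  manifold::Polygons polys = {
      {{0.0, 0.0}, {1.0, 0.0}, {1.0, 1.0}, {0.0, 1.0}}};
  return manifold::Triangulate(polys, -1, /*allowConvex=*/false);
}
```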
|
thirdparty/manifold/include/manifold/vec_view.h (36 changes, vendored)

@@ -31,22 +31,22 @@ namespace manifold {
 template <typename T>
 class VecView {
  public:
-  using Iter = T *;
-  using IterC = const T *;
+  using Iter = T*;
+  using IterC = const T*;

   VecView() : ptr_(nullptr), size_(0) {}

-  VecView(T *ptr, size_t size) : ptr_(ptr), size_(size) {}
+  VecView(T* ptr, size_t size) : ptr_(ptr), size_(size) {}

-  VecView(const std::vector<std::remove_cv_t<T>> &v)
+  VecView(const std::vector<std::remove_cv_t<T>>& v)
       : ptr_(v.data()), size_(v.size()) {}

-  VecView(const VecView &other) {
+  VecView(const VecView& other) {
     ptr_ = other.ptr_;
     size_ = other.size_;
   }

-  VecView &operator=(const VecView &other) {
+  VecView& operator=(const VecView& other) {
     ptr_ = other.ptr_;
     size_ = other.size_;
     return *this;
@@ -55,12 +55,12 @@ class VecView {
   // allows conversion to a const VecView
   operator VecView<const T>() const { return {ptr_, size_}; }

-  inline const T &operator[](size_t i) const {
+  inline const T& operator[](size_t i) const {
     ASSERT(i < size_, std::out_of_range("Vec out of range"));
     return ptr_[i];
   }

-  inline T &operator[](size_t i) {
+  inline T& operator[](size_t i) {
     ASSERT(i < size_, std::out_of_range("Vec out of range"));
     return ptr_[i];
   }
@@ -74,25 +74,25 @@ class VecView {
   Iter begin() { return ptr_; }
   Iter end() { return ptr_ + size_; }

-  const T &front() const {
+  const T& front() const {
     ASSERT(size_ != 0,
            std::out_of_range("Attempt to take the front of an empty vector"));
     return ptr_[0];
   }

-  const T &back() const {
+  const T& back() const {
     ASSERT(size_ != 0,
            std::out_of_range("Attempt to take the back of an empty vector"));
     return ptr_[size_ - 1];
   }

-  T &front() {
+  T& front() {
     ASSERT(size_ != 0,
            std::out_of_range("Attempt to take the front of an empty vector"));
     return ptr_[0];
   }

-  T &back() {
+  T& back() {
     ASSERT(size_ != 0,
            std::out_of_range("Attempt to take the back of an empty vector"));
     return ptr_[size_ - 1];
@@ -106,8 +106,7 @@ class VecView {
                  size_t length = std::numeric_limits<size_t>::max()) {
     if (length == std::numeric_limits<size_t>::max())
       length = this->size_ - offset;
-    ASSERT(length >= 0, std::out_of_range("Vec::view out of range"));
-    ASSERT(offset + length <= this->size_ && offset >= 0,
+    ASSERT(offset + length <= this->size_,
            std::out_of_range("Vec::view out of range"));
     return VecView<T>(this->ptr_ + offset, length);
   }
@@ -117,8 +116,7 @@ class VecView {
                   size_t length = std::numeric_limits<size_t>::max()) const {
     if (length == std::numeric_limits<size_t>::max())
       length = this->size_ - offset;
-    ASSERT(length >= 0, std::out_of_range("Vec::cview out of range"));
-    ASSERT(offset + length <= this->size_ && offset >= 0,
+    ASSERT(offset + length <= this->size_,
            std::out_of_range("Vec::cview out of range"));
     return VecView<const T>(this->ptr_ + offset, length);
   }
@@ -129,9 +127,9 @@ class VecView {
     return cview(offset, length);
   }

-  T *data() { return this->ptr_; }
+  T* data() { return this->ptr_; }

-  const T *data() const { return this->ptr_; }
+  const T* data() const { return this->ptr_; }

 #ifdef MANIFOLD_DEBUG
   void Dump() const {
@@ -144,7 +142,7 @@ class VecView {
 #endif

  protected:
-  T *ptr_ = nullptr;
+  T* ptr_ = nullptr;
   size_t size_ = 0;
 };

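Most of the `vec_view.h` changes above are pointer and reference spacing, plus a simplified bounds assertion in `view()` and `cview()` (the `offset >= 0` and `length >= 0` checks are dropped because both values are unsigned `size_t`). A small usage sketch of the class as it appears in this diff:

```cpp
#include <vector>

#include "manifold/vec_view.h"

// Sum everything except the first and last element through a non-owning
// window. cview() asserts that offset + length stays within the view.
double SumInterior(const std::vector<double>& values) {
  if (values.size() < 3) return 0.0;
  manifold::VecView<const double> all(values.data(), values.size());
  manifold::VecView<const double> interior = all.cview(1, values.size() - 2);
  double total = 0.0;
  for (double v : interior) total += v;
  return total;
}
```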
|
thirdparty/manifold/src/boolean3.cpp (443 changes, vendored)

@@ -12,11 +12,15 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "./boolean3.h"
+#include "boolean3.h"

 #include <limits>

-#include "./parallel.h"
+#include "parallel.h"
+
+#if (MANIFOLD_PAR == 1)
+#include <tbb/combinable.h>
+#endif

 using namespace manifold;

@@ -38,12 +42,12 @@ vec2 Interpolate(vec3 pL, vec3 pR, double x) {
   if (!std::isfinite(lambda) || !std::isfinite(dLR.y) || !std::isfinite(dLR.z))
     return vec2(pL.y, pL.z);
   vec2 yz;
-  yz[0] = fma(lambda, dLR.y, useL ? pL.y : pR.y);
-  yz[1] = fma(lambda, dLR.z, useL ? pL.z : pR.z);
+  yz[0] = lambda * dLR.y + (useL ? pL.y : pR.y);
+  yz[1] = lambda * dLR.z + (useL ? pL.z : pR.z);
   return yz;
 }

-vec4 Intersect(const vec3 &pL, const vec3 &pR, const vec3 &qL, const vec3 &qR) {
+vec4 Intersect(const vec3& pL, const vec3& pR, const vec3& qL, const vec3& qR) {
   const double dyL = qL.y - pL.y;
   const double dyR = qR.y - pR.y;
   DEBUG_ASSERT(dyL * dyR <= 0, logicErr,
@@ -53,53 +57,17 @@ vec4 Intersect(const vec3 &pL, const vec3 &pR, const vec3 &qL, const vec3 &qR) {
   double lambda = (useL ? dyL : dyR) / (dyL - dyR);
   if (!std::isfinite(lambda)) lambda = 0.0;
   vec4 xyzz;
-  xyzz.x = fma(lambda, dx, useL ? pL.x : pR.x);
+  xyzz.x = lambda * dx + (useL ? pL.x : pR.x);
   const double pDy = pR.y - pL.y;
   const double qDy = qR.y - qL.y;
   const bool useP = fabs(pDy) < fabs(qDy);
-  xyzz.y = fma(lambda, useP ? pDy : qDy,
-               useL ? (useP ? pL.y : qL.y) : (useP ? pR.y : qR.y));
-  xyzz.z = fma(lambda, pR.z - pL.z, useL ? pL.z : pR.z);
-  xyzz.w = fma(lambda, qR.z - qL.z, useL ? qL.z : qR.z);
+  xyzz.y = lambda * (useP ? pDy : qDy) +
+           (useL ? (useP ? pL.y : qL.y) : (useP ? pR.y : qR.y));
+  xyzz.z = lambda * (pR.z - pL.z) + (useL ? pL.z : pR.z);
+  xyzz.w = lambda * (qR.z - qL.z) + (useL ? qL.z : qR.z);
   return xyzz;
 }

-template <const bool inverted>
-struct CopyFaceEdges {
-  const SparseIndices &p1q1;
-  // x can be either vert or edge (0 or 1).
-  SparseIndices &pXq1;
-  VecView<const Halfedge> halfedgesQ;
-  const size_t offset;
-
-  void operator()(const size_t i) {
-    int idx = 3 * (i + offset);
-    int pX = p1q1.Get(i, inverted);
-    int q2 = p1q1.Get(i, !inverted);
-
-    for (const int j : {0, 1, 2}) {
-      const int q1 = 3 * q2 + j;
-      const Halfedge edge = halfedgesQ[q1];
-      int a = pX;
-      int b = edge.IsForward() ? q1 : edge.pairedHalfedge;
-      if (inverted) std::swap(a, b);
-      pXq1.Set(idx + static_cast<size_t>(j), a, b);
-    }
-  }
-};
-
-SparseIndices Filter11(const Manifold::Impl &inP, const Manifold::Impl &inQ,
-                       const SparseIndices &p1q2, const SparseIndices &p2q1) {
-  ZoneScoped;
-  SparseIndices p1q1(3 * p1q2.size() + 3 * p2q1.size());
-  for_each_n(autoPolicy(p1q2.size(), 1e5), countAt(0_uz), p1q2.size(),
-             CopyFaceEdges<false>({p1q2, p1q1, inQ.halfedge_, 0_uz}));
-  for_each_n(autoPolicy(p2q1.size(), 1e5), countAt(0_uz), p2q1.size(),
-             CopyFaceEdges<true>({p2q1, p1q1, inP.halfedge_, p1q2.size()}));
-  p1q1.Unique();
-  return p1q1;
-}
-
 inline bool Shadows(double p, double q, double dir) {
   return p == q ? dir < 0 : p < q;
 }
@@ -107,16 +75,16 @@ inline bool Shadows(double p, double q, double dir) {
 inline std::pair<int, vec2> Shadow01(
     const int p0, const int q1, VecView<const vec3> vertPosP,
     VecView<const vec3> vertPosQ, VecView<const Halfedge> halfedgeQ,
-    const double expandP, VecView<const vec3> normalP, const bool reverse) {
+    const double expandP, VecView<const vec3> normal, const bool reverse) {
   const int q1s = halfedgeQ[q1].startVert;
   const int q1e = halfedgeQ[q1].endVert;
   const double p0x = vertPosP[p0].x;
   const double q1sx = vertPosQ[q1s].x;
   const double q1ex = vertPosQ[q1e].x;
-  int s01 = reverse ? Shadows(q1sx, p0x, expandP * normalP[q1s].x) -
-                          Shadows(q1ex, p0x, expandP * normalP[q1e].x)
-                    : Shadows(p0x, q1ex, expandP * normalP[p0].x) -
-                          Shadows(p0x, q1sx, expandP * normalP[p0].x);
+  int s01 = reverse ? Shadows(q1sx, p0x, expandP * normal[q1s].x) -
+                          Shadows(q1ex, p0x, expandP * normal[q1e].x)
+                    : Shadows(p0x, q1ex, expandP * normal[p0].x) -
+                          Shadows(p0x, q1sx, expandP * normal[p0].x);
   vec2 yz01(NAN);

   if (s01 != 0) {
@@ -126,70 +94,26 @@ inline std::pair<int, vec2> Shadow01(
     const double start2 = la::dot(diff, diff);
     diff = vertPosQ[q1e] - vertPosP[p0];
     const double end2 = la::dot(diff, diff);
-    const double dir = start2 < end2 ? normalP[q1s].y : normalP[q1e].y;
+    const double dir = start2 < end2 ? normal[q1s].y : normal[q1e].y;
     if (!Shadows(yz01[0], vertPosP[p0].y, expandP * dir)) s01 = 0;
   } else {
-    if (!Shadows(vertPosP[p0].y, yz01[0], expandP * normalP[p0].y)) s01 = 0;
+    if (!Shadows(vertPosP[p0].y, yz01[0], expandP * normal[p0].y)) s01 = 0;
   }
   return std::make_pair(s01, yz01);
 }

-// https://github.com/scandum/binary_search/blob/master/README.md
-// much faster than standard binary search on large arrays
-size_t monobound_quaternary_search(VecView<const int64_t> array, int64_t key) {
-  if (array.size() == 0) {
-    return std::numeric_limits<size_t>::max();
-  }
-  size_t bot = 0;
-  size_t top = array.size();
-  while (top >= 65536) {
-    size_t mid = top / 4;
-    top -= mid * 3;
-    if (key < array[bot + mid * 2]) {
-      if (key >= array[bot + mid]) {
-        bot += mid;
-      }
-    } else {
-      bot += mid * 2;
-      if (key >= array[bot + mid]) {
-        bot += mid;
-      }
-    }
-  }
-
-  while (top > 3) {
-    size_t mid = top / 2;
-    if (key >= array[bot + mid]) {
-      bot += mid;
-    }
-    top -= mid;
-  }
-
-  while (top--) {
-    if (key == array[bot + top]) {
-      return bot + top;
-    }
-  }
-  return -1;
-}
-
 struct Kernel11 {
-  VecView<vec4> xyzz;
-  VecView<int> s;
   VecView<const vec3> vertPosP;
   VecView<const vec3> vertPosQ;
   VecView<const Halfedge> halfedgeP;
   VecView<const Halfedge> halfedgeQ;
   const double expandP;
   VecView<const vec3> normalP;
-  const SparseIndices &p1q1;

-  void operator()(const size_t idx) {
-    const int p1 = p1q1.Get(idx, false);
-    const int q1 = p1q1.Get(idx, true);
-    vec4 &xyzz11 = xyzz[idx];
-    int &s11 = s[idx];
+  std::pair<int, vec4> operator()(int p1, int q1) {
+    vec4 xyzz11 = vec4(NAN);
+    int s11 = 0;

     // For pRL[k], qRL[k], k==0 is the left and k==1 is the right.
     int k = 0;
@@ -201,10 +125,8 @@ struct Kernel11 {

     const int p0[2] = {halfedgeP[p1].startVert, halfedgeP[p1].endVert};
     for (int i : {0, 1}) {
-      const auto syz01 = Shadow01(p0[i], q1, vertPosP, vertPosQ, halfedgeQ,
-                                  expandP, normalP, false);
-      const int s01 = syz01.first;
-      const vec2 yz01 = syz01.second;
+      const auto [s01, yz01] = Shadow01(p0[i], q1, vertPosP, vertPosQ,
+                                        halfedgeQ, expandP, normalP, false);
       // If the value is NaN, then these do not overlap.
       if (std::isfinite(yz01[0])) {
         s11 += s01 * (i == 0 ? -1 : 1);
@@ -219,10 +141,8 @@ struct Kernel11 {

     const int q0[2] = {halfedgeQ[q1].startVert, halfedgeQ[q1].endVert};
     for (int i : {0, 1}) {
-      const auto syz10 = Shadow01(q0[i], p1, vertPosQ, vertPosP, halfedgeP,
-                                  expandP, normalP, true);
-      const int s10 = syz10.first;
-      const vec2 yz10 = syz10.second;
+      const auto [s10, yz10] = Shadow01(q0[i], p1, vertPosQ, vertPosP,
+                                        halfedgeP, expandP, normalP, true);
       // If the value is NaN, then these do not overlap.
       if (std::isfinite(yz10[0])) {
         s11 += s10 * (i == 0 ? -1 : 1);
@@ -251,42 +171,22 @@ struct Kernel11 {

       if (!Shadows(xyzz11.z, xyzz11.w, expandP * dir)) s11 = 0;
     }

+    return std::make_pair(s11, xyzz11);
   }
 };

-std::tuple<Vec<int>, Vec<vec4>> Shadow11(SparseIndices &p1q1,
-                                         const Manifold::Impl &inP,
-                                         const Manifold::Impl &inQ,
-                                         double expandP) {
-  ZoneScoped;
-  Vec<int> s11(p1q1.size());
-  Vec<vec4> xyzz11(p1q1.size());
-
-  for_each_n(autoPolicy(p1q1.size(), 1e4), countAt(0_uz), p1q1.size(),
-             Kernel11({xyzz11, s11, inP.vertPos_, inQ.vertPos_, inP.halfedge_,
-                       inQ.halfedge_, expandP, inP.vertNormal_, p1q1}));
-
-  p1q1.KeepFinite(xyzz11, s11);
-
-  return std::make_tuple(s11, xyzz11);
-};
-
 struct Kernel02 {
-  VecView<int> s;
-  VecView<double> z;
   VecView<const vec3> vertPosP;
   VecView<const Halfedge> halfedgeQ;
   VecView<const vec3> vertPosQ;
   const double expandP;
   VecView<const vec3> vertNormalP;
-  const SparseIndices &p0q2;
   const bool forward;

-  void operator()(const size_t idx) {
-    const int p0 = p0q2.Get(idx, !forward);
-    const int q2 = p0q2.Get(idx, forward);
-    int &s02 = s[idx];
-    double &z02 = z[idx];
+  std::pair<int, double> operator()(int p0, int q2) {
+    int s02 = 0;
+    double z02 = 0.0;

     // For yzzLR[k], k==0 is the left and k==1 is the right.
     int k = 0;
@@ -342,47 +242,21 @@ struct Kernel02 {
         s02 = 0;
       }
     }
+    return std::make_pair(s02, z02);
   }
 };

-std::tuple<Vec<int>, Vec<double>> Shadow02(const Manifold::Impl &inP,
-                                           const Manifold::Impl &inQ,
-                                           SparseIndices &p0q2, bool forward,
-                                           double expandP) {
-  ZoneScoped;
-  Vec<int> s02(p0q2.size());
-  Vec<double> z02(p0q2.size());
-
-  auto vertNormalP = forward ? inP.vertNormal_ : inQ.vertNormal_;
-  for_each_n(autoPolicy(p0q2.size(), 1e4), countAt(0_uz), p0q2.size(),
-             Kernel02({s02, z02, inP.vertPos_, inQ.halfedge_, inQ.vertPos_,
-                       expandP, vertNormalP, p0q2, forward}));
-
-  p0q2.KeepFinite(z02, s02);
-
-  return std::make_tuple(s02, z02);
-};
-
 struct Kernel12 {
-  VecView<int> x;
-  VecView<vec3> v;
-  VecView<const int64_t> p0q2;
-  VecView<const int> s02;
-  VecView<const double> z02;
-  VecView<const int64_t> p1q1;
-  VecView<const int> s11;
-  VecView<const vec4> xyzz11;
   VecView<const Halfedge> halfedgesP;
   VecView<const Halfedge> halfedgesQ;
   VecView<const vec3> vertPosP;
   const bool forward;
-  const SparseIndices &p1q2;
+  Kernel02 k02;
+  Kernel11 k11;

-  void operator()(const size_t idx) {
-    int p1 = p1q2.Get(idx, !forward);
-    int q2 = p1q2.Get(idx, forward);
-    int &x12 = x[idx];
-    vec3 &v12 = v[idx];
+  std::pair<int, vec3> operator()(int p1, int q2) {
+    int x12 = 0;
+    vec3 v12 = vec3(NAN);

     // For xzyLR-[k], k==0 is the left and k==1 is the right.
     int k = 0;
@@ -396,18 +270,15 @@ struct Kernel12 {
     const Halfedge edge = halfedgesP[p1];

     for (int vert : {edge.startVert, edge.endVert}) {
-      const int64_t key = forward ? SparseIndices::EncodePQ(vert, q2)
-                                  : SparseIndices::EncodePQ(q2, vert);
-      const size_t idx = monobound_quaternary_search(p0q2, key);
-      if (idx != std::numeric_limits<size_t>::max()) {
-        const int s = s02[idx];
+      const auto [s, z] = k02(vert, q2);
+      if (std::isfinite(z)) {
         x12 += s * ((vert == edge.startVert) == forward ? 1 : -1);
         if (k < 2 && (k == 0 || (s != 0) != shadows)) {
           shadows = s != 0;
           xzyLR0[k] = vertPosP[vert];
           std::swap(xzyLR0[k].y, xzyLR0[k].z);
           xzyLR1[k] = xzyLR0[k];
-          xzyLR1[k][1] = z02[idx];
+          xzyLR1[k][1] = z;
           k++;
         }
       }
@@ -417,17 +288,11 @@ struct Kernel12 {
       const int q1 = 3 * q2 + i;
       const Halfedge edge = halfedgesQ[q1];
       const int q1F = edge.IsForward() ? q1 : edge.pairedHalfedge;
-      const int64_t key = forward ? SparseIndices::EncodePQ(p1, q1F)
-                                  : SparseIndices::EncodePQ(q1F, p1);
-      const size_t idx = monobound_quaternary_search(p1q1, key);
-      if (idx !=
-          std::numeric_limits<size_t>::max()) {  // s is implicitly zero for
-                                                 // anything not found
-        const int s = s11[idx];
+      const auto [s, xyzz] = forward ? k11(p1, q1F) : k11(q1F, p1);
+      if (std::isfinite(xyzz[0])) {
         x12 -= s * (edge.IsForward() ? 1 : -1);
         if (k < 2 && (k == 0 || (s != 0) != shadows)) {
           shadows = s != 0;
-          const vec4 xyzz = xyzz11[idx];
           xzyLR0[k][0] = xyzz.x;
           xzyLR0[k][1] = xyzz.z;
           xzyLR0[k][2] = xyzz.y;
@@ -448,60 +313,146 @@ struct Kernel12 {
       v12.y = xzyy[2];
       v12.z = xzyy[1];
     }
+    return std::make_pair(x12, v12);
   }
 };

-std::tuple<Vec<int>, Vec<vec3>> Intersect12(
-    const Manifold::Impl &inP, const Manifold::Impl &inQ, const Vec<int> &s02,
-    const SparseIndices &p0q2, const Vec<int> &s11, const SparseIndices &p1q1,
-    const Vec<double> &z02, const Vec<vec4> &xyzz11, SparseIndices &p1q2,
-    bool forward) {
+struct Kernel12Tmp {
+  Vec<std::array<int, 2>> p1q2_;
+  Vec<int> x12_;
+  Vec<vec3> v12_;
+};
+
+struct Kernel12Recorder {
+  using Local = Kernel12Tmp;
+  Kernel12& k12;
+  VecView<const TmpEdge> tmpedges;
+  bool forward;
+
+#if MANIFOLD_PAR == 1
+  tbb::combinable<Kernel12Tmp> store;
+  Local& local() { return store.local(); }
+#else
+  Kernel12Tmp localStore;
+  Local& local() { return localStore; }
+#endif
+
+  void record(int queryIdx, int leafIdx, Local& tmp) {
+    queryIdx = tmpedges[queryIdx].halfedgeIdx;
+    const auto [x12, v12] = k12(queryIdx, leafIdx);
+    if (std::isfinite(v12[0])) {
+      if (forward)
+        tmp.p1q2_.push_back({queryIdx, leafIdx});
+      else
+        tmp.p1q2_.push_back({leafIdx, queryIdx});
+      tmp.x12_.push_back(x12);
+      tmp.v12_.push_back(v12);
+    }
+  }
+
+  Kernel12Tmp get() {
+#if MANIFOLD_PAR == 1
+    Kernel12Tmp result;
+    std::vector<Kernel12Tmp> tmps;
+    store.combine_each(
+        [&](Kernel12Tmp& data) { tmps.emplace_back(std::move(data)); });
+    std::vector<size_t> sizes;
+    size_t total_size = 0;
+    for (const auto& tmp : tmps) {
+      sizes.push_back(total_size);
+      total_size += tmp.x12_.size();
+    }
+    result.p1q2_.resize(total_size);
+    result.x12_.resize(total_size);
+    result.v12_.resize(total_size);
+    for_each_n(ExecutionPolicy::Seq, countAt(0), tmps.size(), [&](size_t i) {
+      std::copy(tmps[i].p1q2_.begin(), tmps[i].p1q2_.end(),
+                result.p1q2_.begin() + sizes[i]);
+      std::copy(tmps[i].x12_.begin(), tmps[i].x12_.end(),
+                result.x12_.begin() + sizes[i]);
+      std::copy(tmps[i].v12_.begin(), tmps[i].v12_.end(),
+                result.v12_.begin() + sizes[i]);
+    });
+    return result;
+#else
+    return localStore;
+#endif
+  }
+};
+
+std::tuple<Vec<int>, Vec<vec3>> Intersect12(const Manifold::Impl& inP,
+                                            const Manifold::Impl& inQ,
+                                            Vec<std::array<int, 2>>& p1q2,
+                                            double expandP, bool forward) {
   ZoneScoped;
-  Vec<int> x12(p1q2.size());
-  Vec<vec3> v12(p1q2.size());
+  // a: 1 (edge), b: 2 (face)
+  const Manifold::Impl& a = forward ? inP : inQ;
+  const Manifold::Impl& b = forward ? inQ : inP;

-  for_each_n(
-      autoPolicy(p1q2.size(), 1e4), countAt(0_uz), p1q2.size(),
-      Kernel12({x12, v12, p0q2.AsVec64(), s02, z02, p1q1.AsVec64(), s11, xyzz11,
-                inP.halfedge_, inQ.halfedge_, inP.vertPos_, forward, p1q2}));
+  Kernel02 k02{a.vertPos_, b.halfedge_, b.vertPos_,
+               expandP, inP.vertNormal_, forward};
+  Kernel11 k11{inP.vertPos_, inQ.vertPos_, inP.halfedge_,
+               inQ.halfedge_, expandP, inP.vertNormal_};

-  p1q2.KeepFinite(v12, x12);
+  Vec<TmpEdge> tmpedges = CreateTmpEdges(a.halfedge_);
+  Vec<Box> AEdgeBB(tmpedges.size());
+  for_each_n(autoPolicy(tmpedges.size(), 1e5), countAt(0), tmpedges.size(),
+             [&](const int e) {
+               AEdgeBB[e] = Box(a.vertPos_[tmpedges[e].first],
+                                a.vertPos_[tmpedges[e].second]);
+             });
+  Kernel12 k12{a.halfedge_, b.halfedge_, a.vertPos_, forward, k02, k11};
+  Kernel12Recorder recorder{k12, tmpedges, forward, {}};
+
+  b.collider_.Collisions<false, Box, Kernel12Recorder>(AEdgeBB.cview(),
+                                                       recorder);
+
+  Kernel12Tmp result = recorder.get();
+  p1q2 = std::move(result.p1q2_);
+  auto x12 = std::move(result.x12_);
+  auto v12 = std::move(result.v12_);
+  // sort p1q2
+  Vec<size_t> i12(p1q2.size());
+  sequence(i12.begin(), i12.end());
+  stable_sort(i12.begin(), i12.end(), [&](int a, int b) {
+    return p1q2[a][0] < p1q2[b][0] ||
+           (p1q2[a][0] == p1q2[b][0] && p1q2[a][1] < p1q2[b][1]);
+  });
+  Permute(p1q2, i12);
+  Permute(x12, i12);
+  Permute(v12, i12);
   return std::make_tuple(x12, v12);
 };

-Vec<int> Winding03(const Manifold::Impl &inP, Vec<int> &vertices, Vec<int> &s02,
-                   bool reverse) {
+Vec<int> Winding03(const Manifold::Impl& inP, const Manifold::Impl& inQ,
+                   double expandP, bool forward) {
   ZoneScoped;
-  // verts that are not shadowed (not in p0q2) have winding number zero.
-  Vec<int> w03(inP.NumVert(), 0);
-  if (vertices.size() <= 1e5) {
-    for_each_n(ExecutionPolicy::Seq, countAt(0), s02.size(),
-               [&w03, &vertices, &s02, reverse](const int i) {
-                 w03[vertices[i]] += s02[i] * (reverse ? -1 : 1);
-               });
-  } else {
-    for_each_n(ExecutionPolicy::Par, countAt(0), s02.size(),
-               [&w03, &vertices, &s02, reverse](const int i) {
-                 AtomicAdd(w03[vertices[i]], s02[i] * (reverse ? -1 : 1));
-               });
-  }
+  // a: 0 (vertex), b: 2 (face)
+  const Manifold::Impl& a = forward ? inP : inQ;
+  const Manifold::Impl& b = forward ? inQ : inP;
+  Vec<int> w03(a.NumVert(), 0);
+  Kernel02 k02{a.vertPos_, b.halfedge_, b.vertPos_,
+               expandP, inP.vertNormal_, forward};
+  auto f = [&](int a, int b) {
+    const auto [s02, z02] = k02(a, b);
+    if (std::isfinite(z02)) AtomicAdd(w03[a], s02 * (!forward ? -1 : 1));
+  };
+  auto recorder = MakeSimpleRecorder(f);
+  b.collider_.Collisions<false>(a.vertPos_.cview(), recorder);
   return w03;
 };
 } // namespace

 namespace manifold {
-Boolean3::Boolean3(const Manifold::Impl &inP, const Manifold::Impl &inQ,
+Boolean3::Boolean3(const Manifold::Impl& inP, const Manifold::Impl& inQ,
                    OpType op)
     : inP_(inP), inQ_(inQ), expandP_(op == OpType::Add ? 1.0 : -1.0) {
   // Symbolic perturbation:
   // Union -> expand inP
   // Difference, Intersection -> contract inP

 #ifdef MANIFOLD_DEBUG
   Timer broad;
   broad.Start();
 #endif
+  constexpr size_t INT_MAX_SZ =
+      static_cast<size_t>(std::numeric_limits<int>::max());

   if (inP.IsEmpty() || inQ.IsEmpty() || !inP.bBox_.DoesOverlap(inQ.bBox_)) {
     PRINT("No overlap, early out");
@@ -510,88 +461,34 @@ Boolean3::Boolean3(const Manifold::Impl &inP, const Manifold::Impl &inQ,
     return;
   }

-  // Level 3
-  // Find edge-triangle overlaps (broad phase)
-  p1q2_ = inQ_.EdgeCollisions(inP_);
-  p2q1_ = inP_.EdgeCollisions(inQ_, true);  // inverted
-
-  p1q2_.Sort();
-  PRINT("p1q2 size = " << p1q2_.size());
-
-  p2q1_.Sort();
-  PRINT("p2q1 size = " << p2q1_.size());
-
-  // Level 2
-  // Find vertices that overlap faces in XY-projection
-  SparseIndices p0q2 = inQ.VertexCollisionsZ(inP.vertPos_);
-  p0q2.Sort();
-  PRINT("p0q2 size = " << p0q2.size());
-
-  SparseIndices p2q0 = inP.VertexCollisionsZ(inQ.vertPos_, true);  // inverted
-  p2q0.Sort();
-  PRINT("p2q0 size = " << p2q0.size());
-
-  // Find involved edge pairs from Level 3
-  SparseIndices p1q1 = Filter11(inP_, inQ_, p1q2_, p2q1_);
-  PRINT("p1q1 size = " << p1q1.size());
-
 #ifdef MANIFOLD_DEBUG
   broad.Stop();
   Timer intersections;
   intersections.Start();
 #endif

-  // Level 2
-  // Build up XY-projection intersection of two edges, including the z-value for
-  // each edge, keeping only those whose intersection exists.
-  Vec<int> s11;
-  Vec<vec4> xyzz11;
-  std::tie(s11, xyzz11) = Shadow11(p1q1, inP, inQ, expandP_);
-  PRINT("s11 size = " << s11.size());
-
-  // Build up Z-projection of vertices onto triangles, keeping only those that
-  // fall inside the triangle.
-  Vec<int> s02;
-  Vec<double> z02;
-  std::tie(s02, z02) = Shadow02(inP, inQ, p0q2, true, expandP_);
-  PRINT("s02 size = " << s02.size());
-
-  Vec<int> s20;
-  Vec<double> z20;
-  std::tie(s20, z20) = Shadow02(inQ, inP, p2q0, false, expandP_);
-  PRINT("s20 size = " << s20.size());
-
-  // Level 3
-  // Build up the intersection of the edges and triangles, keeping only those
-  // that intersect, and record the direction the edge is passing through the
-  // triangle.
-  std::tie(x12_, v12_) =
-      Intersect12(inP, inQ, s02, p0q2, s11, p1q1, z02, xyzz11, p1q2_, true);
+  std::tie(x12_, v12_) = Intersect12(inP, inQ, p1q2_, expandP_, true);
   PRINT("x12 size = " << x12_.size());

-  std::tie(x21_, v21_) =
-      Intersect12(inQ, inP, s20, p2q0, s11, p1q1, z20, xyzz11, p2q1_, false);
+  std::tie(x21_, v21_) = Intersect12(inP, inQ, p2q1_, expandP_, false);
   PRINT("x21 size = " << x21_.size());

-  s11.clear();
-  xyzz11.clear();
-  z02.clear();
-  z20.clear();
+  if (x12_.size() > INT_MAX_SZ || x21_.size() > INT_MAX_SZ) {
+    valid = false;
+    return;
+  }

-  Vec<int> p0 = p0q2.Copy(false);
-  p0q2.Resize(0);
-  Vec<int> q0 = p2q0.Copy(true);
-  p2q0.Resize(0);
   // Sum up the winding numbers of all vertices.
-  w03_ = Winding03(inP, p0, s02, false);
-
-  w30_ = Winding03(inQ, q0, s20, true);
+  w03_ = Winding03(inP, inQ, expandP_, true);
+  w30_ = Winding03(inP, inQ, expandP_, false);

 #ifdef MANIFOLD_DEBUG
   intersections.Stop();

   if (ManifoldParams().verbose) {
     broad.Print("Broad phase");
     intersections.Print("Intersections");
   }
 #endif
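In the rewritten `Intersect12` above, edge/face pairs are discovered during collider traversal and the per-pair kernel results are appended to thread-local buffers via `tbb::combinable`, then concatenated once in `get()`. The sketch below shows that accumulate-then-combine pattern in isolation, using plain `std::vector` instead of Manifold's `Vec`, `Kernel12`, and recorder types.

```cpp
#include <cstdio>
#include <vector>

#include <tbb/combinable.h>
#include <tbb/parallel_for.h>

// Each worker thread appends matches to its own buffer (no locking on the hot
// path); the per-thread buffers are merged into one result at the end.
int main() {
  tbb::combinable<std::vector<int>> store;

  tbb::parallel_for(0, 10000, [&](int i) {
    if (i % 7 == 0) store.local().push_back(i);  // thread-local append
  });

  std::vector<int> result;
  store.combine_each([&](std::vector<int>& local) {
    result.insert(result.end(), local.begin(), local.end());
  });
  std::printf("kept %zu values\n", result.size());
  return 0;
}
```

As in the diff, the merge order depends on thread scheduling, which is why `Intersect12` sorts the collected `p1q2` pairs before returning them.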
|
thirdparty/manifold/src/boolean3.h (7 changes, vendored)

@@ -13,11 +13,11 @@
 // limitations under the License.

 #pragma once
-#include "./impl.h"
+#include "impl.h"

 #ifdef MANIFOLD_DEBUG
 #define PRINT(msg) \
-  if (ManifoldParams().verbose) std::cout << msg << std::endl;
+  if (ManifoldParams().verbose > 0) std::cout << msg << std::endl;
 #else
 #define PRINT(msg)
 #endif
@@ -53,8 +53,9 @@ class Boolean3 {
  private:
   const Manifold::Impl &inP_, &inQ_;
   const double expandP_;
-  SparseIndices p1q2_, p2q1_;
+  Vec<std::array<int, 2>> p1q2_, p2q1_;
   Vec<int> x12_, x21_, w03_, w30_;
   Vec<vec3> v12_, v21_;
+  bool valid = true;
 };
 } // namespace manifold
|
239
thirdparty/manifold/src/boolean_result.cpp
vendored
239
thirdparty/manifold/src/boolean_result.cpp
vendored
|
@ -16,9 +16,9 @@
|
|||
#include <array>
|
||||
#include <map>
|
||||
|
||||
#include "./boolean3.h"
|
||||
#include "./parallel.h"
|
||||
#include "./utils.h"
|
||||
#include "boolean3.h"
|
||||
#include "parallel.h"
|
||||
#include "utils.h"
|
||||
|
||||
#if (MANIFOLD_PAR == 1) && __has_include(<tbb/concurrent_map.h>)
|
||||
#define TBB_PREVIEW_CONCURRENT_ORDERED_CONTAINERS 1
|
||||
|
@ -37,7 +37,7 @@ using namespace manifold;
|
|||
|
||||
template <>
|
||||
struct std::hash<std::pair<int, int>> {
|
||||
size_t operator()(const std::pair<int, int> &p) const {
|
||||
size_t operator()(const std::pair<int, int>& p) const {
|
||||
return std::hash<int>()(p.first) ^ std::hash<int>()(p.second);
|
||||
}
|
||||
};
|
||||
|
@ -83,12 +83,12 @@ struct CountNewVerts {
|
|||
VecView<int> countP;
|
||||
VecView<int> countQ;
|
||||
VecView<const int> i12;
|
||||
const SparseIndices &pq;
|
||||
const Vec<std::array<int, 2>>& pq;
|
||||
VecView<const Halfedge> halfedges;
|
||||
|
||||
void operator()(const int idx) {
|
||||
int edgeP = pq.Get(idx, inverted);
|
||||
int faceQ = pq.Get(idx, !inverted);
|
||||
int edgeP = pq[idx][inverted ? 1 : 0];
|
||||
int faceQ = pq[idx][inverted ? 0 : 1];
|
||||
int inclusion = std::abs(i12[idx]);
|
||||
|
||||
if (atomic) {
|
||||
|
@ -106,10 +106,10 @@ struct CountNewVerts {
|
|||
};
|
||||
|
||||
std::tuple<Vec<int>, Vec<int>> SizeOutput(
|
||||
Manifold::Impl &outR, const Manifold::Impl &inP, const Manifold::Impl &inQ,
|
||||
const Vec<int> &i03, const Vec<int> &i30, const Vec<int> &i12,
|
||||
const Vec<int> &i21, const SparseIndices &p1q2, const SparseIndices &p2q1,
|
||||
bool invertQ) {
|
||||
Manifold::Impl& outR, const Manifold::Impl& inP, const Manifold::Impl& inQ,
|
||||
const Vec<int>& i03, const Vec<int>& i30, const Vec<int>& i12,
|
||||
const Vec<int>& i21, const Vec<std::array<int, 2>>& p1q2,
|
||||
const Vec<std::array<int, 2>>& p2q1, bool invertQ) {
|
||||
ZoneScoped;
|
||||
Vec<int> sidesPerFacePQ(inP.NumTri() + inQ.NumTri(), 0);
|
||||
// note: numFaceR <= facePQ2R.size() = sidesPerFacePQ.size() + 1
|
||||
|
@ -154,7 +154,7 @@ std::tuple<Vec<int>, Vec<int>> SizeOutput(
|
|||
int numFaceR = facePQ2R.back();
|
||||
facePQ2R.resize(inP.NumTri() + inQ.NumTri());
|
||||
|
||||
outR.faceNormal_.resize(numFaceR);
|
||||
outR.faceNormal_.resize_nofill(numFaceR);
|
||||
|
||||
Vec<size_t> tmpBuffer(outR.faceNormal_.size());
|
||||
auto faceIds = TransformIterator(countAt(0_uz), [&sidesPerFacePQ](size_t i) {
|
||||
|
@ -196,8 +196,9 @@ std::tuple<Vec<int>, Vec<int>> SizeOutput(
|
|||
}
|
||||
|
||||
struct EdgePos {
|
||||
int vert;
|
||||
double edgePos;
|
||||
int vert;
|
||||
int collisionId;
|
||||
bool isStart;
|
||||
};
|
||||
|
||||
|
@ -212,8 +213,8 @@ void AddNewEdgeVerts(
|
|||
// we need concurrent_map because we will be adding things concurrently
|
||||
concurrent_map<int, std::vector<EdgePos>> &edgesP,
|
||||
concurrent_map<std::pair<int, int>, std::vector<EdgePos>> &edgesNew,
|
||||
const SparseIndices &p1q2, const Vec<int> &i12, const Vec<int> &v12R,
|
||||
const Vec<Halfedge> &halfedgeP, bool forward) {
|
||||
const Vec<std::array<int, 2>> &p1q2, const Vec<int> &i12, const Vec<int> &v12R,
|
||||
const Vec<Halfedge> &halfedgeP, bool forward, size_t offset) {
|
||||
ZoneScoped;
|
||||
// For each edge of P that intersects a face of Q (p1q2), add this vertex to
|
||||
// P's corresponding edge vector and to the two new edges, which are
|
||||
|
@ -222,8 +223,8 @@ void AddNewEdgeVerts(
|
|||
// the output vert index. When forward is false, all is reversed.
|
||||
auto process = [&](std::function<void(size_t)> lock,
|
||||
std::function<void(size_t)> unlock, size_t i) {
|
||||
const int edgeP = p1q2.Get(i, !forward);
|
||||
const int faceQ = p1q2.Get(i, forward);
|
||||
const int edgeP = p1q2[i][forward ? 0 : 1];
|
||||
const int faceQ = p1q2[i][forward ? 1 : 0];
|
||||
const int vert = v12R[i];
|
||||
const int inclusion = i12[i];
|
||||
|
||||
|
@ -236,21 +237,22 @@ void AddNewEdgeVerts(
|
|||
|
||||
bool direction = inclusion < 0;
|
||||
std::hash<std::pair<int, int>> pairHasher;
|
||||
std::array<std::tuple<bool, size_t, std::vector<EdgePos> *>, 3> edges = {
|
||||
std::array<std::tuple<bool, size_t, std::vector<EdgePos>*>, 3> edges = {
|
||||
std::make_tuple(direction, std::hash<int>{}(edgeP), &edgesP[edgeP]),
|
||||
std::make_tuple(direction ^ !forward, // revert if not forward
|
||||
pairHasher(keyRight), &edgesNew[keyRight]),
|
||||
std::make_tuple(direction ^ forward, // revert if forward
|
||||
pairHasher(keyLeft), &edgesNew[keyLeft])};
|
||||
for (const auto &tuple : edges) {
|
||||
for (const auto& tuple : edges) {
|
||||
lock(std::get<1>(tuple));
|
||||
for (int j = 0; j < std::abs(inclusion); ++j)
|
||||
std::get<2>(tuple)->push_back({vert + j, 0.0, std::get<0>(tuple)});
|
||||
std::get<2>(tuple)->push_back(
|
||||
{0.0, vert + j, static_cast<int>(i + offset), std::get<0>(tuple)});
|
||||
unlock(std::get<1>(tuple));
|
||||
direction = !direction;
|
||||
}
|
||||
};
|
||||
#if (MANIFOLD_PAR == 1) && __has_include(<tbb/tbb.h>)
|
||||
#if (MANIFOLD_PAR == 1) && __has_include(<tbb/concurrent_map.h>)
|
||||
// parallelize operations, requires concurrent_map so we can only enable this
|
||||
// with tbb
|
||||
if (p1q2.size() > kParallelThreshold) {
|
||||
|
@ -264,19 +266,19 @@ void AddNewEdgeVerts(
|
|||
std::placeholders::_1);
|
||||
tbb::parallel_for(
|
||||
tbb::blocked_range<size_t>(0_uz, p1q2.size(), 32),
|
||||
[&](const tbb::blocked_range<size_t> &range) {
|
||||
[&](const tbb::blocked_range<size_t>& range) {
|
||||
for (size_t i = range.begin(); i != range.end(); i++) processFun(i);
|
||||
},
|
||||
ap);
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
auto processFun = std::bind(
|
||||
process, [](size_t _) {}, [](size_t _) {}, std::placeholders::_1);
|
||||
auto processFun =
|
||||
std::bind(process, [](size_t) {}, [](size_t) {}, std::placeholders::_1);
|
||||
for (size_t i = 0; i < p1q2.size(); ++i) processFun(i);
|
||||
}
|
||||
|
||||
std::vector<Halfedge> PairUp(std::vector<EdgePos> &edgePos) {
|
||||
std::vector<Halfedge> PairUp(std::vector<EdgePos>& edgePos) {
|
||||
// Pair start vertices with end vertices to form edges. The choice of pairing
|
||||
// is arbitrary for the manifoldness guarantee, but must be ordered to be
|
||||
// geometrically valid. If the order does not go start-end-start-end... then
|
||||
|
@ -289,7 +291,11 @@ std::vector<Halfedge> PairUp(std::vector<EdgePos> &edgePos) {
|
|||
[](EdgePos x) { return x.isStart; });
|
||||
DEBUG_ASSERT(static_cast<size_t>(middle - edgePos.begin()) == nEdges,
|
||||
topologyErr, "Non-manifold edge!");
|
||||
auto cmp = [](EdgePos a, EdgePos b) { return a.edgePos < b.edgePos; };
|
||||
auto cmp = [](EdgePos a, EdgePos b) {
|
||||
return a.edgePos < b.edgePos ||
|
||||
// we also sort by collisionId to make things deterministic
|
||||
(a.edgePos == b.edgePos && a.collisionId < b.collisionId);
|
||||
};
|
||||
std::stable_sort(edgePos.begin(), middle, cmp);
|
||||
std::stable_sort(middle, edgePos.end(), cmp);
|
||||
std::vector<Halfedge> edges;
|
||||
|
@ -298,11 +304,11 @@ std::vector<Halfedge> PairUp(std::vector<EdgePos> &edgePos) {
|
|||
return edges;
|
||||
}
|
||||
|
||||
void AppendPartialEdges(Manifold::Impl &outR, Vec<char> &wholeHalfedgeP,
|
||||
Vec<int> &facePtrR,
|
||||
concurrent_map<int, std::vector<EdgePos>> &edgesP,
|
||||
Vec<TriRef> &halfedgeRef, const Manifold::Impl &inP,
|
||||
const Vec<int> &i03, const Vec<int> &vP2R,
|
||||
void AppendPartialEdges(Manifold::Impl& outR, Vec<char>& wholeHalfedgeP,
|
||||
Vec<int>& facePtrR,
|
||||
concurrent_map<int, std::vector<EdgePos>>& edgesP,
|
||||
Vec<TriRef>& halfedgeRef, const Manifold::Impl& inP,
|
||||
const Vec<int>& i03, const Vec<int>& vP2R,
|
||||
const Vec<int>::IterC faceP2R, bool forward) {
|
||||
ZoneScoped;
|
||||
// Each edge in the map is partially retained; for each of these, look up
|
||||
|
@ -310,15 +316,15 @@ void AppendPartialEdges(Manifold::Impl &outR, Vec<char> &wholeHalfedgeP,
|
|||
// while remapping them to the output using vP2R. Use the verts position
|
||||
// projected along the edge vector to pair them up, then distribute these
|
||||
// edges to their faces.
|
||||
Vec<Halfedge> &halfedgeR = outR.halfedge_;
|
||||
const Vec<vec3> &vertPosP = inP.vertPos_;
|
||||
const Vec<Halfedge> &halfedgeP = inP.halfedge_;
|
||||
Vec<Halfedge>& halfedgeR = outR.halfedge_;
|
||||
const Vec<vec3>& vertPosP = inP.vertPos_;
|
||||
const Vec<Halfedge>& halfedgeP = inP.halfedge_;
|
||||
|
||||
for (auto &value : edgesP) {
|
||||
for (auto& value : edgesP) {
|
||||
const int edgeP = value.first;
|
||||
std::vector<EdgePos> &edgePosP = value.second;
|
||||
std::vector<EdgePos>& edgePosP = value.second;
|
||||
|
||||
const Halfedge &halfedge = halfedgeP[edgeP];
|
||||
const Halfedge& halfedge = halfedgeP[edgeP];
|
||||
wholeHalfedgeP[edgeP] = false;
|
||||
wholeHalfedgeP[halfedge.pairedHalfedge] = false;
|
||||
|
||||
|
@ -326,13 +332,13 @@ void AppendPartialEdges(Manifold::Impl &outR, Vec<char> &wholeHalfedgeP,
|
|||
const int vEnd = halfedge.endVert;
|
||||
const vec3 edgeVec = vertPosP[vEnd] - vertPosP[vStart];
|
||||
// Fill in the edge positions of the old points.
|
||||
for (EdgePos &edge : edgePosP) {
|
||||
for (EdgePos& edge : edgePosP) {
|
||||
edge.edgePos = la::dot(outR.vertPos_[edge.vert], edgeVec);
|
||||
}
|
||||
|
||||
int inclusion = i03[vStart];
|
||||
EdgePos edgePos = {vP2R[vStart],
|
||||
la::dot(outR.vertPos_[vP2R[vStart]], edgeVec),
|
||||
EdgePos edgePos = {la::dot(outR.vertPos_[vP2R[vStart]], edgeVec),
|
||||
vP2R[vStart], std::numeric_limits<int>::max(),
|
||||
inclusion > 0};
|
||||
for (int j = 0; j < std::abs(inclusion); ++j) {
|
||||
edgePosP.push_back(edgePos);
|
||||
|
@ -340,8 +346,8 @@ void AppendPartialEdges(Manifold::Impl &outR, Vec<char> &wholeHalfedgeP,
|
|||
}
|
||||
|
||||
inclusion = i03[vEnd];
|
||||
edgePos = {vP2R[vEnd], la::dot(outR.vertPos_[vP2R[vEnd]], edgeVec),
|
||||
inclusion < 0};
|
||||
edgePos = {la::dot(outR.vertPos_[vP2R[vEnd]], edgeVec), vP2R[vEnd],
|
||||
std::numeric_limits<int>::max(), inclusion < 0};
|
||||
for (int j = 0; j < std::abs(inclusion); ++j) {
|
||||
edgePosP.push_back(edgePos);
|
||||
++edgePos.vert;
|
||||
|
@ -359,8 +365,8 @@ void AppendPartialEdges(Manifold::Impl &outR, Vec<char> &wholeHalfedgeP,
|
|||
// reference is now to the endVert instead of the startVert, which is one
|
||||
// position advanced CCW. This is only valid if this is a retained vert; it
|
||||
// will be ignored later if the vert is new.
|
||||
const TriRef forwardRef = {forward ? 0 : 1, -1, faceLeftP};
|
||||
const TriRef backwardRef = {forward ? 0 : 1, -1, faceRightP};
|
||||
const TriRef forwardRef = {forward ? 0 : 1, -1, faceLeftP, -1};
|
||||
const TriRef backwardRef = {forward ? 0 : 1, -1, faceRightP, -1};
|
||||
|
||||
for (Halfedge e : edges) {
|
||||
const int forwardEdge = facePtrR[faceLeft]++;
|
||||
|
@ -379,18 +385,18 @@ void AppendPartialEdges(Manifold::Impl &outR, Vec<char> &wholeHalfedgeP,
|
|||
}
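The comment at the top of AppendPartialEdges describes pairing the retained and new vertices by their position projected along the edge vector. Below is a simplified, hypothetical sketch of that pairing idea using plain arrays, not the library's EdgePos/PairUp machinery; the real code also separates start and end vertices by inclusion sign.

#include <algorithm>
#include <cstdio>
#include <utility>
#include <vector>

int main() {
  // Hypothetical intersection verts lying on an edge that runs along +X.
  const double edgeVec[3] = {1.0, 0.0, 0.0};
  const double verts[4][3] = {{0.9, 0, 0}, {0.1, 0, 0}, {0.5, 0, 0}, {0.3, 0, 0}};
  std::vector<std::pair<double, int>> edgePos;  // (projection along edge, vert id)
  for (int v = 0; v < 4; ++v) {
    const double d = verts[v][0] * edgeVec[0] + verts[v][1] * edgeVec[1] +
                     verts[v][2] * edgeVec[2];
    edgePos.push_back({d, v});
  }
  std::sort(edgePos.begin(), edgePos.end());           // order along the edge
  for (size_t i = 0; i + 1 < edgePos.size(); i += 2)   // consecutive pairs
    printf("sub-edge: %d -> %d\n", edgePos[i].second, edgePos[i + 1].second);
  // prints: sub-edge: 1 -> 3 and sub-edge: 2 -> 0
}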
|
||||
|
||||
void AppendNewEdges(
|
||||
Manifold::Impl &outR, Vec<int> &facePtrR,
|
||||
concurrent_map<std::pair<int, int>, std::vector<EdgePos>> &edgesNew,
|
||||
Vec<TriRef> &halfedgeRef, const Vec<int> &facePQ2R, const int numFaceP) {
|
||||
Manifold::Impl& outR, Vec<int>& facePtrR,
|
||||
concurrent_map<std::pair<int, int>, std::vector<EdgePos>>& edgesNew,
|
||||
Vec<TriRef>& halfedgeRef, const Vec<int>& facePQ2R, const int numFaceP) {
|
||||
ZoneScoped;
|
||||
// Pair up each edge's verts and distribute to faces based on indices in key.
|
||||
Vec<Halfedge> &halfedgeR = outR.halfedge_;
|
||||
Vec<vec3> &vertPosR = outR.vertPos_;
|
||||
Vec<Halfedge>& halfedgeR = outR.halfedge_;
|
||||
Vec<vec3>& vertPosR = outR.vertPos_;
|
||||
|
||||
for (auto &value : edgesNew) {
|
||||
for (auto& value : edgesNew) {
|
||||
const int faceP = value.first.first;
|
||||
const int faceQ = value.first.second;
|
||||
std::vector<EdgePos> &edgePos = value.second;
|
||||
std::vector<EdgePos>& edgePos = value.second;
|
||||
|
||||
Box bbox;
|
||||
for (auto edge : edgePos) {
|
||||
|
@ -401,7 +407,7 @@ void AppendNewEdges(
|
|||
const int i = (size.x > size.y && size.x > size.z) ? 0
|
||||
: size.y > size.z ? 1
|
||||
: 2;
|
||||
for (auto &edge : edgePos) {
|
||||
for (auto& edge : edgePos) {
|
||||
edge.edgePos = vertPosR[edge.vert][i];
|
||||
}
|
||||
|
||||
|
@ -411,8 +417,8 @@ void AppendNewEdges(
|
|||
// add halfedges to result
|
||||
const int faceLeft = facePQ2R[faceP];
|
||||
const int faceRight = facePQ2R[numFaceP + faceQ];
|
||||
const TriRef forwardRef = {0, -1, faceP};
|
||||
const TriRef backwardRef = {1, -1, faceQ};
|
||||
const TriRef forwardRef = {0, -1, faceP, -1};
|
||||
const TriRef backwardRef = {1, -1, faceQ, -1};
|
||||
for (Halfedge e : edges) {
|
||||
const int forwardEdge = facePtrR[faceLeft]++;
|
||||
const int backwardEdge = facePtrR[faceRight]++;
|
||||
|
@ -459,8 +465,8 @@ struct DuplicateHalfedges {
|
|||
// Negative inclusion means the halfedges are reversed, which means our
|
||||
// reference is now to the endVert instead of the startVert, which is one
|
||||
// position advanced CCW.
|
||||
const TriRef forwardRef = {forward ? 0 : 1, -1, faceLeftP};
|
||||
const TriRef backwardRef = {forward ? 0 : 1, -1, faceRightP};
|
||||
const TriRef forwardRef = {forward ? 0 : 1, -1, faceLeftP, -1};
|
||||
const TriRef backwardRef = {forward ? 0 : 1, -1, faceRightP, -1};
|
||||
|
||||
for (int i = 0; i < std::abs(inclusion); ++i) {
|
||||
int forwardEdge = AtomicAdd(facePtr[newFace], 1);
|
||||
|
@ -479,10 +485,10 @@ struct DuplicateHalfedges {
|
|||
}
|
||||
};
|
||||
|
||||
void AppendWholeEdges(Manifold::Impl &outR, Vec<int> &facePtrR,
|
||||
Vec<TriRef> &halfedgeRef, const Manifold::Impl &inP,
|
||||
const Vec<char> wholeHalfedgeP, const Vec<int> &i03,
|
||||
const Vec<int> &vP2R, VecView<const int> faceP2R,
|
||||
void AppendWholeEdges(Manifold::Impl& outR, Vec<int>& facePtrR,
|
||||
Vec<TriRef>& halfedgeRef, const Manifold::Impl& inP,
|
||||
const Vec<char> wholeHalfedgeP, const Vec<int>& i03,
|
||||
const Vec<int>& vP2R, VecView<const int> faceP2R,
|
||||
bool forward) {
|
||||
ZoneScoped;
|
||||
for_each_n(
|
||||
|
@ -496,26 +502,26 @@ struct MapTriRef {
|
|||
VecView<const TriRef> triRefQ;
|
||||
const int offsetQ;
|
||||
|
||||
void operator()(TriRef &triRef) {
|
||||
const int tri = triRef.tri;
|
||||
void operator()(TriRef& triRef) {
|
||||
const int tri = triRef.faceID;
|
||||
const bool PQ = triRef.meshID == 0;
|
||||
triRef = PQ ? triRefP[tri] : triRefQ[tri];
|
||||
if (!PQ) triRef.meshID += offsetQ;
|
||||
}
|
||||
};
|
||||
|
||||
void UpdateReference(Manifold::Impl &outR, const Manifold::Impl &inP,
|
||||
const Manifold::Impl &inQ, bool invertQ) {
|
||||
void UpdateReference(Manifold::Impl& outR, const Manifold::Impl& inP,
|
||||
const Manifold::Impl& inQ, bool invertQ) {
|
||||
const int offsetQ = Manifold::Impl::meshIDCounter_;
|
||||
for_each_n(
|
||||
autoPolicy(outR.NumTri(), 1e5), outR.meshRelation_.triRef.begin(),
|
||||
outR.NumTri(),
|
||||
MapTriRef({inP.meshRelation_.triRef, inQ.meshRelation_.triRef, offsetQ}));
|
||||
|
||||
for (const auto &pair : inP.meshRelation_.meshIDtransform) {
|
||||
for (const auto& pair : inP.meshRelation_.meshIDtransform) {
|
||||
outR.meshRelation_.meshIDtransform[pair.first] = pair.second;
|
||||
}
|
||||
for (const auto &pair : inQ.meshRelation_.meshIDtransform) {
|
||||
for (const auto& pair : inQ.meshRelation_.meshIDtransform) {
|
||||
outR.meshRelation_.meshIDtransform[pair.first + offsetQ] = pair.second;
|
||||
outR.meshRelation_.meshIDtransform[pair.first + offsetQ].backSide ^=
|
||||
invertQ;
|
||||
|
@ -537,10 +543,10 @@ struct Barycentric {
|
|||
const TriRef refPQ = ref[tri];
|
||||
if (halfedgeR[3 * tri].startVert < 0) return;
|
||||
|
||||
const int triPQ = refPQ.tri;
|
||||
const int triPQ = refPQ.faceID;
|
||||
const bool PQ = refPQ.meshID == 0;
|
||||
const auto &vertPos = PQ ? vertPosP : vertPosQ;
|
||||
const auto &halfedge = PQ ? halfedgeP : halfedgeQ;
|
||||
const auto& vertPos = PQ ? vertPosP : vertPosQ;
|
||||
const auto& halfedge = PQ ? halfedgeP : halfedgeQ;
|
||||
|
||||
mat3 triPos;
|
||||
for (const int j : {0, 1, 2})
|
||||
|
@ -553,18 +559,16 @@ struct Barycentric {
|
|||
}
|
||||
};
|
||||
|
||||
void CreateProperties(Manifold::Impl &outR, const Manifold::Impl &inP,
|
||||
const Manifold::Impl &inQ) {
|
||||
void CreateProperties(Manifold::Impl& outR, const Manifold::Impl& inP,
|
||||
const Manifold::Impl& inQ) {
|
||||
ZoneScoped;
|
||||
const int numPropP = inP.NumProp();
|
||||
const int numPropQ = inQ.NumProp();
|
||||
const int numProp = std::max(numPropP, numPropQ);
|
||||
outR.meshRelation_.numProp = numProp;
|
||||
outR.numProp_ = numProp;
|
||||
if (numProp == 0) return;
|
||||
|
||||
const int numTri = outR.NumTri();
|
||||
outR.meshRelation_.triProperties.resize(numTri);
|
||||
|
||||
Vec<vec3> bary(outR.halfedge_.size());
|
||||
for_each_n(autoPolicy(numTri, 1e4), countAt(0), numTri,
|
||||
Barycentric({bary, outR.meshRelation_.triRef, inP.vertPos_,
|
||||
|
@ -578,7 +582,7 @@ void CreateProperties(Manifold::Impl &outR, const Manifold::Impl &inP,
|
|||
propMissIdx[0].resize(inQ.NumPropVert(), -1);
|
||||
propMissIdx[1].resize(inP.NumPropVert(), -1);
|
||||
|
||||
outR.meshRelation_.properties.reserve(outR.NumVert() * numProp);
|
||||
outR.properties_.reserve(outR.NumVert() * numProp);
|
||||
int idx = 0;
|
||||
|
||||
for (int tri = 0; tri < numTri; ++tri) {
|
||||
|
@ -588,15 +592,12 @@ void CreateProperties(Manifold::Impl &outR, const Manifold::Impl &inP,
|
|||
const TriRef ref = outR.meshRelation_.triRef[tri];
|
||||
const bool PQ = ref.meshID == 0;
|
||||
const int oldNumProp = PQ ? numPropP : numPropQ;
|
||||
const auto &properties =
|
||||
PQ ? inP.meshRelation_.properties : inQ.meshRelation_.properties;
|
||||
const ivec3 &triProp = oldNumProp == 0 ? ivec3(-1)
|
||||
: PQ ? inP.meshRelation_.triProperties[ref.tri]
|
||||
: inQ.meshRelation_.triProperties[ref.tri];
|
||||
const auto& properties = PQ ? inP.properties_ : inQ.properties_;
|
||||
const auto& halfedge = PQ ? inP.halfedge_ : inQ.halfedge_;
|
||||
|
||||
for (const int i : {0, 1, 2}) {
|
||||
const int vert = outR.halfedge_[3 * tri + i].startVert;
|
||||
const vec3 &uvw = bary[3 * tri + i];
|
||||
const vec3& uvw = bary[3 * tri + i];
|
||||
|
||||
ivec4 key(PQ, idMissProp, -1, -1);
|
||||
if (oldNumProp > 0) {
|
||||
|
@ -604,7 +605,7 @@ void CreateProperties(Manifold::Impl &outR, const Manifold::Impl &inP,
|
|||
for (const int j : {0, 1, 2}) {
|
||||
if (uvw[j] == 1) {
|
||||
// On a retained vert, the propVert must also match
|
||||
key[2] = triProp[j];
|
||||
key[2] = halfedge[3 * ref.faceID + j].propVert;
|
||||
edge = -1;
|
||||
break;
|
||||
}
|
||||
|
@ -612,8 +613,8 @@ void CreateProperties(Manifold::Impl &outR, const Manifold::Impl &inP,
|
|||
}
|
||||
if (edge >= 0) {
|
||||
// On an edge, both propVerts must match
|
||||
const int p0 = triProp[Next3(edge)];
|
||||
const int p1 = triProp[Prev3(edge)];
|
||||
const int p0 = halfedge[3 * ref.faceID + Next3(edge)].propVert;
|
||||
const int p1 = halfedge[3 * ref.faceID + Prev3(edge)].propVert;
|
||||
key[1] = vert;
|
||||
key[2] = std::min(p0, p1);
|
||||
key[3] = std::max(p0, p1);
|
||||
|
@ -624,19 +625,19 @@ void CreateProperties(Manifold::Impl &outR, const Manifold::Impl &inP,
|
|||
|
||||
if (key.y == idMissProp && key.z >= 0) {
|
||||
// only key.x/key.z matters
|
||||
auto &entry = propMissIdx[key.x][key.z];
|
||||
auto& entry = propMissIdx[key.x][key.z];
|
||||
if (entry >= 0) {
|
||||
outR.meshRelation_.triProperties[tri][i] = entry;
|
||||
outR.halfedge_[3 * tri + i].propVert = entry;
|
||||
continue;
|
||||
}
|
||||
entry = idx;
|
||||
} else {
|
||||
auto &bin = propIdx[key.y];
|
||||
auto& bin = propIdx[key.y];
|
||||
bool bFound = false;
|
||||
for (const auto &b : bin) {
|
||||
for (const auto& b : bin) {
|
||||
if (b.first == ivec3(key.x, key.z, key.w)) {
|
||||
bFound = true;
|
||||
outR.meshRelation_.triProperties[tri][i] = b.second;
|
||||
outR.halfedge_[3 * tri + i].propVert = b.second;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -644,20 +645,55 @@ void CreateProperties(Manifold::Impl &outR, const Manifold::Impl &inP,
|
|||
bin.push_back(std::make_pair(ivec3(key.x, key.z, key.w), idx));
|
||||
}
|
||||
|
||||
outR.meshRelation_.triProperties[tri][i] = idx++;
|
||||
outR.halfedge_[3 * tri + i].propVert = idx++;
|
||||
for (int p = 0; p < numProp; ++p) {
|
||||
if (p < oldNumProp) {
|
||||
vec3 oldProps;
|
||||
for (const int j : {0, 1, 2})
|
||||
oldProps[j] = properties[oldNumProp * triProp[j] + p];
|
||||
outR.meshRelation_.properties.push_back(la::dot(uvw, oldProps));
|
||||
oldProps[j] =
|
||||
properties[oldNumProp * halfedge[3 * ref.faceID + j].propVert +
|
||||
p];
|
||||
outR.properties_.push_back(la::dot(uvw, oldProps));
|
||||
} else {
|
||||
outR.meshRelation_.properties.push_back(0);
|
||||
outR.properties_.push_back(0);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void ReorderHalfedges(VecView<Halfedge>& halfedges) {
  // halfedges in the same face are added in non-deterministic order, so we have
  // to reorder them for determinism

  // step 1: reorder within the same face, such that the halfedge with the
  // smallest starting vertex is placed first
  for_each(autoPolicy(halfedges.size() / 3), countAt(0),
           countAt(halfedges.size() / 3), [&halfedges](size_t tri) {
             std::array<Halfedge, 3> face = {halfedges[tri * 3],
                                             halfedges[tri * 3 + 1],
                                             halfedges[tri * 3 + 2]};
             int index = 0;
             for (int i : {1, 2})
               if (face[i].startVert < face[index].startVert) index = i;
             for (int i : {0, 1, 2})
               halfedges[tri * 3 + i] = face[(index + i) % 3];
           });
  // step 2: fix paired halfedge
  for_each(autoPolicy(halfedges.size() / 3), countAt(0),
           countAt(halfedges.size() / 3), [&halfedges](size_t tri) {
             for (int i : {0, 1, 2}) {
               Halfedge& curr = halfedges[tri * 3 + i];
               int oppositeFace = curr.pairedHalfedge / 3;
               int index = -1;
               for (int j : {0, 1, 2})
                 if (curr.startVert == halfedges[oppositeFace * 3 + j].endVert)
                   index = j;
               curr.pairedHalfedge = oppositeFace * 3 + index;
             }
           });
}
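The step 1 rotation above is what makes the output deterministic: any cyclic order in which a face's three halfedges were produced collapses to the same canonical order. A standalone sketch with a hypothetical stand-in struct, not the library's Halfedge:

#include <array>
#include <cstdio>

struct Edge { int startVert; };  // hypothetical stand-in; only startVert matters here

// Rotate a face's three edges so the one with the smallest startVert is first.
void Canonicalize(std::array<Edge, 3>& face) {
  int index = 0;
  for (int i : {1, 2})
    if (face[i].startVert < face[index].startVert) index = i;
  const std::array<Edge, 3> tmp = face;
  for (int i : {0, 1, 2}) face[i] = tmp[(index + i) % 3];
}

int main() {
  std::array<Edge, 3> a = {{{7}, {2}, {5}}};  // same face, produced in...
  std::array<Edge, 3> b = {{{5}, {7}, {2}}};  // ...two different cyclic orders
  Canonicalize(a);
  Canonicalize(b);
  printf("%d %d %d\n", a[0].startVert, a[1].startVert, a[2].startVert);  // 2 5 7
  printf("%d %d %d\n", b[0].startVert, b[1].startVert, b[2].startVert);  // 2 5 7
}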
|
||||
|
||||
} // namespace
|
||||
|
||||
namespace manifold {
|
||||
|
@ -697,6 +733,12 @@ Manifold::Impl Boolean3::Result(OpType op) const {
|
|||
return inP_;
|
||||
}
|
||||
|
||||
if (!valid) {
|
||||
auto impl = Manifold::Impl();
|
||||
impl.status_ = Manifold::Error::ResultTooLarge;
|
||||
return impl;
|
||||
}
|
||||
|
||||
const bool invertQ = op == OpType::Subtract;
|
||||
|
||||
// Convert winding numbers to inclusion values based on operation type.
|
||||
|
@ -746,7 +788,7 @@ Manifold::Impl Boolean3::Result(OpType op) const {
|
|||
outR.epsilon_ = std::max(inP_.epsilon_, inQ_.epsilon_);
|
||||
outR.tolerance_ = std::max(inP_.tolerance_, inQ_.tolerance_);
|
||||
|
||||
outR.vertPos_.resize(numVertR);
|
||||
outR.vertPos_.resize_nofill(numVertR);
|
||||
// Add vertices, duplicating for inclusion numbers not in [-1, 1].
|
||||
// Retained vertices from P and Q:
|
||||
for_each_n(autoPolicy(inP_.NumVert(), 1e4), countAt(0), inP_.NumVert(),
|
||||
|
@ -775,8 +817,9 @@ Manifold::Impl Boolean3::Result(OpType op) const {
|
|||
// This key is the face index of <P, Q>
|
||||
concurrent_map<std::pair<int, int>, std::vector<EdgePos>> edgesNew;
|
||||
|
||||
AddNewEdgeVerts(edgesP, edgesNew, p1q2_, i12, v12R, inP_.halfedge_, true);
|
||||
AddNewEdgeVerts(edgesQ, edgesNew, p2q1_, i21, v21R, inQ_.halfedge_, false);
|
||||
AddNewEdgeVerts(edgesP, edgesNew, p1q2_, i12, v12R, inP_.halfedge_, true, 0);
|
||||
AddNewEdgeVerts(edgesQ, edgesNew, p2q1_, i21, v21R, inQ_.halfedge_, false,
|
||||
p1q2_.size());
|
||||
|
||||
v12R.clear();
|
||||
v21R.clear();
|
||||
|
@ -842,6 +885,8 @@ Manifold::Impl Boolean3::Result(OpType op) const {
|
|||
halfedgeRef.clear();
|
||||
faceEdge.clear();
|
||||
|
||||
ReorderHalfedges(outR.halfedge_);
|
||||
|
||||
#ifdef MANIFOLD_DEBUG
|
||||
triangulate.Stop();
|
||||
Timer simplify;
|
||||
|
@ -856,7 +901,7 @@ Manifold::Impl Boolean3::Result(OpType op) const {
|
|||
|
||||
UpdateReference(outR, inP_, inQ_, invertQ);
|
||||
|
||||
outR.SimplifyTopology();
|
||||
outR.SimplifyTopology(nPv + nQv);
|
||||
outR.RemoveUnreferencedVerts();
|
||||
|
||||
if (ManifoldParams().intermediateChecks)
|
||||
|
|
121 thirdparty/manifold/src/collider.h vendored
|
@@ -13,11 +13,10 @@
|
|||
// limitations under the License.
|
||||
|
||||
#pragma once
|
||||
#include "./parallel.h"
|
||||
#include "./sparse.h"
|
||||
#include "./utils.h"
|
||||
#include "./vec.h"
|
||||
#include "manifold/common.h"
|
||||
#include "parallel.h"
|
||||
#include "utils.h"
|
||||
#include "vec.h"
|
||||
|
||||
#ifdef _MSC_VER
|
||||
#include <intrin.h>
|
||||
|
@ -40,7 +39,7 @@ constexpr int kRoot = 1;
|
|||
#ifdef _MSC_VER
|
||||
|
||||
#ifndef _WINDEF_
|
||||
typedef unsigned long DWORD;
|
||||
using DWORD = unsigned long;
|
||||
#endif
|
||||
|
||||
uint32_t inline ctz(uint32_t value) {
|
||||
|
@ -163,14 +162,16 @@ struct FindCollision {
|
|||
VecView<const T> queries;
|
||||
VecView<const Box> nodeBBox_;
|
||||
VecView<const std::pair<int, int>> internalChildren_;
|
||||
Recorder recorder;
|
||||
Recorder& recorder;
|
||||
|
||||
inline int RecordCollision(int node, const int queryIdx, SparseIndices& ind) {
|
||||
using Local = typename Recorder::Local;
|
||||
|
||||
inline int RecordCollision(int node, const int queryIdx, Local& local) {
|
||||
bool overlaps = nodeBBox_[node].DoesOverlap(queries[queryIdx]);
|
||||
if (overlaps && IsLeaf(node)) {
|
||||
int leafIdx = Node2Leaf(node);
|
||||
if (!selfCollision || leafIdx != queryIdx) {
|
||||
recorder.record(queryIdx, leafIdx, ind);
|
||||
recorder.record(queryIdx, leafIdx, local);
|
||||
}
|
||||
}
|
||||
return overlaps && IsInternal(node); // Should traverse into node
|
||||
|
@ -183,14 +184,14 @@ struct FindCollision {
|
|||
int top = -1;
|
||||
// Depth-first search
|
||||
int node = kRoot;
|
||||
SparseIndices& ind = recorder.local();
|
||||
Local& local = recorder.local();
|
||||
while (1) {
|
||||
int internal = Node2Internal(node);
|
||||
int child1 = internalChildren_[internal].first;
|
||||
int child2 = internalChildren_[internal].second;
|
||||
|
||||
int traverse1 = RecordCollision(child1, queryIdx, ind);
|
||||
int traverse2 = RecordCollision(child2, queryIdx, ind);
|
||||
int traverse1 = RecordCollision(child1, queryIdx, local);
|
||||
int traverse2 = RecordCollision(child2, queryIdx, local);
|
||||
|
||||
if (!traverse1 && !traverse2) {
|
||||
if (top < 0) break; // done
|
||||
|
@ -205,35 +206,6 @@ struct FindCollision {
|
|||
}
|
||||
};
|
||||
|
||||
template <const bool inverted>
|
||||
struct SeqCollisionRecorder {
|
||||
SparseIndices& queryTri_;
|
||||
inline void record(int queryIdx, int leafIdx, SparseIndices& ind) const {
|
||||
if (inverted)
|
||||
ind.Add(leafIdx, queryIdx);
|
||||
else
|
||||
ind.Add(queryIdx, leafIdx);
|
||||
}
|
||||
SparseIndices& local() { return queryTri_; }
|
||||
};
|
||||
|
||||
#if (MANIFOLD_PAR == 1)
|
||||
template <const bool inverted>
|
||||
struct ParCollisionRecorder {
|
||||
tbb::combinable<SparseIndices>& store;
|
||||
inline void record(int queryIdx, int leafIdx, SparseIndices& ind) const {
|
||||
// Add may invoke something in parallel, and it may return in
|
||||
// another thread, making thread local unsafe
|
||||
// we need to explicitly forbid parallelization by passing a flag
|
||||
if (inverted)
|
||||
ind.Add(leafIdx, queryIdx, true);
|
||||
else
|
||||
ind.Add(queryIdx, leafIdx, true);
|
||||
}
|
||||
SparseIndices& local() { return store.local(); }
|
||||
};
|
||||
#endif
|
||||
|
||||
struct BuildInternalBoxes {
|
||||
VecView<Box> nodeBBox_;
|
||||
VecView<int> counter_;
|
||||
|
@ -266,6 +238,22 @@ constexpr inline uint32_t SpreadBits3(uint32_t v) {
|
|||
}
|
||||
} // namespace collider_internal
|
||||
|
||||
template <typename F>
|
||||
struct SimpleRecorder {
|
||||
using Local = F;
|
||||
F& f;
|
||||
|
||||
inline void record(int queryIdx, int leafIdx, F& f) const {
|
||||
f(queryIdx, leafIdx);
|
||||
}
|
||||
Local& local() { return f; }
|
||||
};
|
||||
|
||||
template <typename F>
|
||||
inline SimpleRecorder<F> MakeSimpleRecorder(F& f) {
|
||||
return SimpleRecorder<F>{f};
|
||||
}
|
||||
|
||||
/** @ingroup Private */
|
||||
class Collider {
|
||||
public:
|
||||
|
@ -278,7 +266,7 @@ class Collider {
|
|||
"vectors must be the same length");
|
||||
int num_nodes = 2 * leafBB.size() - 1;
|
||||
// assign and allocate members
|
||||
nodeBBox_.resize(num_nodes);
|
||||
nodeBBox_.resize_nofill(num_nodes);
|
||||
nodeParent_.resize(num_nodes, -1);
|
||||
internalChildren_.resize(leafBB.size() - 1, std::make_pair(-1, -1));
|
||||
// organize tree
|
||||
|
@ -321,40 +309,27 @@ class Collider {
|
|||
{nodeBBox_, counter, nodeParent_, internalChildren_}));
|
||||
}
|
||||
|
||||
template <const bool selfCollision = false, const bool inverted = false,
|
||||
typename T>
|
||||
void Collisions(const VecView<const T>& queriesIn,
|
||||
SparseIndices& queryTri) const {
|
||||
// This function iterates over queriesIn and calls recorder.record(queryIdx,
// leafIdx, local) for each collision it finds.
// If selfCollision is true, it will skip the case where queryIdx == leafIdx.
// The recorder should provide a local() method that returns a Recorder::Local
// type, representing thread local storage. By default, recorder.record can
// run in parallel and the thread local storage can be combined at the end.
// If parallel is false, the function will run in sequential mode.
//
// If thread local storage is not needed, use SimpleRecorder.
|
||||
template <const bool selfCollision = false, typename T, typename Recorder>
|
||||
void Collisions(const VecView<const T>& queriesIn, Recorder& recorder,
|
||||
bool parallel = true) const {
|
||||
ZoneScoped;
|
||||
using collider_internal::FindCollision;
|
||||
#if (MANIFOLD_PAR == 1)
|
||||
if (queriesIn.size() > collider_internal::kSequentialThreshold) {
|
||||
tbb::combinable<SparseIndices> store;
|
||||
for_each_n(
|
||||
ExecutionPolicy::Par, countAt(0), queriesIn.size(),
|
||||
FindCollision<T, selfCollision,
|
||||
collider_internal::ParCollisionRecorder<inverted>>{
|
||||
queriesIn, nodeBBox_, internalChildren_, {store}});
|
||||
|
||||
std::vector<SparseIndices> tmp;
|
||||
store.combine_each(
|
||||
[&](SparseIndices& ind) { tmp.emplace_back(std::move(ind)); });
|
||||
queryTri.FromIndices(tmp);
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
for_each_n(ExecutionPolicy::Seq, countAt(0), queriesIn.size(),
|
||||
FindCollision<T, selfCollision,
|
||||
collider_internal::SeqCollisionRecorder<inverted>>{
|
||||
queriesIn, nodeBBox_, internalChildren_, {queryTri}});
|
||||
}
|
||||
|
||||
template <const bool selfCollision = false, const bool inverted = false,
|
||||
typename T>
|
||||
SparseIndices Collisions(const VecView<const T>& queriesIn) const {
|
||||
SparseIndices result;
|
||||
Collisions<selfCollision, inverted, T>(queriesIn, result);
|
||||
return result;
|
||||
if (internalChildren_.empty()) return;
|
||||
for_each_n(parallel ? autoPolicy(queriesIn.size(),
|
||||
collider_internal::kSequentialThreshold)
|
||||
: ExecutionPolicy::Seq,
|
||||
countAt(0), queriesIn.size(),
|
||||
FindCollision<T, selfCollision, Recorder>{
|
||||
queriesIn, nodeBBox_, internalChildren_, recorder});
|
||||
}
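A minimal sketch of how this recorder-based Collisions overload might be called. CollectOverlaps, queryBoxes, and hits are hypothetical caller-side names; Collider, Box, VecView, MakeSimpleRecorder, and Collisions are the declarations above, and the snippet assumes a translation unit that includes this header.

#include <utility>
#include <vector>

using namespace manifold;

// Collect every (query, leaf) pair whose boxes overlap, assuming `collider`
// was built from the leaf bounding boxes as in the constructor above.
void CollectOverlaps(const Collider& collider, VecView<const Box> queryBoxes,
                     std::vector<std::pair<int, int>>& hits) {
  auto record = [&hits](int queryIdx, int leafIdx) {
    hits.push_back({queryIdx, leafIdx});
  };
  auto recorder = MakeSimpleRecorder(record);
  // Sequential mode, so pushing into `hits` needs no synchronization.
  collider.Collisions(queryBoxes, recorder, /*parallel=*/false);
}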
|
||||
|
||||
static uint32_t MortonCode(vec3 position, Box bBox) {
|
||||
|
|
68 thirdparty/manifold/src/constructors.cpp vendored
|
@@ -12,10 +12,43 @@
|
|||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "./csg_tree.h"
|
||||
#include "./impl.h"
|
||||
#include "./parallel.h"
|
||||
#include "csg_tree.h"
|
||||
#include "impl.h"
|
||||
#include "manifold/manifold.h"
|
||||
#include "manifold/polygon.h"
|
||||
#include "parallel.h"
|
||||
|
||||
namespace {
|
||||
using namespace manifold;
|
||||
|
||||
template <typename P, typename I>
|
||||
std::shared_ptr<Manifold::Impl> SmoothImpl(
|
||||
const MeshGLP<P, I>& meshGL,
|
||||
const std::vector<Smoothness>& sharpenedEdges) {
|
||||
DEBUG_ASSERT(meshGL.halfedgeTangent.empty(), std::runtime_error,
|
||||
"when supplying tangents, the normal constructor should be used "
|
||||
"rather than Smooth().");
|
||||
|
||||
MeshGLP<P, I> meshTmp = meshGL;
|
||||
meshTmp.faceID.resize(meshGL.NumTri());
|
||||
std::iota(meshTmp.faceID.begin(), meshTmp.faceID.end(), 0);
|
||||
|
||||
std::shared_ptr<Manifold::Impl> impl =
|
||||
std::make_shared<Manifold::Impl>(meshTmp);
|
||||
impl->CreateTangents(impl->UpdateSharpenedEdges(sharpenedEdges));
|
||||
// Restore the original faceID
|
||||
const size_t numTri = impl->NumTri();
|
||||
for (size_t i = 0; i < numTri; ++i) {
|
||||
if (meshGL.faceID.size() == numTri) {
|
||||
impl->meshRelation_.triRef[i].faceID =
|
||||
meshGL.faceID[impl->meshRelation_.triRef[i].faceID];
|
||||
} else {
|
||||
impl->meshRelation_.triRef[i].faceID = -1;
|
||||
}
|
||||
}
|
||||
return impl;
|
||||
}
|
||||
} // namespace
|
||||
|
||||
namespace manifold {
|
||||
/**
|
||||
|
@ -48,13 +81,7 @@ namespace manifold {
|
|||
*/
|
||||
Manifold Manifold::Smooth(const MeshGL& meshGL,
|
||||
const std::vector<Smoothness>& sharpenedEdges) {
|
||||
DEBUG_ASSERT(meshGL.halfedgeTangent.empty(), std::runtime_error,
|
||||
"when supplying tangents, the normal constructor should be used "
|
||||
"rather than Smooth().");
|
||||
|
||||
std::shared_ptr<Impl> impl = std::make_shared<Impl>(meshGL);
|
||||
impl->CreateTangents(impl->UpdateSharpenedEdges(sharpenedEdges));
|
||||
return Manifold(impl);
|
||||
return Manifold(SmoothImpl(meshGL, sharpenedEdges));
|
||||
}
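Both Smooth overloads now funnel through SmoothImpl above. A hedged usage sketch follows; the halfedge index is purely illustrative, and `mesh` is assumed to be a valid MeshGL without halfedgeTangent data.

#include <vector>

#include "manifold/manifold.h"

// Hypothetical helper: smooth a mesh while pinning one edge sharp, then refine
// so the smoothed surface is actually tessellated.
manifold::Manifold SmoothWithOneSharpEdge(const manifold::MeshGL& mesh) {
  std::vector<manifold::Smoothness> sharpened;
  sharpened.push_back({/*halfedge=*/42, /*smoothness=*/0.0});  // 42 is illustrative
  return manifold::Manifold::Smooth(mesh, sharpened).Refine(4);
}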
|
||||
|
||||
/**
|
||||
|
@ -87,13 +114,7 @@ Manifold Manifold::Smooth(const MeshGL& meshGL,
|
|||
*/
|
||||
Manifold Manifold::Smooth(const MeshGL64& meshGL64,
|
||||
const std::vector<Smoothness>& sharpenedEdges) {
|
||||
DEBUG_ASSERT(meshGL64.halfedgeTangent.empty(), std::runtime_error,
|
||||
"when supplying tangents, the normal constructor should be used "
|
||||
"rather than Smooth().");
|
||||
|
||||
std::shared_ptr<Impl> impl = std::make_shared<Impl>(meshGL64);
|
||||
impl->CreateTangents(impl->UpdateSharpenedEdges(sharpenedEdges));
|
||||
return Manifold(impl);
|
||||
return Manifold(SmoothImpl(meshGL64, sharpenedEdges));
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -173,8 +194,7 @@ Manifold Manifold::Sphere(double radius, int circularSegments) {
|
|||
int n = circularSegments > 0 ? (circularSegments + 3) / 4
|
||||
: Quality::GetCircularSegments(radius) / 4;
|
||||
auto pImpl_ = std::make_shared<Impl>(Impl::Shape::Octahedron);
|
||||
pImpl_->Subdivide(
|
||||
[n](vec3 edge, vec4 tangentStart, vec4 tangentEnd) { return n - 1; });
|
||||
pImpl_->Subdivide([n](vec3, vec4, vec4) { return n - 1; });
|
||||
for_each_n(autoPolicy(pImpl_->NumVert(), 1e5), pImpl_->vertPos_.begin(),
|
||||
pImpl_->NumVert(), [radius](vec3& v) {
|
||||
v = la::cos(kHalfPi * (1.0 - v));
|
||||
|
@ -277,7 +297,7 @@ Manifold Manifold::Extrude(const Polygons& crossSection, double height,
|
|||
pImpl_->CreateHalfedges(triVertsDH);
|
||||
pImpl_->Finish();
|
||||
pImpl_->InitializeOriginal();
|
||||
pImpl_->CreateFaces();
|
||||
pImpl_->MarkCoplanar();
|
||||
return Manifold(pImpl_);
|
||||
}
|
||||
|
||||
|
@ -316,7 +336,7 @@ Manifold Manifold::Revolve(const Polygons& crossSection, int circularSegments,
|
|||
}
|
||||
const size_t next = i + 1 == poly.size() ? 0 : i + 1;
|
||||
if ((poly[next].x < 0) != (poly[i].x < 0)) {
|
||||
const double y = poly[next].y + poly[next].x *
|
||||
const double y = poly[next].y - poly[next].x *
|
||||
(poly[i].y - poly[next].y) /
|
||||
(poly[i].x - poly[next].x);
|
||||
polygons.back().push_back({0, y});
|
||||
|
@ -352,8 +372,8 @@ Manifold Manifold::Revolve(const Polygons& crossSection, int circularSegments,
|
|||
const int nSlices = isFullRevolution ? nDivisions : nDivisions + 1;
|
||||
|
||||
for (const auto& poly : polygons) {
|
||||
std::size_t nPosVerts = 0;
|
||||
std::size_t nRevolveAxisVerts = 0;
|
||||
size_t nPosVerts = 0;
|
||||
size_t nRevolveAxisVerts = 0;
|
||||
for (auto& pt : poly) {
|
||||
if (pt.x > 0) {
|
||||
nPosVerts++;
|
||||
|
@ -420,7 +440,7 @@ Manifold Manifold::Revolve(const Polygons& crossSection, int circularSegments,
|
|||
pImpl_->CreateHalfedges(triVertsDH);
|
||||
pImpl_->Finish();
|
||||
pImpl_->InitializeOriginal();
|
||||
pImpl_->CreateFaces();
|
||||
pImpl_->MarkCoplanar();
|
||||
return Manifold(pImpl_);
|
||||
}
|
||||
|
||||
|
|
|
@@ -376,6 +376,16 @@ CrossSection CrossSection::BatchBoolean(
|
|||
return crossSections[0];
|
||||
|
||||
auto subjs = crossSections[0].GetPaths();
|
||||
|
||||
if (op == OpType::Intersect) {
|
||||
auto res = subjs->paths_;
|
||||
for (size_t i = 1; i < crossSections.size(); ++i) {
|
||||
res = C2::BooleanOp(C2::ClipType::Intersection, C2::FillRule::Positive,
|
||||
res, crossSections[i].GetPaths()->paths_, precision_);
|
||||
}
|
||||
return CrossSection(shared_paths(res));
|
||||
}
|
||||
|
||||
int n_clips = 0;
|
||||
for (size_t i = 1; i < crossSections.size(); ++i) {
|
||||
n_clips += crossSections[i].GetPaths()->paths_.size();
|
||||
|
@ -446,7 +456,8 @@ CrossSection& CrossSection::operator^=(const CrossSection& Q) {
|
|||
* Construct a CrossSection from a vector of other CrossSections (batch
|
||||
* boolean union).
|
||||
*/
|
||||
CrossSection CrossSection::Compose(std::vector<CrossSection>& crossSections) {
|
||||
CrossSection CrossSection::Compose(
|
||||
const std::vector<CrossSection>& crossSections) {
|
||||
return BatchBoolean(crossSections, OpType::Add);
|
||||
}
|
||||
|
||||
|
@ -518,9 +529,9 @@ CrossSection CrossSection::Scale(const vec2 scale) const {
|
|||
}
|
||||
|
||||
/**
|
||||
* Mirror this CrossSection over the arbitrary axis described by the unit form
|
||||
* of the given vector. If the length of the vector is zero, an empty
|
||||
* CrossSection is returned. This operation can be chained. Transforms are
|
||||
* Mirror this CrossSection over the arbitrary axis whose normal is described by
|
||||
* the unit form of the given vector. If the length of the vector is zero, an
|
||||
* empty CrossSection is returned. This operation can be chained. Transforms are
|
||||
* combined and applied lazily.
|
||||
*
|
||||
* @param ax the axis to be mirrored over
|
||||
|
@ -529,7 +540,7 @@ CrossSection CrossSection::Mirror(const vec2 ax) const {
|
|||
if (la::length(ax) == 0.) {
|
||||
return CrossSection();
|
||||
}
|
||||
auto n = la::normalize(la::abs(ax));
|
||||
auto n = la::normalize(ax);
|
||||
auto m = mat2x3(mat2(la::identity) - 2.0 * la::outerprod(n, n), vec2(0.0));
|
||||
return Transform(m);
|
||||
}
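The fix above drops la::abs before normalizing. A standalone numeric check with plain doubles rather than the library's types: reflecting across the line whose unit normal is n is p' = p - 2 * dot(p, n) * n, and taking the absolute value of an axis with a negative component silently mirrors over a different line.

#include <cmath>
#include <cstdio>

int main() {
  const double ax[2] = {1.0, -1.0};  // mirror normal with a negative component
  const double len = std::sqrt(ax[0] * ax[0] + ax[1] * ax[1]);
  const double n[2] = {ax[0] / len, ax[1] / len};
  const double p[2] = {1.0, 0.0};
  const double d = p[0] * n[0] + p[1] * n[1];
  // Prints roughly 0 and 1: the image of (1, 0) is (0, 1), up to rounding.
  printf("%g %g\n", p[0] - 2 * d * n[0], p[1] - 2 * d * n[1]);
  // Building n from abs(ax) instead would map the same point to (0, -1).
}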
|
||||
|
@ -641,7 +652,7 @@ CrossSection CrossSection::Simplify(double epsilon) const {
|
|||
* to expand, and retraction of inner (hole) contours. Negative deltas will
|
||||
* have the opposite effect.
|
||||
* @param jointype The join type specifying the treatment of contour joins
|
||||
* (corners).
|
||||
* (corners). Defaults to Round.
|
||||
* @param miter_limit The maximum distance in multiples of delta that vertices
|
||||
* can be offset from their original positions with before squaring is
|
||||
* applied, <B>when the join type is Miter</B> (default is 2, which is the
|
||||
|
|
217 thirdparty/manifold/src/csg_tree.cpp vendored
|
@@ -12,27 +12,23 @@
|
|||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#if (MANIFOLD_PAR == 1) && __has_include(<tbb/concurrent_priority_queue.h>)
|
||||
#if MANIFOLD_PAR == 1
|
||||
#include <tbb/tbb.h>
|
||||
#define TBB_PREVIEW_CONCURRENT_ORDERED_CONTAINERS 1
|
||||
#include <tbb/concurrent_priority_queue.h>
|
||||
#endif
|
||||
|
||||
#include <algorithm>
|
||||
|
||||
#include "./boolean3.h"
|
||||
#include "./csg_tree.h"
|
||||
#include "./impl.h"
|
||||
#include "./mesh_fixes.h"
|
||||
#include "./parallel.h"
|
||||
|
||||
constexpr int kParallelThreshold = 4096;
|
||||
#include "boolean3.h"
|
||||
#include "csg_tree.h"
|
||||
#include "impl.h"
|
||||
#include "mesh_fixes.h"
|
||||
#include "parallel.h"
|
||||
|
||||
namespace {
|
||||
using namespace manifold;
|
||||
struct MeshCompare {
|
||||
bool operator()(const std::shared_ptr<CsgLeafNode> &a,
|
||||
const std::shared_ptr<CsgLeafNode> &b) {
|
||||
bool operator()(const std::shared_ptr<CsgLeafNode>& a,
|
||||
const std::shared_ptr<CsgLeafNode>& b) {
|
||||
return a->GetImpl()->NumVert() < b->GetImpl()->NumVert();
|
||||
}
|
||||
};
|
||||
|
@ -41,7 +37,7 @@ struct MeshCompare {
|
|||
namespace manifold {
|
||||
|
||||
std::shared_ptr<CsgNode> CsgNode::Boolean(
|
||||
const std::shared_ptr<CsgNode> &second, OpType op) {
|
||||
const std::shared_ptr<CsgNode>& second, OpType op) {
|
||||
if (second->GetNodeType() != CsgNodeType::Leaf) {
|
||||
// "this" is not a CsgOpNode (which overrides Boolean), but if "second" is
|
||||
// and the operation is commutative, we let it build the tree.
|
||||
|
@ -54,13 +50,13 @@ std::shared_ptr<CsgNode> CsgNode::Boolean(
|
|||
return std::make_shared<CsgOpNode>(children, op);
|
||||
}
|
||||
|
||||
std::shared_ptr<CsgNode> CsgNode::Translate(const vec3 &t) const {
|
||||
std::shared_ptr<CsgNode> CsgNode::Translate(const vec3& t) const {
|
||||
mat3x4 transform = la::identity;
|
||||
transform[3] += t;
|
||||
return Transform(transform);
|
||||
}
|
||||
|
||||
std::shared_ptr<CsgNode> CsgNode::Scale(const vec3 &v) const {
|
||||
std::shared_ptr<CsgNode> CsgNode::Scale(const vec3& v) const {
|
||||
mat3x4 transform;
|
||||
for (int i : {0, 1, 2}) transform[i][i] = v[i];
|
||||
return Transform(transform);
|
||||
|
@ -102,18 +98,18 @@ std::shared_ptr<CsgLeafNode> CsgLeafNode::ToLeafNode() const {
|
|||
return std::make_shared<CsgLeafNode>(*this);
|
||||
}
|
||||
|
||||
std::shared_ptr<CsgNode> CsgLeafNode::Transform(const mat3x4 &m) const {
|
||||
std::shared_ptr<CsgNode> CsgLeafNode::Transform(const mat3x4& m) const {
|
||||
return std::make_shared<CsgLeafNode>(pImpl_, m * Mat4(transform_));
|
||||
}
|
||||
|
||||
CsgNodeType CsgLeafNode::GetNodeType() const { return CsgNodeType::Leaf; }
|
||||
|
||||
std::shared_ptr<CsgLeafNode> ImplToLeaf(Manifold::Impl &&impl) {
|
||||
std::shared_ptr<CsgLeafNode> ImplToLeaf(Manifold::Impl&& impl) {
|
||||
return std::make_shared<CsgLeafNode>(std::make_shared<Manifold::Impl>(impl));
|
||||
}
|
||||
|
||||
std::shared_ptr<CsgLeafNode> SimpleBoolean(const Manifold::Impl &a,
|
||||
const Manifold::Impl &b, OpType op) {
|
||||
std::shared_ptr<CsgLeafNode> SimpleBoolean(const Manifold::Impl& a,
|
||||
const Manifold::Impl& b, OpType op) {
|
||||
#ifdef MANIFOLD_DEBUG
|
||||
auto dump = [&]() {
|
||||
dump_lock.lock();
|
||||
|
@ -121,6 +117,7 @@ std::shared_ptr<CsgLeafNode> SimpleBoolean(const Manifold::Impl &a,
|
|||
<< std::endl;
|
||||
std::cout << "RHS self-intersecting: " << b.IsSelfIntersecting()
|
||||
<< std::endl;
|
||||
#ifdef MANIFOLD_EXPORT
|
||||
if (ManifoldParams().verbose) {
|
||||
if (op == OpType::Add)
|
||||
std::cout << "Add";
|
||||
|
@ -132,22 +129,23 @@ std::shared_ptr<CsgLeafNode> SimpleBoolean(const Manifold::Impl &a,
|
|||
std::cout << a;
|
||||
std::cout << b;
|
||||
}
|
||||
#endif
|
||||
dump_lock.unlock();
|
||||
};
|
||||
try {
|
||||
Boolean3 boolean(a, b, op);
|
||||
auto impl = boolean.Result(op);
|
||||
if (ManifoldParams().intermediateChecks && impl.IsSelfIntersecting()) {
|
||||
if (ManifoldParams().selfIntersectionChecks && impl.IsSelfIntersecting()) {
|
||||
dump_lock.lock();
|
||||
std::cout << "self intersections detected" << std::endl;
|
||||
dump_lock.unlock();
|
||||
throw logicErr("self intersection detected");
|
||||
}
|
||||
return ImplToLeaf(std::move(impl));
|
||||
} catch (logicErr &err) {
|
||||
} catch (logicErr& err) {
|
||||
dump();
|
||||
throw err;
|
||||
} catch (geometryErr &err) {
|
||||
} catch (geometryErr& err) {
|
||||
dump();
|
||||
throw err;
|
||||
}
|
||||
|
@ -160,7 +158,7 @@ std::shared_ptr<CsgLeafNode> SimpleBoolean(const Manifold::Impl &a,
|
|||
* Efficient union of a set of pairwise disjoint meshes.
|
||||
*/
|
||||
std::shared_ptr<CsgLeafNode> CsgLeafNode::Compose(
|
||||
const std::vector<std::shared_ptr<CsgLeafNode>> &nodes) {
|
||||
const std::vector<std::shared_ptr<CsgLeafNode>>& nodes) {
|
||||
ZoneScoped;
|
||||
double epsilon = -1;
|
||||
double tolerance = -1;
|
||||
|
@ -173,7 +171,7 @@ std::shared_ptr<CsgLeafNode> CsgLeafNode::Compose(
|
|||
std::vector<int> triIndices;
|
||||
std::vector<int> propVertIndices;
|
||||
int numPropOut = 0;
|
||||
for (auto &node : nodes) {
|
||||
for (auto& node : nodes) {
|
||||
if (node->pImpl_->status_ != Manifold::Error::NoError) {
|
||||
Manifold::Impl impl;
|
||||
impl.status_ = node->pImpl_->status_;
|
||||
|
@ -199,22 +197,20 @@ std::shared_ptr<CsgLeafNode> CsgLeafNode::Compose(
|
|||
const int numProp = node->pImpl_->NumProp();
|
||||
numPropOut = std::max(numPropOut, numProp);
|
||||
numPropVert +=
|
||||
numProp == 0 ? 1
|
||||
: node->pImpl_->meshRelation_.properties.size() / numProp;
|
||||
numProp == 0 ? 1 : node->pImpl_->properties_.size() / numProp;
|
||||
}
|
||||
|
||||
Manifold::Impl combined;
|
||||
combined.epsilon_ = epsilon;
|
||||
combined.tolerance_ = tolerance;
|
||||
combined.vertPos_.resize(numVert);
|
||||
combined.halfedge_.resize(2 * numEdge);
|
||||
combined.faceNormal_.resize(numTri);
|
||||
combined.vertPos_.resize_nofill(numVert);
|
||||
combined.halfedge_.resize_nofill(2 * numEdge);
|
||||
combined.faceNormal_.resize_nofill(numTri);
|
||||
combined.halfedgeTangent_.resize(2 * numEdge);
|
||||
combined.meshRelation_.triRef.resize(numTri);
|
||||
combined.meshRelation_.triRef.resize_nofill(numTri);
|
||||
if (numPropOut > 0) {
|
||||
combined.meshRelation_.numProp = numPropOut;
|
||||
combined.meshRelation_.properties.resize(numPropOut * numPropVert, 0);
|
||||
combined.meshRelation_.triProperties.resize(numTri);
|
||||
combined.numProp_ = numPropOut;
|
||||
combined.properties_.resize(numPropOut * numPropVert, 0);
|
||||
}
|
||||
auto policy = autoPolicy(numTri);
|
||||
|
||||
|
@ -228,50 +224,35 @@ std::shared_ptr<CsgLeafNode> CsgLeafNode::Compose(
|
|||
countAt(0), nodes.size(),
|
||||
[&nodes, &vertIndices, &edgeIndices, &triIndices, &propVertIndices,
|
||||
numPropOut, &combined, policy](int i) {
|
||||
auto &node = nodes[i];
|
||||
auto& node = nodes[i];
|
||||
copy(node->pImpl_->halfedgeTangent_.begin(),
|
||||
node->pImpl_->halfedgeTangent_.end(),
|
||||
combined.halfedgeTangent_.begin() + edgeIndices[i]);
|
||||
const int nextVert = vertIndices[i];
|
||||
const int nextEdge = edgeIndices[i];
|
||||
const int nextFace = triIndices[i];
|
||||
const int nextProp = propVertIndices[i];
|
||||
transform(node->pImpl_->halfedge_.begin(),
|
||||
node->pImpl_->halfedge_.end(),
|
||||
combined.halfedge_.begin() + edgeIndices[i],
|
||||
[nextVert, nextEdge, nextFace](Halfedge edge) {
|
||||
[nextVert, nextEdge, nextProp](Halfedge edge) {
|
||||
edge.startVert += nextVert;
|
||||
edge.endVert += nextVert;
|
||||
edge.pairedHalfedge += nextEdge;
|
||||
edge.propVert += nextProp;
|
||||
return edge;
|
||||
});
|
||||
|
||||
if (numPropOut > 0) {
|
||||
auto start =
|
||||
combined.meshRelation_.triProperties.begin() + triIndices[i];
|
||||
if (node->pImpl_->NumProp() > 0) {
|
||||
auto &triProp = node->pImpl_->meshRelation_.triProperties;
|
||||
const int nextProp = propVertIndices[i];
|
||||
transform(triProp.begin(), triProp.end(), start,
|
||||
[nextProp](ivec3 tri) {
|
||||
tri += nextProp;
|
||||
return tri;
|
||||
});
|
||||
|
||||
const int numProp = node->pImpl_->NumProp();
|
||||
auto &oldProp = node->pImpl_->meshRelation_.properties;
|
||||
auto &newProp = combined.meshRelation_.properties;
|
||||
for (int p = 0; p < numProp; ++p) {
|
||||
auto oldRange =
|
||||
StridedRange(oldProp.cbegin() + p, oldProp.cend(), numProp);
|
||||
auto newRange = StridedRange(
|
||||
newProp.begin() + numPropOut * propVertIndices[i] + p,
|
||||
newProp.end(), numPropOut);
|
||||
copy(oldRange.begin(), oldRange.end(), newRange.begin());
|
||||
}
|
||||
} else {
|
||||
// point all triangles at single new property of zeros.
|
||||
fill(start, start + node->pImpl_->NumTri(),
|
||||
ivec3(propVertIndices[i]));
|
||||
if (node->pImpl_->NumProp() > 0) {
|
||||
const int numProp = node->pImpl_->NumProp();
|
||||
auto& oldProp = node->pImpl_->properties_;
|
||||
auto& newProp = combined.properties_;
|
||||
for (int p = 0; p < numProp; ++p) {
|
||||
auto oldRange =
|
||||
StridedRange(oldProp.cbegin() + p, oldProp.cend(), numProp);
|
||||
auto newRange = StridedRange(
|
||||
newProp.begin() + numPropOut * propVertIndices[i] + p,
|
||||
newProp.end(), numPropOut);
|
||||
copy(oldRange.begin(), oldRange.end(), newRange.begin());
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -323,16 +304,16 @@ std::shared_ptr<CsgLeafNode> CsgLeafNode::Compose(
|
|||
});
|
||||
|
||||
for (size_t i = 0; i < nodes.size(); i++) {
|
||||
auto &node = nodes[i];
|
||||
auto& node = nodes[i];
|
||||
const int offset = i * Manifold::Impl::meshIDCounter_;
|
||||
|
||||
for (const auto &pair : node->pImpl_->meshRelation_.meshIDtransform) {
|
||||
for (const auto& pair : node->pImpl_->meshRelation_.meshIDtransform) {
|
||||
combined.meshRelation_.meshIDtransform[pair.first + offset] = pair.second;
|
||||
}
|
||||
}
|
||||
|
||||
// required to remove parts that are smaller than the tolerance
|
||||
combined.SimplifyTopology();
|
||||
combined.RemoveDegenerates();
|
||||
combined.Finish();
|
||||
combined.IncrementMeshIDs();
|
||||
return ImplToLeaf(std::move(combined));
|
||||
|
@ -343,7 +324,7 @@ std::shared_ptr<CsgLeafNode> CsgLeafNode::Compose(
|
|||
* operation. Only supports union and intersection.
|
||||
*/
|
||||
std::shared_ptr<CsgLeafNode> BatchBoolean(
|
||||
OpType operation, std::vector<std::shared_ptr<CsgLeafNode>> &results) {
|
||||
OpType operation, std::vector<std::shared_ptr<CsgLeafNode>>& results) {
|
||||
ZoneScoped;
|
||||
DEBUG_ASSERT(operation != OpType::Subtract, logicErr,
|
||||
"BatchBoolean doesn't support Difference.");
|
||||
|
@ -353,50 +334,44 @@ std::shared_ptr<CsgLeafNode> BatchBoolean(
|
|||
if (results.size() == 2)
|
||||
return SimpleBoolean(*results[0]->GetImpl(), *results[1]->GetImpl(),
|
||||
operation);
|
||||
#if (MANIFOLD_PAR == 1) && __has_include(<tbb/tbb.h>)
|
||||
tbb::task_group group;
|
||||
tbb::concurrent_priority_queue<std::shared_ptr<CsgLeafNode>, MeshCompare>
|
||||
queue(results.size());
|
||||
for (auto result : results) {
|
||||
queue.emplace(result);
|
||||
}
|
||||
results.clear();
|
||||
std::function<void()> process = [&]() {
|
||||
while (queue.size() > 1) {
|
||||
std::shared_ptr<CsgLeafNode> a, b;
|
||||
if (!queue.try_pop(a)) continue;
|
||||
if (!queue.try_pop(b)) {
|
||||
queue.push(a);
|
||||
continue;
|
||||
}
|
||||
group.run([&, a, b]() {
|
||||
queue.emplace(SimpleBoolean(*a->GetImpl(), *b->GetImpl(), operation));
|
||||
return group.run(process);
|
||||
});
|
||||
}
|
||||
};
|
||||
group.run_and_wait(process);
|
||||
std::shared_ptr<CsgLeafNode> r;
|
||||
queue.try_pop(r);
|
||||
return r;
|
||||
#endif
|
||||
// apply boolean operations starting from smaller meshes
// the assumption is that boolean operations on smaller meshes are faster,
// due to less data being copied and processed
|
||||
auto cmpFn = MeshCompare();
|
||||
std::make_heap(results.begin(), results.end(), cmpFn);
|
||||
std::vector<std::shared_ptr<CsgLeafNode>> tmp;
|
||||
#if MANIFOLD_PAR == 1
|
||||
tbb::task_group group;
|
||||
std::mutex mutex;
|
||||
#endif
|
||||
while (results.size() > 1) {
|
||||
std::pop_heap(results.begin(), results.end(), cmpFn);
|
||||
auto a = std::move(results.back());
|
||||
results.pop_back();
|
||||
std::pop_heap(results.begin(), results.end(), cmpFn);
|
||||
auto b = std::move(results.back());
|
||||
results.pop_back();
|
||||
// boolean operation
|
||||
auto result = SimpleBoolean(*a->GetImpl(), *b->GetImpl(), operation);
|
||||
if (results.size() == 0) return result;
|
||||
results.push_back(result);
|
||||
std::push_heap(results.begin(), results.end(), cmpFn);
|
||||
for (size_t i = 0; i < 4 && results.size() > 1; i++) {
|
||||
std::pop_heap(results.begin(), results.end(), cmpFn);
|
||||
auto a = std::move(results.back());
|
||||
results.pop_back();
|
||||
std::pop_heap(results.begin(), results.end(), cmpFn);
|
||||
auto b = std::move(results.back());
|
||||
results.pop_back();
|
||||
#if MANIFOLD_PAR == 1
|
||||
group.run([&, a, b]() {
|
||||
auto result = SimpleBoolean(*a->GetImpl(), *b->GetImpl(), operation);
|
||||
mutex.lock();
|
||||
tmp.push_back(result);
|
||||
mutex.unlock();
|
||||
});
|
||||
#else
|
||||
auto result = SimpleBoolean(*a->GetImpl(), *b->GetImpl(), operation);
|
||||
tmp.push_back(result);
|
||||
#endif
|
||||
}
|
||||
#if MANIFOLD_PAR == 1
|
||||
group.wait();
|
||||
#endif
|
||||
for (auto result : tmp) {
|
||||
results.push_back(result);
|
||||
std::push_heap(results.begin(), results.end(), cmpFn);
|
||||
}
|
||||
tmp.clear();
|
||||
}
|
||||
return results.front();
|
||||
}
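The comment above describes combining the cheapest remaining meshes first, in the spirit of Huffman merging. A tiny sequential sketch of that strategy, with plain ints standing in for mesh sizes (hypothetical, not the library code):

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  std::vector<int> sizes = {100, 5, 7, 60, 3};
  auto cmp = [](int a, int b) { return a > b; };  // min-heap: smallest at front
  std::make_heap(sizes.begin(), sizes.end(), cmp);
  while (sizes.size() > 1) {
    std::pop_heap(sizes.begin(), sizes.end(), cmp);
    const int a = sizes.back();
    sizes.pop_back();
    std::pop_heap(sizes.begin(), sizes.end(), cmp);
    const int b = sizes.back();
    sizes.pop_back();
    sizes.push_back(a + b);  // stand-in for the boolean of the two smallest
    std::push_heap(sizes.begin(), sizes.end(), cmp);
  }
  printf("%d\n", sizes.front());  // 175
}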
|
||||
|
@ -406,7 +381,7 @@ std::shared_ptr<CsgLeafNode> BatchBoolean(
|
|||
* possible.
|
||||
*/
|
||||
std::shared_ptr<CsgLeafNode> BatchUnion(
|
||||
std::vector<std::shared_ptr<CsgLeafNode>> &children) {
|
||||
std::vector<std::shared_ptr<CsgLeafNode>>& children) {
|
||||
ZoneScoped;
|
||||
// INVARIANT: children_ is a vector of leaf nodes
|
||||
// this kMaxUnionSize is a heuristic to avoid the pairwise disjoint check
|
||||
|
@ -429,7 +404,7 @@ std::shared_ptr<CsgLeafNode> BatchUnion(
|
|||
// each set contains a set of children that are pairwise disjoint
|
||||
std::vector<Vec<size_t>> disjointSets;
|
||||
for (size_t i = 0; i < boxes.size(); i++) {
|
||||
auto lambda = [&boxes, i](const Vec<size_t> &set) {
|
||||
auto lambda = [&boxes, i](const Vec<size_t>& set) {
|
||||
return std::find_if(set.begin(), set.end(), [&boxes, i](size_t j) {
|
||||
return boxes[i].DoesOverlap(boxes[j]);
|
||||
}) == set.end();
|
||||
|
@ -443,7 +418,7 @@ std::shared_ptr<CsgLeafNode> BatchUnion(
|
|||
}
|
||||
// compose each set of disjoint children
|
||||
std::vector<std::shared_ptr<CsgLeafNode>> impls;
|
||||
for (auto &set : disjointSets) {
|
||||
for (auto& set : disjointSets) {
|
||||
if (set.size() == 1) {
|
||||
impls.push_back(children[start + set[0]]);
|
||||
} else {
|
||||
|
@ -466,7 +441,7 @@ std::shared_ptr<CsgLeafNode> BatchUnion(
|
|||
|
||||
CsgOpNode::CsgOpNode() {}
|
||||
|
||||
CsgOpNode::CsgOpNode(const std::vector<std::shared_ptr<CsgNode>> &children,
|
||||
CsgOpNode::CsgOpNode(const std::vector<std::shared_ptr<CsgNode>>& children,
|
||||
OpType op)
|
||||
: impl_(children), op_(op) {}
|
||||
|
||||
|
@ -475,7 +450,7 @@ CsgOpNode::~CsgOpNode() {
|
|||
auto impl = impl_.GetGuard();
|
||||
std::vector<std::shared_ptr<CsgOpNode>> toProcess;
|
||||
auto handleChildren =
|
||||
[&toProcess](std::vector<std::shared_ptr<CsgNode>> &children) {
|
||||
[&toProcess](std::vector<std::shared_ptr<CsgNode>>& children) {
|
||||
while (!children.empty()) {
|
||||
// move out so shrinking the vector will not trigger recursive drop
|
||||
auto movedChild = std::move(children.back());
|
||||
|
@ -498,7 +473,7 @@ CsgOpNode::~CsgOpNode() {
|
|||
}
|
||||
|
||||
std::shared_ptr<CsgNode> CsgOpNode::Boolean(
|
||||
const std::shared_ptr<CsgNode> &second, OpType op) {
|
||||
const std::shared_ptr<CsgNode>& second, OpType op) {
|
||||
std::vector<std::shared_ptr<CsgNode>> children;
|
||||
children.push_back(shared_from_this());
|
||||
children.push_back(second);
|
||||
|
@ -506,7 +481,7 @@ std::shared_ptr<CsgNode> CsgOpNode::Boolean(
|
|||
return std::make_shared<CsgOpNode>(children, op);
|
||||
}
|
||||
|
||||
std::shared_ptr<CsgNode> CsgOpNode::Transform(const mat3x4 &m) const {
|
||||
std::shared_ptr<CsgNode> CsgOpNode::Transform(const mat3x4& m) const {
|
||||
auto node = std::make_shared<CsgOpNode>();
|
||||
node->impl_ = impl_;
|
||||
node->transform_ = m * Mat4(transform_);
|
||||
|
@ -520,14 +495,14 @@ struct CsgStackFrame {
|
|||
bool finalize;
|
||||
OpType parent_op;
|
||||
mat3x4 transform;
|
||||
Nodes *positive_dest;
|
||||
Nodes *negative_dest;
|
||||
Nodes* positive_dest;
|
||||
Nodes* negative_dest;
|
||||
std::shared_ptr<const CsgOpNode> op_node;
|
||||
Nodes positive_children;
|
||||
Nodes negative_children;
|
||||
|
||||
CsgStackFrame(bool finalize, OpType parent_op, mat3x4 transform,
|
||||
Nodes *positive_dest, Nodes *negative_dest,
|
||||
Nodes* positive_dest, Nodes* negative_dest,
|
||||
std::shared_ptr<const CsgOpNode> op_node)
|
||||
: finalize(finalize),
|
||||
parent_op(parent_op),
|
||||
|
@ -675,8 +650,8 @@ std::shared_ptr<CsgLeafNode> CsgOpNode::ToLeafNode() const {
|
|||
stack.pop_back();
|
||||
} else {
|
||||
auto add_children =
|
||||
[&stack](std::shared_ptr<CsgNode> &node, OpType op, mat3x4 transform,
|
||||
CsgStackFrame::Nodes *dest1, CsgStackFrame::Nodes *dest2) {
|
||||
[&stack](std::shared_ptr<CsgNode>& node, OpType op, mat3x4 transform,
|
||||
CsgStackFrame::Nodes* dest1, CsgStackFrame::Nodes* dest2) {
|
||||
if (node->GetNodeType() == CsgNodeType::Leaf)
|
||||
dest1->push_back(std::static_pointer_cast<CsgLeafNode>(
|
||||
node->Transform(transform)));
|
||||
|
@ -702,14 +677,14 @@ std::shared_ptr<CsgLeafNode> CsgOpNode::ToLeafNode() const {
|
|||
const mat3x4 transform =
|
||||
canCollapse ? (frame->transform * Mat4(frame->op_node->transform_))
|
||||
: la::identity;
|
||||
CsgStackFrame::Nodes *pos_dest =
|
||||
CsgStackFrame::Nodes* pos_dest =
|
||||
canCollapse ? frame->positive_dest : &frame->positive_children;
|
||||
CsgStackFrame::Nodes *neg_dest =
|
||||
CsgStackFrame::Nodes* neg_dest =
|
||||
canCollapse ? frame->negative_dest : &frame->negative_children;
|
||||
for (size_t i = 0; i < impl->size(); i++) {
|
||||
const bool negative = op == OpType::Subtract && i != 0;
|
||||
CsgStackFrame::Nodes *dest1 = negative ? neg_dest : pos_dest;
|
||||
CsgStackFrame::Nodes *dest2 =
|
||||
CsgStackFrame::Nodes* dest1 = negative ? neg_dest : pos_dest;
|
||||
CsgStackFrame::Nodes* dest2 =
|
||||
(op == OpType::Subtract && i == 0) ? neg_dest : nullptr;
|
||||
add_children((*impl)[i], negative ? OpType::Add : op, transform, dest1,
|
||||
dest2);
|
||||
|
|
20 thirdparty/manifold/src/csg_tree.h vendored
|
@@ -13,8 +13,8 @@
|
|||
// limitations under the License.
|
||||
|
||||
#pragma once
|
||||
#include "./utils.h"
|
||||
#include "manifold/manifold.h"
|
||||
#include "utils.h"
|
||||
|
||||
namespace manifold {
|
||||
|
||||
|
@ -25,14 +25,14 @@ class CsgLeafNode;
|
|||
class CsgNode : public std::enable_shared_from_this<CsgNode> {
|
||||
public:
|
||||
virtual std::shared_ptr<CsgLeafNode> ToLeafNode() const = 0;
|
||||
virtual std::shared_ptr<CsgNode> Transform(const mat3x4 &m) const = 0;
|
||||
virtual std::shared_ptr<CsgNode> Transform(const mat3x4& m) const = 0;
|
||||
virtual CsgNodeType GetNodeType() const = 0;
|
||||
|
||||
virtual std::shared_ptr<CsgNode> Boolean(
|
||||
const std::shared_ptr<CsgNode> &second, OpType op);
|
||||
const std::shared_ptr<CsgNode>& second, OpType op);
|
||||
|
||||
std::shared_ptr<CsgNode> Translate(const vec3 &t) const;
|
||||
std::shared_ptr<CsgNode> Scale(const vec3 &s) const;
|
||||
std::shared_ptr<CsgNode> Translate(const vec3& t) const;
|
||||
std::shared_ptr<CsgNode> Scale(const vec3& s) const;
|
||||
std::shared_ptr<CsgNode> Rotate(double xDegrees = 0, double yDegrees = 0,
|
||||
double zDegrees = 0) const;
|
||||
};
|
||||
|
@ -47,12 +47,12 @@ class CsgLeafNode final : public CsgNode {
|
|||
|
||||
std::shared_ptr<CsgLeafNode> ToLeafNode() const override;
|
||||
|
||||
std::shared_ptr<CsgNode> Transform(const mat3x4 &m) const override;
|
||||
std::shared_ptr<CsgNode> Transform(const mat3x4& m) const override;
|
||||
|
||||
CsgNodeType GetNodeType() const override;
|
||||
|
||||
static std::shared_ptr<CsgLeafNode> Compose(
|
||||
const std::vector<std::shared_ptr<CsgLeafNode>> &nodes);
|
||||
const std::vector<std::shared_ptr<CsgLeafNode>>& nodes);
|
||||
|
||||
private:
|
||||
mutable std::shared_ptr<const Manifold::Impl> pImpl_;
|
||||
|
@ -63,12 +63,12 @@ class CsgOpNode final : public CsgNode {
|
|||
public:
|
||||
CsgOpNode();
|
||||
|
||||
CsgOpNode(const std::vector<std::shared_ptr<CsgNode>> &children, OpType op);
|
||||
CsgOpNode(const std::vector<std::shared_ptr<CsgNode>>& children, OpType op);
|
||||
|
||||
std::shared_ptr<CsgNode> Boolean(const std::shared_ptr<CsgNode> &second,
|
||||
std::shared_ptr<CsgNode> Boolean(const std::shared_ptr<CsgNode>& second,
|
||||
OpType op) override;
|
||||
|
||||
std::shared_ptr<CsgNode> Transform(const mat3x4 &m) const override;
|
||||
std::shared_ptr<CsgNode> Transform(const mat3x4& m) const override;
|
||||
|
||||
std::shared_ptr<CsgLeafNode> ToLeafNode() const override;
|
||||
|
||||
|
|
639 thirdparty/manifold/src/edge_op.cpp vendored
|
@@ -12,8 +12,11 @@
|
|||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "./impl.h"
|
||||
#include "./parallel.h"
|
||||
#include <unordered_map>
|
||||
|
||||
#include "impl.h"
|
||||
#include "parallel.h"
|
||||
#include "shared.h"
|
||||
|
||||
namespace {
|
||||
using namespace manifold;
|
||||
|
@ -47,33 +50,46 @@ struct DuplicateEdge {
|
|||
struct ShortEdge {
|
||||
VecView<const Halfedge> halfedge;
|
||||
VecView<const vec3> vertPos;
|
||||
const double tolerance;
|
||||
const double epsilon;
|
||||
const int firstNewVert;
|
||||
|
||||
bool operator()(int edge) const {
|
||||
if (halfedge[edge].pairedHalfedge < 0) return false;
|
||||
const Halfedge& half = halfedge[edge];
|
||||
if (half.pairedHalfedge < 0 ||
|
||||
(half.startVert < firstNewVert && half.endVert < firstNewVert))
|
||||
return false;
|
||||
// Flag short edges
|
||||
const vec3 delta =
|
||||
vertPos[halfedge[edge].endVert] - vertPos[halfedge[edge].startVert];
|
||||
return la::dot(delta, delta) < tolerance * tolerance;
|
||||
const vec3 delta = vertPos[half.endVert] - vertPos[half.startVert];
|
||||
return la::dot(delta, delta) < epsilon * epsilon;
|
||||
}
|
||||
};
|
||||
|
||||
struct FlagEdge {
|
||||
VecView<const Halfedge> halfedge;
|
||||
VecView<const TriRef> triRef;
|
||||
const int firstNewVert;
|
||||
|
||||
bool operator()(int edge) const {
|
||||
if (halfedge[edge].pairedHalfedge < 0) return false;
|
||||
const Halfedge& half = halfedge[edge];
|
||||
if (half.pairedHalfedge < 0 || half.startVert < firstNewVert) return false;
|
||||
// Flag redundant edges - those where the startVert is surrounded by only
|
||||
// two original triangles.
|
||||
const TriRef ref0 = triRef[edge / 3];
|
||||
int current = NextHalfedge(halfedge[edge].pairedHalfedge);
|
||||
const TriRef ref1 = triRef[current / 3];
|
||||
int current = NextHalfedge(half.pairedHalfedge);
|
||||
TriRef ref1 = triRef[current / 3];
|
||||
bool ref1Updated = !ref0.SameFace(ref1);
|
||||
while (current != edge) {
|
||||
current = NextHalfedge(halfedge[current].pairedHalfedge);
|
||||
int tri = current / 3;
|
||||
const TriRef ref = triRef[tri];
|
||||
if (!ref.SameFace(ref0) && !ref.SameFace(ref1)) return false;
|
||||
if (!ref.SameFace(ref0) && !ref.SameFace(ref1)) {
|
||||
if (!ref1Updated) {
|
||||
ref1 = ref;
|
||||
ref1Updated = true;
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
@ -84,9 +100,15 @@ struct SwappableEdge {
|
|||
VecView<const vec3> vertPos;
|
||||
VecView<const vec3> triNormal;
|
||||
const double tolerance;
|
||||
const int firstNewVert;
|
||||
|
||||
bool operator()(int edge) const {
|
||||
if (halfedge[edge].pairedHalfedge < 0) return false;
|
||||
const Halfedge& half = halfedge[edge];
|
||||
if (half.pairedHalfedge < 0) return false;
|
||||
if (half.startVert < firstNewVert && half.endVert < firstNewVert &&
|
||||
halfedge[NextHalfedge(edge)].endVert < firstNewVert &&
|
||||
halfedge[NextHalfedge(half.pairedHalfedge)].endVert < firstNewVert)
|
||||
return false;
|
||||
|
||||
int tri = edge / 3;
|
||||
ivec3 triEdge = TriOf(edge);
|
||||
|
@ -98,7 +120,7 @@ struct SwappableEdge {
|
|||
return false;
|
||||
|
||||
// Switch to neighbor's projection.
|
||||
edge = halfedge[edge].pairedHalfedge;
|
||||
edge = half.pairedHalfedge;
|
||||
tri = edge / 3;
|
||||
triEdge = TriOf(edge);
|
||||
projection = GetAxisAlignedProjection(triNormal[tri]);
|
||||
|
@ -109,14 +131,67 @@ struct SwappableEdge {
|
|||
}
|
||||
};
|
||||
|
||||
struct SortEntry {
|
||||
int start;
|
||||
int end;
|
||||
size_t index;
|
||||
inline bool operator<(const SortEntry& other) const {
|
||||
return start == other.start ? end < other.end : start < other.start;
|
||||
struct FlagStore {
|
||||
#if MANIFOLD_PAR == 1
|
||||
tbb::combinable<std::vector<size_t>> store;
|
||||
#endif
|
||||
std::vector<size_t> s;
|
||||
|
||||
template <typename Pred, typename F>
|
||||
void run_seq(size_t n, Pred pred, F f) {
|
||||
for (size_t i = 0; i < n; ++i)
|
||||
if (pred(i)) s.push_back(i);
|
||||
for (size_t i : s) f(i);
|
||||
s.clear();
|
||||
}
|
||||
|
||||
#if MANIFOLD_PAR == 1
|
||||
template <typename Pred, typename F>
|
||||
void run_par(size_t n, Pred pred, F f) {
|
||||
// Test pred in parallel, store i into thread-local vectors when pred(i) is
|
||||
// true. After testing pred, iterate and call f over the indices in
|
||||
// ascending order by using a heap in a single thread
|
||||
auto& store = this->store;
|
||||
tbb::parallel_for(tbb::blocked_range<size_t>(0, n),
|
||||
[&store, &pred](const auto& r) {
|
||||
auto& local = store.local();
|
||||
for (auto i = r.begin(); i < r.end(); ++i) {
|
||||
if (pred(i)) local.push_back(i);
|
||||
}
|
||||
});
|
||||
|
||||
std::vector<std::vector<size_t>> stores;
|
||||
std::vector<size_t> result;
|
||||
store.combine_each(
|
||||
[&](auto& data) { stores.emplace_back(std::move(data)); });
|
||||
std::vector<size_t> sizes;
|
||||
size_t total_size = 0;
|
||||
for (const auto& tmp : stores) {
|
||||
sizes.push_back(total_size);
|
||||
total_size += tmp.size();
|
||||
}
|
||||
result.resize(total_size);
|
||||
for_each_n(ExecutionPolicy::Seq, countAt(0), stores.size(), [&](size_t i) {
|
||||
std::copy(stores[i].begin(), stores[i].end(), result.begin() + sizes[i]);
|
||||
});
|
||||
stable_sort(autoPolicy(result.size()), result.begin(), result.end());
|
||||
for (size_t x : result) f(x);
|
||||
}
|
||||
#endif
|
||||
|
||||
template <typename Pred, typename F>
|
||||
void run(size_t n, Pred pred, F f) {
|
||||
#if MANIFOLD_PAR == 1
|
||||
if (n > 1e5) {
|
||||
run_par(n, pred, f);
|
||||
} else
|
||||
#endif
|
||||
{
|
||||
run_seq(n, pred, f);
|
||||
}
|
||||
}
|
||||
};
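The contract this helper expects from its callers: pred must be safe to evaluate concurrently and free of side effects, while f may mutate shared mesh data because it is always applied serially, in ascending index order. A hypothetical standalone illustration using a sequential stand-in, not the library type:

#include <cstdio>
#include <vector>

struct SeqFlagStore {  // sequential stand-in for FlagStore
  std::vector<size_t> s;
  template <typename Pred, typename F>
  void run(size_t n, Pred pred, F f) {
    for (size_t i = 0; i < n; ++i)
      if (pred(i)) s.push_back(i);  // in the real type this part may run in parallel
    for (size_t i : s) f(i);        // mutation happens serially, in ascending order
    s.clear();
  }
};

int main() {
  SeqFlagStore store;
  size_t sum = 0;
  store.run(
      10, [](size_t i) { return i % 2 == 0; },  // pred: flag even indices
      [&sum](size_t i) { sum += i; });          // f: serialized mutation
  printf("%zu\n", sum);  // 0 + 2 + 4 + 6 + 8 = 20
}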
|
||||
|
||||
} // namespace
|
||||
|
||||
namespace manifold {
|
||||
|
@@ -131,41 +206,7 @@ void Manifold::Impl::CleanupTopology() {
  // In the case of a very bad triangulation, it is possible to create pinched
  // verts. They must be removed before edge collapse.
  SplitPinchedVerts();

  while (1) {
    ZoneScopedN("DedupeEdge");

    const size_t nbEdges = halfedge_.size();
    size_t numFlagged = 0;

    Vec<SortEntry> entries;
    entries.reserve(nbEdges / 2);
    for (size_t i = 0; i < nbEdges; ++i) {
      if (halfedge_[i].IsForward()) {
        entries.push_back({halfedge_[i].startVert, halfedge_[i].endVert, i});
      }
    }

    stable_sort(entries.begin(), entries.end());
    for (size_t i = 0; i < entries.size() - 1; ++i) {
      const int h0 = entries[i].index;
      const int h1 = entries[i + 1].index;
      if (halfedge_[h0].startVert == halfedge_[h1].startVert &&
          halfedge_[h0].endVert == halfedge_[h1].endVert) {
        DedupeEdge(entries[i].index);
        numFlagged++;
      }
    }

    if (numFlagged == 0) break;

#ifdef MANIFOLD_DEBUG
    if (ManifoldParams().verbose) {
      std::cout << "found " << numFlagged << " duplicate edges to split"
                << std::endl;
    }
#endif
  }
  DedupeEdges();
}

/**
@@ -184,95 +225,112 @@ void Manifold::Impl::CleanupTopology() {
 * meshes, thus decreasing the Genus(). It only increases when meshes that have
 * collapsed to just a pair of triangles are removed entirely.
 *
 * Verts with index less than firstNewVert will be left uncollapsed. This is
 * zero by default so that everything can be collapsed.
 *
 * Rather than actually removing the edges, this step merely marks them for
 * removal, by setting vertPos to NaN and halfedge to {-1, -1, -1, -1}.
 */
void Manifold::Impl::SimplifyTopology() {
void Manifold::Impl::SimplifyTopology(int firstNewVert) {
  if (!halfedge_.size()) return;

  CleanupTopology();
  CollapseShortEdges(firstNewVert);
  CollapseColinearEdges(firstNewVert);
  SwapDegenerates(firstNewVert);
}

  if (!ManifoldParams().cleanupTriangles) {
    return;
  }
void Manifold::Impl::RemoveDegenerates(int firstNewVert) {
  if (!halfedge_.size()) return;

  const size_t nbEdges = halfedge_.size();
  auto policy = autoPolicy(nbEdges, 1e5);
  CleanupTopology();
  CollapseShortEdges(firstNewVert);
  SwapDegenerates(firstNewVert);
}

void Manifold::Impl::CollapseShortEdges(int firstNewVert) {
  ZoneScopedN("CollapseShortEdge");
  FlagStore s;
  size_t numFlagged = 0;
  Vec<uint8_t> bFlags(nbEdges);
  const size_t nbEdges = halfedge_.size();

  std::vector<int> scratchBuffer;
  scratchBuffer.reserve(10);
  {
    ZoneScopedN("CollapseShortEdge");
    numFlagged = 0;
    ShortEdge se{halfedge_, vertPos_, epsilon_};
    for_each_n(policy, countAt(0_uz), nbEdges,
               [&](size_t i) { bFlags[i] = se(i); });
    for (size_t i = 0; i < nbEdges; ++i) {
      if (bFlags[i]) {
        CollapseEdge(i, scratchBuffer);
        scratchBuffer.resize(0);
        numFlagged++;
      }
    }
  }
  // Short edges get to skip several checks and hence remove more classes of
  // degenerate triangles than flagged edges do, but this could in theory lead
  // to error stacking where a vertex moves too far. For this reason this is
  // restricted to epsilon, rather than tolerance.
  ShortEdge se{halfedge_, vertPos_, epsilon_, firstNewVert};
  s.run(nbEdges, se, [&](size_t i) {
    const bool didCollapse = CollapseEdge(i, scratchBuffer);
    if (didCollapse) numFlagged++;
    scratchBuffer.resize(0);
  });

#ifdef MANIFOLD_DEBUG
  if (ManifoldParams().verbose && numFlagged > 0) {
    std::cout << "found " << numFlagged << " short edges to collapse"
              << std::endl;
  if (ManifoldParams().verbose > 0 && numFlagged > 0) {
    std::cout << "collapsed " << numFlagged << " short edges" << std::endl;
  }
#endif
}
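The comment above is why CollapseShortEdges compares against epsilon_ rather than tolerance_. The flag itself is just a squared-distance comparison; a standalone sketch of that test (the array-based signature is illustrative, with epsilon standing in for Impl::epsilon_):

#include <array>

// Illustrative only: the squared-length test used to flag a short edge.
// Comparing squared lengths against epsilon * epsilon avoids the sqrt.
bool IsShortEdge(const std::array<double, 3>& a, const std::array<double, 3>& b,
                 double epsilon) {
  double d2 = 0;
  for (int i = 0; i < 3; ++i) {
    const double d = b[i] - a[i];
    d2 += d * d;
  }
  return d2 < epsilon * epsilon;
}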

{
void Manifold::Impl::CollapseColinearEdges(int firstNewVert) {
  FlagStore s;
  size_t numFlagged = 0;
  const size_t nbEdges = halfedge_.size();
  std::vector<int> scratchBuffer;
  scratchBuffer.reserve(10);
  while (1) {
    ZoneScopedN("CollapseFlaggedEdge");
    numFlagged = 0;
    FlagEdge se{halfedge_, meshRelation_.triRef};
    for_each_n(policy, countAt(0_uz), nbEdges,
               [&](size_t i) { bFlags[i] = se(i); });
    for (size_t i = 0; i < nbEdges; ++i) {
      if (bFlags[i]) {
        CollapseEdge(i, scratchBuffer);
        scratchBuffer.resize(0);
        numFlagged++;
      }
    }
  }
    // Collapse colinear edges, but only remove new verts, i.e. verts with
    // index
    // >= firstNewVert. This is used to keep the Boolean from changing the
    // non-intersecting parts of the input meshes. Colinear is defined not by a
    // local check, but by the global MarkCoplanar function, which keeps this
    // from being vulnerable to error stacking.
    FlagEdge se{halfedge_, meshRelation_.triRef, firstNewVert};
    s.run(nbEdges, se, [&](size_t i) {
      const bool didCollapse = CollapseEdge(i, scratchBuffer);
      if (didCollapse) numFlagged++;
      scratchBuffer.resize(0);
    });
    if (numFlagged == 0) break;

#ifdef MANIFOLD_DEBUG
    if (ManifoldParams().verbose && numFlagged > 0) {
      std::cout << "found " << numFlagged << " colinear edges to collapse"
                << std::endl;
    }
    if (ManifoldParams().verbose > 0 && numFlagged > 0) {
      std::cout << "collapsed " << numFlagged << " colinear edges" << std::endl;
    }
#endif

  {
    ZoneScopedN("RecursiveEdgeSwap");
    numFlagged = 0;
    SwappableEdge se{halfedge_, vertPos_, faceNormal_, tolerance_};
    for_each_n(policy, countAt(0_uz), nbEdges,
               [&](size_t i) { bFlags[i] = se(i); });
    std::vector<int> edgeSwapStack;
    std::vector<int> visited(halfedge_.size(), -1);
    int tag = 0;
    for (size_t i = 0; i < nbEdges; ++i) {
      if (bFlags[i]) {
        numFlagged++;
        tag++;
        RecursiveEdgeSwap(i, tag, visited, edgeSwapStack, scratchBuffer);
        while (!edgeSwapStack.empty()) {
          int last = edgeSwapStack.back();
          edgeSwapStack.pop_back();
          RecursiveEdgeSwap(last, tag, visited, edgeSwapStack, scratchBuffer);
        }
      }
    }
  }
}

void Manifold::Impl::SwapDegenerates(int firstNewVert) {
  ZoneScopedN("RecursiveEdgeSwap");
  FlagStore s;
  size_t numFlagged = 0;
  const size_t nbEdges = halfedge_.size();
  std::vector<int> scratchBuffer;
  scratchBuffer.reserve(10);

  SwappableEdge se{halfedge_, vertPos_, faceNormal_, tolerance_, firstNewVert};
  std::vector<int> edgeSwapStack;
  std::vector<int> visited(halfedge_.size(), -1);
  int tag = 0;
  s.run(nbEdges, se, [&](size_t i) {
    numFlagged++;
    tag++;
    RecursiveEdgeSwap(i, tag, visited, edgeSwapStack, scratchBuffer);
    while (!edgeSwapStack.empty()) {
      int last = edgeSwapStack.back();
      edgeSwapStack.pop_back();
      RecursiveEdgeSwap(last, tag, visited, edgeSwapStack, scratchBuffer);
    }
  });

#ifdef MANIFOLD_DEBUG
  if (ManifoldParams().verbose && numFlagged > 0) {
    std::cout << "found " << numFlagged << " edges to swap" << std::endl;
  if (ManifoldParams().verbose > 0 && numFlagged > 0) {
    std::cout << "swapped " << numFlagged << " edges" << std::endl;
  }
#endif
}

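Both the colinear-collapse and degenerate-swap passes above ultimately rely on an epsilon-banded 2D orientation test of three projected points. A rough standalone sketch of that kind of test (this mirrors the role played by manifold's CCW(), not its exact tolerance scaling):

#include <cstdio>

// Illustrative only: returns +1 (CCW), -1 (CW), or 0 (colinear within tol).
int Orient2D(const double a[2], const double b[2], const double c[2],
             double tol) {
  const double area2 = (b[0] - a[0]) * (c[1] - a[1]) -
                       (b[1] - a[1]) * (c[0] - a[0]);  // twice the signed area
  if (area2 > tol) return 1;
  if (area2 < -tol) return -1;
  return 0;
}

int main() {
  const double a[2] = {0, 0}, b[2] = {1, 0}, c[2] = {2, 1e-12};
  std::printf("%d\n", Orient2D(a, b, c, 1e-9));  // prints 0: colinear within tol
}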
@ -283,6 +341,7 @@ void Manifold::Impl::DedupeEdge(const int edge) {
|
|||
// Orbit endVert
|
||||
const int startVert = halfedge_[edge].startVert;
|
||||
const int endVert = halfedge_[edge].endVert;
|
||||
const int endProp = halfedge_[NextHalfedge(edge)].propVert;
|
||||
int current = halfedge_[NextHalfedge(edge)].pairedHalfedge;
|
||||
while (current != edge) {
|
||||
const int vert = halfedge_[current].startVert;
|
||||
|
@ -297,36 +356,30 @@ void Manifold::Impl::DedupeEdge(const int edge) {
|
|||
UpdateVert(newVert, current, opposite);
|
||||
|
||||
int newHalfedge = halfedge_.size();
|
||||
int newFace = newHalfedge / 3;
|
||||
int oldFace = current / 3;
|
||||
int outsideVert = halfedge_[current].startVert;
|
||||
halfedge_.push_back({endVert, newVert, -1});
|
||||
halfedge_.push_back({newVert, outsideVert, -1});
|
||||
halfedge_.push_back({outsideVert, endVert, -1});
|
||||
halfedge_.push_back({endVert, newVert, -1, endProp});
|
||||
halfedge_.push_back({newVert, outsideVert, -1, endProp});
|
||||
halfedge_.push_back(
|
||||
{outsideVert, endVert, -1, halfedge_[current].propVert});
|
||||
PairUp(newHalfedge + 2, halfedge_[current].pairedHalfedge);
|
||||
PairUp(newHalfedge + 1, current);
|
||||
if (meshRelation_.triRef.size() > 0)
|
||||
meshRelation_.triRef.push_back(meshRelation_.triRef[oldFace]);
|
||||
if (meshRelation_.triProperties.size() > 0)
|
||||
meshRelation_.triProperties.push_back(
|
||||
meshRelation_.triProperties[oldFace]);
|
||||
if (faceNormal_.size() > 0) faceNormal_.push_back(faceNormal_[oldFace]);
|
||||
|
||||
newHalfedge += 3;
|
||||
++newFace;
|
||||
oldFace = opposite / 3;
|
||||
outsideVert = halfedge_[opposite].startVert;
|
||||
halfedge_.push_back({newVert, endVert, -1});
|
||||
halfedge_.push_back({endVert, outsideVert, -1});
|
||||
halfedge_.push_back({outsideVert, newVert, -1});
|
||||
halfedge_.push_back({newVert, endVert, -1, endProp}); // fix prop
|
||||
halfedge_.push_back({endVert, outsideVert, -1, endProp});
|
||||
halfedge_.push_back(
|
||||
{outsideVert, newVert, -1, halfedge_[opposite].propVert});
|
||||
PairUp(newHalfedge + 2, halfedge_[opposite].pairedHalfedge);
|
||||
PairUp(newHalfedge + 1, opposite);
|
||||
PairUp(newHalfedge, newHalfedge - 3);
|
||||
if (meshRelation_.triRef.size() > 0)
|
||||
meshRelation_.triRef.push_back(meshRelation_.triRef[oldFace]);
|
||||
if (meshRelation_.triProperties.size() > 0)
|
||||
meshRelation_.triProperties.push_back(
|
||||
meshRelation_.triProperties[oldFace]);
|
||||
if (faceNormal_.size() > 0) faceNormal_.push_back(faceNormal_[oldFace]);
|
||||
|
||||
break;
|
||||
|
@ -420,7 +473,7 @@ void Manifold::Impl::CollapseTri(const ivec3& triEdge) {
|
|||
halfedge_[pair1].pairedHalfedge = pair2;
|
||||
halfedge_[pair2].pairedHalfedge = pair1;
|
||||
for (int i : {0, 1, 2}) {
|
||||
halfedge_[triEdge[i]] = {-1, -1, -1};
|
||||
halfedge_[triEdge[i]] = {-1, -1, -1, halfedge_[triEdge[i]].propVert};
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -452,16 +505,16 @@ void Manifold::Impl::RemoveIfFolded(int edge) {
|
|||
}
|
||||
}
|
||||
|
||||
// Collapses the given edge by removing startVert. May split the mesh
|
||||
// topologically if the collapse would have resulted in a 4-manifold edge. Do
|
||||
// not collapse an edge if startVert is pinched - the vert will be marked NaN,
|
||||
// but other edges may still be pointing to it.
|
||||
void Manifold::Impl::CollapseEdge(const int edge, std::vector<int>& edges) {
|
||||
// Collapses the given edge by removing startVert - returns false if the edge
|
||||
// cannot be collapsed. May split the mesh topologically if the collapse would
|
||||
// have resulted in a 4-manifold edge. Do not collapse an edge if startVert is
|
||||
// pinched - the vert would be marked NaN, but other edges could still be
|
||||
// pointing to it.
|
||||
bool Manifold::Impl::CollapseEdge(const int edge, std::vector<int>& edges) {
|
||||
Vec<TriRef>& triRef = meshRelation_.triRef;
|
||||
Vec<ivec3>& triProp = meshRelation_.triProperties;
|
||||
|
||||
const Halfedge toRemove = halfedge_[edge];
|
||||
if (toRemove.pairedHalfedge < 0) return;
|
||||
if (toRemove.pairedHalfedge < 0) return false;
|
||||
|
||||
const int endVert = toRemove.endVert;
|
||||
const ivec3 tri0edge = TriOf(edge);
|
||||
|
@ -470,23 +523,16 @@ void Manifold::Impl::CollapseEdge(const int edge, std::vector<int>& edges) {
|
|||
const vec3 pNew = vertPos_[endVert];
|
||||
const vec3 pOld = vertPos_[toRemove.startVert];
|
||||
const vec3 delta = pNew - pOld;
|
||||
const bool shortEdge = la::dot(delta, delta) < tolerance_ * tolerance_;
|
||||
|
||||
// Orbit endVert
|
||||
int current = halfedge_[tri0edge[1]].pairedHalfedge;
|
||||
while (current != tri1edge[2]) {
|
||||
current = NextHalfedge(current);
|
||||
edges.push_back(current);
|
||||
current = halfedge_[current].pairedHalfedge;
|
||||
}
|
||||
const bool shortEdge = la::dot(delta, delta) < epsilon_ * epsilon_;
|
||||
|
||||
// Orbit startVert
|
||||
int start = halfedge_[tri1edge[1]].pairedHalfedge;
|
||||
int current = tri1edge[2];
|
||||
if (!shortEdge) {
|
||||
current = start;
|
||||
TriRef refCheck = triRef[toRemove.pairedHalfedge / 3];
|
||||
vec3 pLast = vertPos_[halfedge_[tri1edge[1]].endVert];
|
||||
while (current != tri0edge[2]) {
|
||||
while (current != tri1edge[0]) {
|
||||
current = NextHalfedge(current);
|
||||
vec3 pNext = vertPos_[halfedge_[current].endVert];
|
||||
const int tri = current / 3;
|
||||
|
@ -495,28 +541,43 @@ void Manifold::Impl::CollapseEdge(const int edge, std::vector<int>& edges) {
|
|||
// Don't collapse if the edge is not redundant (this may have changed due
|
||||
// to the collapse of neighbors).
|
||||
if (!ref.SameFace(refCheck)) {
|
||||
const TriRef oldRef = refCheck;
|
||||
refCheck = triRef[edge / 3];
|
||||
if (!ref.SameFace(refCheck)) {
|
||||
return;
|
||||
} else {
|
||||
// Don't collapse if the edges separating the faces are not colinear
|
||||
// (can happen when the two faces are coplanar).
|
||||
if (CCW(projection * pOld, projection * pLast, projection * pNew,
|
||||
return false;
|
||||
}
|
||||
if (ref.meshID != oldRef.meshID || ref.faceID != oldRef.faceID ||
|
||||
la::dot(faceNormal_[toRemove.pairedHalfedge / 3],
|
||||
faceNormal_[tri]) < -0.5) {
|
||||
// Restrict collapse to colinear edges when the edge separates faces
|
||||
// or the edge is sharp. This ensures large shifts are not introduced
|
||||
// parallel to the tangent plane.
|
||||
if (CCW(projection * pLast, projection * pOld, projection * pNew,
|
||||
epsilon_) != 0)
|
||||
return;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// Don't collapse edge if it would cause a triangle to invert.
|
||||
if (CCW(projection * pNext, projection * pLast, projection * pNew,
|
||||
epsilon_) < 0)
|
||||
return;
|
||||
return false;
|
||||
|
||||
pLast = pNext;
|
||||
current = halfedge_[current].pairedHalfedge;
|
||||
}
|
||||
}
|
||||
|
||||
// Orbit endVert
|
||||
{
|
||||
int current = halfedge_[tri0edge[1]].pairedHalfedge;
|
||||
while (current != tri1edge[2]) {
|
||||
current = NextHalfedge(current);
|
||||
edges.push_back(current);
|
||||
current = halfedge_[current].pairedHalfedge;
|
||||
}
|
||||
}
|
||||
|
||||
// Remove toRemove.startVert and replace with endVert.
|
||||
vertPos_[toRemove.startVert] = vec3(NAN);
|
||||
CollapseTri(tri1edge);
|
||||
|
@ -524,20 +585,18 @@ void Manifold::Impl::CollapseEdge(const int edge, std::vector<int>& edges) {
|
|||
// Orbit startVert
|
||||
const int tri0 = edge / 3;
|
||||
const int tri1 = toRemove.pairedHalfedge / 3;
|
||||
const int triVert0 = (edge + 1) % 3;
|
||||
const int triVert1 = toRemove.pairedHalfedge % 3;
|
||||
current = start;
|
||||
while (current != tri0edge[2]) {
|
||||
current = NextHalfedge(current);
|
||||
|
||||
if (triProp.size() > 0) {
|
||||
if (NumProp() > 0) {
|
||||
// Update the shifted triangles to the vertBary of endVert
|
||||
const int tri = current / 3;
|
||||
const int vIdx = current - 3 * tri;
|
||||
if (triRef[tri].SameFace(triRef[tri0])) {
|
||||
triProp[tri][vIdx] = triProp[tri0][triVert0];
|
||||
halfedge_[current].propVert = halfedge_[NextHalfedge(edge)].propVert;
|
||||
} else if (triRef[tri].SameFace(triRef[tri1])) {
|
||||
triProp[tri][vIdx] = triProp[tri1][triVert1];
|
||||
halfedge_[current].propVert =
|
||||
halfedge_[toRemove.pairedHalfedge].propVert;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -557,6 +616,7 @@ void Manifold::Impl::CollapseEdge(const int edge, std::vector<int>& edges) {
|
|||
UpdateVert(endVert, start, tri0edge[2]);
|
||||
CollapseTri(tri0edge);
|
||||
RemoveIfFolded(start);
|
||||
return true;
|
||||
}
|
||||
|
||||
void Manifold::Impl::RecursiveEdgeSwap(const int edge, int& tag,
|
||||
|
@ -574,8 +634,6 @@ void Manifold::Impl::RecursiveEdgeSwap(const int edge, int& tag,
|
|||
|
||||
const ivec3 tri0edge = TriOf(edge);
|
||||
const ivec3 tri1edge = TriOf(pair);
|
||||
const ivec3 perm0 = TriOf(edge % 3);
|
||||
const ivec3 perm1 = TriOf(pair % 3);
|
||||
|
||||
mat2x3 projection = GetAxisAlignedProjection(faceNormal_[edge / 3]);
|
||||
vec2 v[4];
|
||||
|
@ -611,22 +669,21 @@ void Manifold::Impl::RecursiveEdgeSwap(const int edge, int& tag,
|
|||
const double l02 = la::length(v[2] - v[0]);
|
||||
const double a = std::max(0.0, std::min(1.0, l02 / l01));
|
||||
// Update properties if applicable
|
||||
if (meshRelation_.properties.size() > 0) {
|
||||
Vec<ivec3>& triProp = meshRelation_.triProperties;
|
||||
Vec<double>& prop = meshRelation_.properties;
|
||||
triProp[tri0] = triProp[tri1];
|
||||
triProp[tri0][perm0[1]] = triProp[tri1][perm1[0]];
|
||||
triProp[tri0][perm0[0]] = triProp[tri1][perm1[2]];
|
||||
if (properties_.size() > 0) {
|
||||
Vec<double>& prop = properties_;
|
||||
halfedge_[tri0edge[1]].propVert = halfedge_[tri1edge[0]].propVert;
|
||||
halfedge_[tri0edge[0]].propVert = halfedge_[tri1edge[2]].propVert;
|
||||
halfedge_[tri0edge[2]].propVert = halfedge_[tri1edge[2]].propVert;
|
||||
const int numProp = NumProp();
|
||||
const int newProp = prop.size() / numProp;
|
||||
const int propIdx0 = triProp[tri1][perm1[0]];
|
||||
const int propIdx1 = triProp[tri1][perm1[1]];
|
||||
const int propIdx0 = halfedge_[tri1edge[0]].propVert;
|
||||
const int propIdx1 = halfedge_[tri1edge[1]].propVert;
|
||||
for (int p = 0; p < numProp; ++p) {
|
||||
prop.push_back(a * prop[numProp * propIdx0 + p] +
|
||||
(1 - a) * prop[numProp * propIdx1 + p]);
|
||||
}
|
||||
triProp[tri1][perm1[0]] = newProp;
|
||||
triProp[tri0][perm0[2]] = newProp;
|
||||
halfedge_[tri1edge[0]].propVert = newProp;
|
||||
halfedge_[tri0edge[2]].propVert = newProp;
|
||||
}
|
||||
|
||||
// if the new edge already exists, duplicate the verts and split the mesh.
|
||||
|
@ -675,22 +732,224 @@ void Manifold::Impl::RecursiveEdgeSwap(const int edge, int& tag,
|
|||
|
||||
void Manifold::Impl::SplitPinchedVerts() {
|
||||
ZoneScoped;
|
||||
std::vector<bool> vertProcessed(NumVert(), false);
|
||||
std::vector<bool> halfedgeProcessed(halfedge_.size(), false);
|
||||
for (size_t i = 0; i < halfedge_.size(); ++i) {
|
||||
if (halfedgeProcessed[i]) continue;
|
||||
int vert = halfedge_[i].startVert;
|
||||
if (vertProcessed[vert]) {
|
||||
vertPos_.push_back(vertPos_[vert]);
|
||||
vert = NumVert() - 1;
|
||||
} else {
|
||||
vertProcessed[vert] = true;
|
||||
|
||||
auto nbEdges = halfedge_.size();
|
||||
#if MANIFOLD_PAR == 1
|
||||
if (nbEdges > 1e4) {
|
||||
std::mutex mutex;
|
||||
std::vector<size_t> pinched;
|
||||
// This parallelized version is non-trivial, so we can't reuse the code
//
// The idea here is to identify cycles of halfedges that can be iterated
// through using ForVert. Pinched verts are vertices where there are
// multiple cycles associated with the vertex. Each cycle is identified with
// the largest halfedge index within the cycle, and when there are multiple
// cycles associated with the same starting vertex but with different ids,
// it means we have a pinched vertex. This check is done with a single
// atomic CAS operation; the expected case is either an invalid id (the vertex
// was not processed yet) or the same id.
//
// The local store records the processed halfedges, to avoid repetitive
// processing. Note that it only approximates the processed halfedges
// because it is thread local. This is why we need a vector to
// deduplicate the problematic halfedges we found.
|
||||
std::vector<std::atomic<size_t>> largestEdge(NumVert());
|
||||
for_each(ExecutionPolicy::Par, countAt(0), countAt(NumVert()),
|
||||
[&largestEdge](size_t i) {
|
||||
largestEdge[i].store(std::numeric_limits<size_t>::max());
|
||||
});
|
||||
tbb::combinable<std::vector<bool>> store(
|
||||
[nbEdges]() { return std::vector<bool>(nbEdges, false); });
|
||||
tbb::parallel_for(
|
||||
tbb::blocked_range<size_t>(0, nbEdges),
|
||||
[&store, &mutex, &pinched, &largestEdge, this](const auto& r) {
|
||||
auto& local = store.local();
|
||||
std::vector<size_t> pinchedLocal;
|
||||
for (auto i = r.begin(); i < r.end(); ++i) {
|
||||
if (local[i]) continue;
|
||||
local[i] = true;
|
||||
const int vert = halfedge_[i].startVert;
|
||||
if (vert == -1) continue;
|
||||
size_t largest = i;
|
||||
ForVert(i, [&local, &largest](int current) {
|
||||
local[current] = true;
|
||||
largest = std::max(largest, static_cast<size_t>(current));
|
||||
});
|
||||
size_t expected = std::numeric_limits<size_t>::max();
|
||||
if (!largestEdge[vert].compare_exchange_strong(expected, largest) &&
|
||||
expected != largest) {
|
||||
// we know that there is another loop...
|
||||
pinchedLocal.push_back(largest);
|
||||
}
|
||||
}
|
||||
if (!pinchedLocal.empty()) {
|
||||
std::lock_guard<std::mutex> lock(mutex);
|
||||
pinched.insert(pinched.end(), pinchedLocal.begin(),
|
||||
pinchedLocal.end());
|
||||
}
|
||||
});
|
||||
|
||||
manifold::stable_sort(pinched.begin(), pinched.end());
|
||||
std::vector<bool> halfedgeProcessed(nbEdges, false);
|
||||
for (size_t i : pinched) {
|
||||
if (halfedgeProcessed[i]) continue;
|
||||
vertPos_.push_back(vertPos_[halfedge_[i].startVert]);
|
||||
const int vert = NumVert() - 1;
|
||||
ForVert(i, [this, vert, &halfedgeProcessed](int current) {
|
||||
halfedgeProcessed[current] = true;
|
||||
halfedge_[current].startVert = vert;
|
||||
halfedge_[halfedge_[current].pairedHalfedge].endVert = vert;
|
||||
});
|
||||
}
|
||||
ForVert(i, [this, &halfedgeProcessed, vert](int current) {
|
||||
halfedgeProcessed[current] = true;
|
||||
halfedge_[current].startVert = vert;
|
||||
halfedge_[halfedge_[current].pairedHalfedge].endVert = vert;
|
||||
});
|
||||
} else
|
||||
#endif
|
||||
{
|
||||
std::vector<bool> vertProcessed(NumVert(), false);
|
||||
std::vector<bool> halfedgeProcessed(nbEdges, false);
|
||||
for (size_t i = 0; i < nbEdges; ++i) {
|
||||
if (halfedgeProcessed[i]) continue;
|
||||
int vert = halfedge_[i].startVert;
|
||||
if (vert == -1) continue;
|
||||
if (vertProcessed[vert]) {
|
||||
vertPos_.push_back(vertPos_[vert]);
|
||||
vert = NumVert() - 1;
|
||||
} else {
|
||||
vertProcessed[vert] = true;
|
||||
}
|
||||
ForVert(i, [this, &halfedgeProcessed, vert](int current) {
|
||||
halfedgeProcessed[current] = true;
|
||||
halfedge_[current].startVert = vert;
|
||||
halfedge_[halfedge_[current].pairedHalfedge].endVert = vert;
|
||||
});
|
||||
}
|
||||
}
|
||||
}
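The compare-exchange described in the comment block earlier in this function boils down to: the first cycle to reach a vertex claims it, and any later cycle carrying a different id proves the vertex is pinched. A minimal sketch of that claim-or-detect step (kInvalid and the function name are illustrative, not manifold API):

#include <atomic>
#include <cstdio>
#include <limits>

// Illustrative only: "one cycle id per vertex" via a single CAS. Returns true
// when the vertex is seen with a second, different cycle id, i.e. pinched.
constexpr size_t kInvalid = std::numeric_limits<size_t>::max();

bool ClaimOrDetectPinch(std::atomic<size_t>& slot, size_t cycleId) {
  size_t expected = kInvalid;
  if (slot.compare_exchange_strong(expected, cycleId)) return false;  // first claim
  return expected != cycleId;  // another cycle already claimed this vertex
}

int main() {
  std::atomic<size_t> vert{kInvalid};
  std::printf("%d\n", ClaimOrDetectPinch(vert, 7));   // 0: first claim
  std::printf("%d\n", ClaimOrDetectPinch(vert, 7));   // 0: same cycle
  std::printf("%d\n", ClaimOrDetectPinch(vert, 12));  // 1: pinched
}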
|
||||
|
||||
void Manifold::Impl::DedupeEdges() {
|
||||
while (1) {
|
||||
ZoneScopedN("DedupeEdge");
|
||||
|
||||
const size_t nbEdges = halfedge_.size();
|
||||
std::vector<size_t> duplicates;
|
||||
auto localLoop = [&](size_t start, size_t end, std::vector<bool>& local,
|
||||
std::vector<size_t>& results) {
|
||||
// Iterate over all halfedges that start with the same vertex, and check
|
||||
// for halfedges with the same ending vertex.
|
||||
// Note: we use Vec and linear search when the number of neighbors is
// small, because unordered_set requires allocations and is expensive.
// We switch to unordered_set when the number of neighbors is larger to
// avoid making things quadratic.
|
||||
// We do it in two passes: the first pass finds the minimal halfedges with
// the target start and end verts, and the second pass flags all duplicated
// halfedges that do not have the minimal index.
|
||||
// This ensures deterministic result.
|
||||
//
|
||||
// The local store records the processed halfedges, to avoid repetitive
// processing. Note that it only approximates the processed halfedges
// because it is thread local.
|
||||
Vec<std::pair<int, int>> endVerts;
|
||||
std::unordered_map<int, int> endVertSet;
|
||||
for (auto i = start; i < end; ++i) {
|
||||
if (local[i] || halfedge_[i].startVert == -1 ||
|
||||
halfedge_[i].endVert == -1)
|
||||
continue;
|
||||
// we want to keep the allocation
|
||||
endVerts.clear(false);
|
||||
endVertSet.clear();
|
||||
|
||||
// first iteration, populate entries
|
||||
// this makes sure we always report the same set of entries
|
||||
ForVert(i, [&local, &endVerts, &endVertSet, this](int current) {
|
||||
local[current] = true;
|
||||
if (halfedge_[current].startVert == -1 ||
|
||||
halfedge_[current].endVert == -1) {
|
||||
return;
|
||||
}
|
||||
int endV = halfedge_[current].endVert;
|
||||
if (endVertSet.empty()) {
|
||||
auto iter = std::find_if(endVerts.begin(), endVerts.end(),
|
||||
[endV](const std::pair<int, int>& pair) {
|
||||
return pair.first == endV;
|
||||
});
|
||||
if (iter != endVerts.end()) {
|
||||
iter->second = std::min(iter->second, current);
|
||||
} else {
|
||||
endVerts.push_back({endV, current});
|
||||
if (endVerts.size() > 32) {
|
||||
endVertSet.insert(endVerts.begin(), endVerts.end());
|
||||
endVerts.clear(false);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
auto pair = endVertSet.insert({endV, current});
|
||||
if (!pair.second)
|
||||
pair.first->second = std::min(pair.first->second, current);
|
||||
}
|
||||
});
|
||||
// second iteration, actually check for duplicates
|
||||
// we always report the same set of duplicates, excluding the smallest
|
||||
// halfedge in the set of duplicates
|
||||
ForVert(i, [&endVerts, &endVertSet, &results, this](int current) {
|
||||
if (halfedge_[current].startVert == -1 ||
|
||||
halfedge_[current].endVert == -1) {
|
||||
return;
|
||||
}
|
||||
int endV = halfedge_[current].endVert;
|
||||
if (endVertSet.empty()) {
|
||||
auto iter = std::find_if(endVerts.begin(), endVerts.end(),
|
||||
[endV](const std::pair<int, int>& pair) {
|
||||
return pair.first == endV;
|
||||
});
|
||||
if (iter->second != current) results.push_back(current);
|
||||
} else {
|
||||
auto iter = endVertSet.find(endV);
|
||||
if (iter->second != current) results.push_back(current);
|
||||
}
|
||||
});
|
||||
}
|
||||
};
|
||||
#if MANIFOLD_PAR == 1
|
||||
if (nbEdges > 1e4) {
|
||||
std::mutex mutex;
|
||||
tbb::combinable<std::vector<bool>> store(
|
||||
[nbEdges]() { return std::vector<bool>(nbEdges, false); });
|
||||
tbb::parallel_for(
|
||||
tbb::blocked_range<size_t>(0, nbEdges),
|
||||
[&store, &mutex, &duplicates, &localLoop](const auto& r) {
|
||||
auto& local = store.local();
|
||||
std::vector<size_t> duplicatesLocal;
|
||||
localLoop(r.begin(), r.end(), local, duplicatesLocal);
|
||||
if (!duplicatesLocal.empty()) {
|
||||
std::lock_guard<std::mutex> lock(mutex);
|
||||
duplicates.insert(duplicates.end(), duplicatesLocal.begin(),
|
||||
duplicatesLocal.end());
|
||||
}
|
||||
});
|
||||
manifold::stable_sort(duplicates.begin(), duplicates.end());
|
||||
duplicates.resize(
|
||||
std::distance(duplicates.begin(),
|
||||
std::unique(duplicates.begin(), duplicates.end())));
|
||||
} else
|
||||
#endif
|
||||
{
|
||||
std::vector<bool> local(nbEdges, false);
|
||||
localLoop(0, nbEdges, local, duplicates);
|
||||
}
|
||||
|
||||
size_t numFlagged = 0;
|
||||
for (size_t i : duplicates) {
|
||||
DedupeEdge(i);
|
||||
numFlagged++;
|
||||
}
|
||||
|
||||
if (numFlagged == 0) break;
|
||||
|
||||
#ifdef MANIFOLD_DEBUG
|
||||
if (ManifoldParams().verbose > 0) {
|
||||
std::cout << "found " << numFlagged << " duplicate edges to split"
|
||||
<< std::endl;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
}
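The localLoop above switches between a small flat vector and an unordered_map depending on how many neighbors a vertex has, always keeping the smallest halfedge index per end vertex. A standalone sketch of that hybrid keep-the-minimum lookup (SmallMap and its 32-entry threshold follow the code above; the type itself is illustrative):

#include <algorithm>
#include <cstdio>
#include <unordered_map>
#include <utility>
#include <vector>

// Illustrative only: linear search in a small vector, falling back to a hash
// map once the neighborhood grows, keeping the minimum value per key.
struct SmallMap {
  std::vector<std::pair<int, int>> flat;
  std::unordered_map<int, int> big;

  void InsertMin(int key, int value) {
    if (big.empty()) {
      auto it = std::find_if(flat.begin(), flat.end(),
                             [key](const std::pair<int, int>& p) {
                               return p.first == key;
                             });
      if (it != flat.end()) {
        it->second = std::min(it->second, value);
      } else {
        flat.push_back({key, value});
        if (flat.size() > 32) {  // same threshold as the code above
          big.insert(flat.begin(), flat.end());
          flat.clear();
        }
      }
    } else {
      auto inserted = big.insert({key, value});
      if (!inserted.second)
        inserted.first->second = std::min(inserted.first->second, value);
    }
  }
};

int main() {
  SmallMap m;
  m.InsertMin(7, 42);
  m.InsertMin(7, 13);
  std::printf("%d\n", m.flat[0].second);  // prints 13
}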
|
||||
} // namespace manifold
|
||||
|
|
217 thirdparty/manifold/src/face_op.cpp (vendored)
|
@ -12,16 +12,75 @@
|
|||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include <unordered_set>
|
||||
|
||||
#include "impl.h"
|
||||
#include "manifold/common.h"
|
||||
#include "manifold/polygon.h"
|
||||
#include "parallel.h"
|
||||
#include "shared.h"
|
||||
|
||||
#if (MANIFOLD_PAR == 1) && __has_include(<tbb/concurrent_map.h>)
|
||||
#include <tbb/tbb.h>
|
||||
#define TBB_PREVIEW_CONCURRENT_ORDERED_CONTAINERS 1
|
||||
#include <tbb/concurrent_map.h>
|
||||
#endif
|
||||
#include <unordered_set>
|
||||
|
||||
#include "./impl.h"
|
||||
#include "./parallel.h"
|
||||
#include "manifold/polygon.h"
|
||||
namespace {
|
||||
using namespace manifold;
|
||||
|
||||
/**
|
||||
* Returns an assembled set of vertex index loops of the input list of
|
||||
* Halfedges, where each vert must be referenced the same number of times as a
|
||||
* startVert and endVert. If startHalfedgeIdx is given, instead of putting
|
||||
* vertex indices into the returned polygons structure, it will use the halfedge
|
||||
* indices instead.
|
||||
*/
|
||||
std::vector<std::vector<int>> AssembleHalfedges(VecView<Halfedge>::IterC start,
|
||||
VecView<Halfedge>::IterC end,
|
||||
const int startHalfedgeIdx) {
|
||||
std::multimap<int, int> vert_edge;
|
||||
for (auto edge = start; edge != end; ++edge) {
|
||||
vert_edge.emplace(
|
||||
std::make_pair(edge->startVert, static_cast<int>(edge - start)));
|
||||
}
|
||||
|
||||
std::vector<std::vector<int>> polys;
|
||||
int startEdge = 0;
|
||||
int thisEdge = startEdge;
|
||||
while (1) {
|
||||
if (thisEdge == startEdge) {
|
||||
if (vert_edge.empty()) break;
|
||||
startEdge = vert_edge.begin()->second;
|
||||
thisEdge = startEdge;
|
||||
polys.push_back({});
|
||||
}
|
||||
polys.back().push_back(startHalfedgeIdx + thisEdge);
|
||||
const auto result = vert_edge.find((start + thisEdge)->endVert);
|
||||
DEBUG_ASSERT(result != vert_edge.end(), topologyErr, "non-manifold edge");
|
||||
thisEdge = result->second;
|
||||
vert_edge.erase(result);
|
||||
}
|
||||
return polys;
|
||||
}
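AssembleHalfedges above stitches directed edges into closed loops by repeatedly following each edge's end vertex through a multimap keyed on start vertices. The same idea on plain (start, end) vertex pairs, as a rough sketch (AssembleLoops is illustrative, not a manifold API, and assumes even-manifold input so every loop closes):

#include <cstdio>
#include <map>
#include <utility>
#include <vector>

// Illustrative only: group directed edges into closed loops of edge indices.
std::vector<std::vector<int>> AssembleLoops(
    const std::vector<std::pair<int, int>>& edges) {
  std::multimap<int, int> vertToEdge;  // startVert -> edge index
  for (int i = 0; i < (int)edges.size(); ++i)
    vertToEdge.emplace(edges[i].first, i);

  std::vector<std::vector<int>> loops;
  while (!vertToEdge.empty()) {
    auto it = vertToEdge.begin();
    loops.push_back({});
    while (it != vertToEdge.end()) {
      const int e = it->second;
      loops.back().push_back(e);
      vertToEdge.erase(it);
      it = vertToEdge.find(edges[e].second);  // follow the end vertex onward
    }
  }
  return loops;
}

int main() {
  auto loops = AssembleLoops({{0, 1}, {1, 2}, {2, 0}, {3, 4}, {4, 3}});
  std::printf("%zu loops\n", loops.size());  // prints "2 loops"
}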
|
||||
|
||||
/**
|
||||
* Add the vertex position projection to the indexed polygons.
|
||||
*/
|
||||
PolygonsIdx ProjectPolygons(const std::vector<std::vector<int>>& polys,
|
||||
const Vec<Halfedge>& halfedge,
|
||||
const Vec<vec3>& vertPos, mat2x3 projection) {
|
||||
PolygonsIdx polygons;
|
||||
for (const auto& poly : polys) {
|
||||
polygons.push_back({});
|
||||
for (const auto& edge : poly) {
|
||||
polygons.back().push_back(
|
||||
{projection * vertPos[halfedge[edge].startVert], edge});
|
||||
} // for vert
|
||||
} // for poly
|
||||
return polygons;
|
||||
}
|
||||
} // namespace
|
||||
|
||||
namespace manifold {
|
||||
|
||||
|
@ -39,83 +98,71 @@ using AddTriangle = std::function<void(int, ivec3, vec3, TriRef)>;
|
|||
* faceNormal_ values are retained, repeated as necessary.
|
||||
*/
|
||||
void Manifold::Impl::Face2Tri(const Vec<int>& faceEdge,
|
||||
const Vec<TriRef>& halfedgeRef) {
|
||||
const Vec<TriRef>& halfedgeRef,
|
||||
bool allowConvex) {
|
||||
ZoneScoped;
|
||||
Vec<ivec3> triVerts;
|
||||
Vec<vec3> triNormal;
|
||||
Vec<ivec3> triProp;
|
||||
Vec<TriRef>& triRef = meshRelation_.triRef;
|
||||
triRef.resize(0);
|
||||
triRef.clear();
|
||||
auto processFace = [&](GeneralTriangulation general, AddTriangle addTri,
|
||||
int face) {
|
||||
const int firstEdge = faceEdge[face];
|
||||
const int lastEdge = faceEdge[face + 1];
|
||||
const int numEdge = lastEdge - firstEdge;
|
||||
if (numEdge == 0) return;
|
||||
DEBUG_ASSERT(numEdge >= 3, topologyErr, "face has less than three edges.");
|
||||
const vec3 normal = faceNormal_[face];
|
||||
|
||||
if (numEdge == 3) { // Single triangle
|
||||
int mapping[3] = {halfedge_[firstEdge].startVert,
|
||||
halfedge_[firstEdge + 1].startVert,
|
||||
halfedge_[firstEdge + 2].startVert};
|
||||
ivec3 triEdge(firstEdge, firstEdge + 1, firstEdge + 2);
|
||||
ivec3 tri(halfedge_[firstEdge].startVert,
|
||||
halfedge_[firstEdge + 1].startVert,
|
||||
halfedge_[firstEdge + 2].startVert);
|
||||
ivec3 ends(halfedge_[firstEdge].endVert, halfedge_[firstEdge + 1].endVert,
|
||||
halfedge_[firstEdge + 2].endVert);
|
||||
if (ends[0] == tri[2]) {
|
||||
std::swap(triEdge[1], triEdge[2]);
|
||||
std::swap(tri[1], tri[2]);
|
||||
std::swap(ends[1], ends[2]);
|
||||
}
|
||||
DEBUG_ASSERT(ends[0] == tri[1] && ends[1] == tri[2] && ends[2] == tri[0],
|
||||
topologyErr, "These 3 edges do not form a triangle!");
|
||||
|
||||
addTri(face, tri, normal, halfedgeRef[firstEdge]);
|
||||
addTri(face, triEdge, normal, halfedgeRef[firstEdge]);
|
||||
} else if (numEdge == 4) { // Pair of triangles
|
||||
int mapping[4] = {halfedge_[firstEdge].startVert,
|
||||
halfedge_[firstEdge + 1].startVert,
|
||||
halfedge_[firstEdge + 2].startVert,
|
||||
halfedge_[firstEdge + 3].startVert};
|
||||
const mat2x3 projection = GetAxisAlignedProjection(normal);
|
||||
auto triCCW = [&projection, this](const ivec3 tri) {
|
||||
return CCW(projection * this->vertPos_[tri[0]],
|
||||
projection * this->vertPos_[tri[1]],
|
||||
projection * this->vertPos_[tri[2]], epsilon_) >= 0;
|
||||
return CCW(projection * this->vertPos_[halfedge_[tri[0]].startVert],
|
||||
projection * this->vertPos_[halfedge_[tri[1]].startVert],
|
||||
projection * this->vertPos_[halfedge_[tri[2]].startVert],
|
||||
epsilon_) >= 0;
|
||||
};
|
||||
|
||||
ivec3 tri0(halfedge_[firstEdge].startVert, halfedge_[firstEdge].endVert,
|
||||
-1);
|
||||
ivec3 tri1(-1, -1, tri0[0]);
|
||||
for (const int i : {1, 2, 3}) {
|
||||
if (halfedge_[firstEdge + i].startVert == tri0[1]) {
|
||||
tri0[2] = halfedge_[firstEdge + i].endVert;
|
||||
tri1[0] = tri0[2];
|
||||
}
|
||||
if (halfedge_[firstEdge + i].endVert == tri0[0]) {
|
||||
tri1[1] = halfedge_[firstEdge + i].startVert;
|
||||
}
|
||||
}
|
||||
DEBUG_ASSERT(la::all(la::gequal(tri0, ivec3(0))) &&
|
||||
la::all(la::gequal(tri1, ivec3(0))),
|
||||
topologyErr, "non-manifold quad!");
|
||||
bool firstValid = triCCW(tri0) && triCCW(tri1);
|
||||
tri0[2] = tri1[1];
|
||||
tri1[2] = tri0[1];
|
||||
bool secondValid = triCCW(tri0) && triCCW(tri1);
|
||||
std::vector<int> quad = AssembleHalfedges(
|
||||
halfedge_.cbegin() + faceEdge[face],
|
||||
halfedge_.cbegin() + faceEdge[face + 1], faceEdge[face])[0];
|
||||
|
||||
if (!secondValid) {
|
||||
tri0[2] = tri1[0];
|
||||
tri1[2] = tri0[0];
|
||||
} else if (firstValid) {
|
||||
vec3 firstCross = vertPos_[tri0[0]] - vertPos_[tri1[0]];
|
||||
vec3 secondCross = vertPos_[tri0[1]] - vertPos_[tri1[1]];
|
||||
if (la::dot(firstCross, firstCross) <
|
||||
la::dot(secondCross, secondCross)) {
|
||||
tri0[2] = tri1[0];
|
||||
tri1[2] = tri0[0];
|
||||
const la::mat<int, 3, 2> tris[2] = {
|
||||
{{quad[0], quad[1], quad[2]}, {quad[0], quad[2], quad[3]}},
|
||||
{{quad[1], quad[2], quad[3]}, {quad[0], quad[1], quad[3]}}};
|
||||
|
||||
int choice = 0;
|
||||
|
||||
if (!(triCCW(tris[0][0]) && triCCW(tris[0][1]))) {
|
||||
choice = 1;
|
||||
} else if (triCCW(tris[1][0]) && triCCW(tris[1][1])) {
|
||||
vec3 diag0 = vertPos_[halfedge_[quad[0]].startVert] -
|
||||
vertPos_[halfedge_[quad[2]].startVert];
|
||||
vec3 diag1 = vertPos_[halfedge_[quad[1]].startVert] -
|
||||
vertPos_[halfedge_[quad[3]].startVert];
|
||||
if (la::length2(diag0) > la::length2(diag1)) {
|
||||
choice = 1;
|
||||
}
|
||||
}
|
||||
|
||||
for (const auto& tri : {tri0, tri1}) {
|
||||
for (const auto& tri : tris[choice]) {
|
||||
addTri(face, tri, normal, halfedgeRef[firstEdge]);
|
||||
}
|
||||
} else { // General triangulation
|
||||
|
@ -127,10 +174,12 @@ void Manifold::Impl::Face2Tri(const Vec<int>& faceEdge,
|
|||
auto generalTriangulation = [&](int face) {
|
||||
const vec3 normal = faceNormal_[face];
|
||||
const mat2x3 projection = GetAxisAlignedProjection(normal);
|
||||
const PolygonsIdx polys =
|
||||
Face2Polygons(halfedge_.cbegin() + faceEdge[face],
|
||||
halfedge_.cbegin() + faceEdge[face + 1], projection);
|
||||
return TriangulateIdx(polys, epsilon_);
|
||||
const PolygonsIdx polys = ProjectPolygons(
|
||||
AssembleHalfedges(halfedge_.cbegin() + faceEdge[face],
|
||||
halfedge_.cbegin() + faceEdge[face + 1],
|
||||
faceEdge[face]),
|
||||
halfedge_, vertPos_, projection);
|
||||
return TriangulateIdx(polys, epsilon_, allowConvex);
|
||||
};
|
||||
#if (MANIFOLD_PAR == 1) && __has_include(<tbb/tbb.h>)
|
||||
tbb::task_group group;
|
||||
|
@ -156,13 +205,17 @@ void Manifold::Impl::Face2Tri(const Vec<int>& faceEdge,
|
|||
// prefix sum computation (assign unique index to each face) and preallocation
|
||||
exclusive_scan(triCount.begin(), triCount.end(), triCount.begin(), 0_uz);
|
||||
triVerts.resize(triCount.back());
|
||||
triProp.resize(triCount.back());
|
||||
triNormal.resize(triCount.back());
|
||||
triRef.resize(triCount.back());
|
||||
|
||||
auto processFace2 = std::bind(
|
||||
processFace, [&](size_t face) { return std::move(results[face]); },
|
||||
[&](size_t face, ivec3 tri, vec3 normal, TriRef r) {
|
||||
triVerts[triCount[face]] = tri;
|
||||
for (const int i : {0, 1, 2}) {
|
||||
triVerts[triCount[face]][i] = halfedge_[tri[i]].startVert;
|
||||
triProp[triCount[face]][i] = halfedge_[tri[i]].propVert;
|
||||
}
|
||||
triNormal[triCount[face]] = normal;
|
||||
triRef[triCount[face]] = r;
|
||||
triCount[face]++;
|
||||
|
@ -177,8 +230,15 @@ void Manifold::Impl::Face2Tri(const Vec<int>& faceEdge,
|
|||
triRef.reserve(faceEdge.size());
|
||||
auto processFace2 = std::bind(
|
||||
processFace, generalTriangulation,
|
||||
[&](size_t _face, ivec3 tri, vec3 normal, TriRef r) {
|
||||
triVerts.push_back(tri);
|
||||
[&](size_t, ivec3 tri, vec3 normal, TriRef r) {
|
||||
ivec3 verts;
|
||||
ivec3 props;
|
||||
for (const int i : {0, 1, 2}) {
|
||||
verts[i] = halfedge_[tri[i]].startVert;
|
||||
props[i] = halfedge_[tri[i]].propVert;
|
||||
}
|
||||
triVerts.push_back(verts);
|
||||
triProp.push_back(props);
|
||||
triNormal.push_back(normal);
|
||||
triRef.push_back(r);
|
||||
},
|
||||
|
@ -189,41 +249,7 @@ void Manifold::Impl::Face2Tri(const Vec<int>& faceEdge,
|
|||
#endif
|
||||
|
||||
faceNormal_ = std::move(triNormal);
|
||||
CreateHalfedges(triVerts);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a set of 2D polygons formed by the input projection of the vertices
|
||||
* of the list of Halfedges, which must be an even-manifold, meaning each vert
|
||||
* must be referenced the same number of times as a startVert and endVert.
|
||||
*/
|
||||
PolygonsIdx Manifold::Impl::Face2Polygons(VecView<Halfedge>::IterC start,
|
||||
VecView<Halfedge>::IterC end,
|
||||
mat2x3 projection) const {
|
||||
std::multimap<int, int> vert_edge;
|
||||
for (auto edge = start; edge != end; ++edge) {
|
||||
vert_edge.emplace(
|
||||
std::make_pair(edge->startVert, static_cast<int>(edge - start)));
|
||||
}
|
||||
|
||||
PolygonsIdx polys;
|
||||
int startEdge = 0;
|
||||
int thisEdge = startEdge;
|
||||
while (1) {
|
||||
if (thisEdge == startEdge) {
|
||||
if (vert_edge.empty()) break;
|
||||
startEdge = vert_edge.begin()->second;
|
||||
thisEdge = startEdge;
|
||||
polys.push_back({});
|
||||
}
|
||||
int vert = (start + thisEdge)->startVert;
|
||||
polys.back().push_back({projection * vertPos_[vert], vert});
|
||||
const auto result = vert_edge.find((start + thisEdge)->endVert);
|
||||
DEBUG_ASSERT(result != vert_edge.end(), topologyErr, "non-manifold edge");
|
||||
thisEdge = result->second;
|
||||
vert_edge.erase(result);
|
||||
}
|
||||
return polys;
|
||||
CreateHalfedges(triProp, triVerts);
|
||||
}
|
||||
|
||||
Polygons Manifold::Impl::Slice(double height) const {
|
||||
|
@ -231,12 +257,9 @@ Polygons Manifold::Impl::Slice(double height) const {
|
|||
plane.min.z = plane.max.z = height;
|
||||
Vec<Box> query;
|
||||
query.push_back(plane);
|
||||
const SparseIndices collisions =
|
||||
collider_.Collisions<false, false>(query.cview());
|
||||
|
||||
std::unordered_set<int> tris;
|
||||
for (size_t i = 0; i < collisions.size(); ++i) {
|
||||
const int tri = collisions.Get(i, 1);
|
||||
auto recordCollision = [&](int, int tri) {
|
||||
double min = std::numeric_limits<double>::infinity();
|
||||
double max = -std::numeric_limits<double>::infinity();
|
||||
for (const int j : {0, 1, 2}) {
|
||||
|
@ -248,7 +271,10 @@ Polygons Manifold::Impl::Slice(double height) const {
|
|||
if (min <= height && max > height) {
|
||||
tris.insert(tri);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
auto recorder = MakeSimpleRecorder(recordCollision);
|
||||
collider_.Collisions<false>(query.cview(), recorder, false);
|
||||
|
||||
Polygons polys;
|
||||
while (!tris.empty()) {
|
||||
|
@ -303,7 +329,8 @@ Polygons Manifold::Impl::Project() const {
|
|||
cusps.begin());
|
||||
|
||||
PolygonsIdx polysIndexed =
|
||||
Face2Polygons(cusps.cbegin(), cusps.cend(), projection);
|
||||
ProjectPolygons(AssembleHalfedges(cusps.cbegin(), cusps.cend(), 0), cusps,
|
||||
vertPos_, projection);
|
||||
|
||||
Polygons polys;
|
||||
for (const auto& poly : polysIndexed) {
|
||||
|
|
40 thirdparty/manifold/src/hashtable.h (vendored)
|
@ -16,13 +16,12 @@
|
|||
|
||||
#include <atomic>
|
||||
|
||||
#include "./utils.h"
|
||||
#include "./vec.h"
|
||||
#include "utils.h"
|
||||
#include "vec.h"
|
||||
|
||||
namespace {
|
||||
typedef unsigned long long int Uint64;
|
||||
typedef Uint64 (*hash_fun_t)(Uint64);
|
||||
inline constexpr Uint64 kOpen = std::numeric_limits<Uint64>::max();
|
||||
using hash_fun_t = uint64_t(uint64_t);
|
||||
inline constexpr uint64_t kOpen = std::numeric_limits<uint64_t>::max();
|
||||
|
||||
template <typename T>
|
||||
T AtomicCAS(T& target, T compare, T val) {
|
||||
|
@ -45,13 +44,6 @@ T AtomicLoad(const T& target) {
|
|||
return tar.load(std::memory_order_acquire);
|
||||
}
|
||||
|
||||
// https://stackoverflow.com/questions/664014/what-integer-hash-function-are-good-that-accepts-an-integer-hash-key
|
||||
inline Uint64 hash64bit(Uint64 x) {
|
||||
x = (x ^ (x >> 30)) * 0xbf58476d1ce4e5b9ull;
|
||||
x = (x ^ (x >> 27)) * 0x94d049bb133111ebull;
|
||||
x = x ^ (x >> 31);
|
||||
return x;
|
||||
}
|
||||
} // namespace
|
||||
|
||||
namespace manifold {
|
||||
|
@ -59,7 +51,7 @@ namespace manifold {
|
|||
template <typename V, hash_fun_t H = hash64bit>
|
||||
class HashTableD {
|
||||
public:
|
||||
HashTableD(Vec<Uint64>& keys, Vec<V>& values, std::atomic<size_t>& used,
|
||||
HashTableD(Vec<uint64_t>& keys, Vec<V>& values, std::atomic<size_t>& used,
|
||||
uint32_t step = 1)
|
||||
: step_{step}, keys_{keys}, values_{values}, used_{used} {}
|
||||
|
||||
|
@ -70,12 +62,12 @@ class HashTableD {
|
|||
static_cast<size_t>(Size());
|
||||
}
|
||||
|
||||
void Insert(Uint64 key, const V& val) {
|
||||
void Insert(uint64_t key, const V& val) {
|
||||
uint32_t idx = H(key) & (Size() - 1);
|
||||
while (1) {
|
||||
if (Full()) return;
|
||||
Uint64& k = keys_[idx];
|
||||
const Uint64 found = AtomicCAS(k, kOpen, key);
|
||||
uint64_t& k = keys_[idx];
|
||||
const uint64_t found = AtomicCAS(k, kOpen, key);
|
||||
if (found == kOpen) {
|
||||
used_.fetch_add(1, std::memory_order_relaxed);
|
||||
values_[idx] = val;
|
||||
|
@ -86,10 +78,10 @@ class HashTableD {
|
|||
}
|
||||
}
|
||||
|
||||
V& operator[](Uint64 key) {
|
||||
V& operator[](uint64_t key) {
|
||||
uint32_t idx = H(key) & (Size() - 1);
|
||||
while (1) {
|
||||
const Uint64 k = AtomicLoad(keys_[idx]);
|
||||
const uint64_t k = AtomicLoad(keys_[idx]);
|
||||
if (k == key || k == kOpen) {
|
||||
return values_[idx];
|
||||
}
|
||||
|
@ -97,10 +89,10 @@ class HashTableD {
|
|||
}
|
||||
}
|
||||
|
||||
const V& operator[](Uint64 key) const {
|
||||
const V& operator[](uint64_t key) const {
|
||||
uint32_t idx = H(key) & (Size() - 1);
|
||||
while (1) {
|
||||
const Uint64 k = AtomicLoad(keys_[idx]);
|
||||
const uint64_t k = AtomicLoad(keys_[idx]);
|
||||
if (k == key || k == kOpen) {
|
||||
return values_[idx];
|
||||
}
|
||||
|
@ -108,13 +100,13 @@ class HashTableD {
|
|||
}
|
||||
}
|
||||
|
||||
Uint64 KeyAt(int idx) const { return AtomicLoad(keys_[idx]); }
|
||||
uint64_t KeyAt(int idx) const { return AtomicLoad(keys_[idx]); }
|
||||
V& At(int idx) { return values_[idx]; }
|
||||
const V& At(int idx) const { return values_[idx]; }
|
||||
|
||||
private:
|
||||
uint32_t step_;
|
||||
VecView<Uint64> keys_;
|
||||
VecView<uint64_t> keys_;
|
||||
VecView<V> values_;
|
||||
std::atomic<size_t>& used_;
|
||||
};
|
||||
|
@ -157,10 +149,10 @@ class HashTable {
|
|||
|
||||
Vec<V>& GetValueStore() { return values_; }
|
||||
|
||||
static Uint64 Open() { return kOpen; }
|
||||
static uint64_t Open() { return kOpen; }
|
||||
|
||||
private:
|
||||
Vec<Uint64> keys_;
|
||||
Vec<uint64_t> keys_;
|
||||
Vec<V> values_;
|
||||
std::atomic<size_t> used_ = 0;
|
||||
uint32_t step_;
|
||||
|
|
706 thirdparty/manifold/src/impl.cpp (vendored)
|
@ -12,38 +12,113 @@
|
|||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "./impl.h"
|
||||
#include "impl.h"
|
||||
|
||||
#include <algorithm>
|
||||
#include <atomic>
|
||||
#include <cstring>
|
||||
#include <map>
|
||||
#include <optional>
|
||||
|
||||
#include "./hashtable.h"
|
||||
#include "./mesh_fixes.h"
|
||||
#include "./parallel.h"
|
||||
#include "./svd.h"
|
||||
|
||||
#ifdef MANIFOLD_EXPORT
|
||||
#include <string.h>
|
||||
|
||||
#include <iostream>
|
||||
#endif
|
||||
#include "csg_tree.h"
|
||||
#include "hashtable.h"
|
||||
#include "manifold/optional_assert.h"
|
||||
#include "mesh_fixes.h"
|
||||
#include "parallel.h"
|
||||
#include "shared.h"
|
||||
#include "svd.h"
|
||||
|
||||
namespace {
|
||||
using namespace manifold;
|
||||
|
||||
constexpr uint64_t kRemove = std::numeric_limits<uint64_t>::max();
|
||||
|
||||
void AtomicAddVec3(vec3& target, const vec3& add) {
|
||||
for (int i : {0, 1, 2}) {
|
||||
std::atomic<double>& tar =
|
||||
reinterpret_cast<std::atomic<double>&>(target[i]);
|
||||
double old_val = tar.load(std::memory_order_relaxed);
|
||||
while (!tar.compare_exchange_weak(old_val, old_val + add[i],
|
||||
std::memory_order_relaxed)) {
|
||||
/**
|
||||
* Returns arc cosine of 𝑥.
|
||||
*
|
||||
* @return value in range [0,M_PI]
|
||||
* @return NAN if 𝑥 ∈ {NAN,+INFINITY,-INFINITY}
|
||||
* @return NAN if 𝑥 ∉ [-1,1]
|
||||
*/
|
||||
double sun_acos(double x) {
|
||||
/*
|
||||
* Origin of acos function: FreeBSD /usr/src/lib/msun/src/e_acos.c
|
||||
* Changed the use of union to memcpy to avoid undefined behavior.
|
||||
* ====================================================
|
||||
* Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
|
||||
*
|
||||
* Developed at SunSoft, a Sun Microsystems, Inc. business.
|
||||
* Permission to use, copy, modify, and distribute this
|
||||
* software is freely granted, provided that this notice
|
||||
* is preserved.
|
||||
* ====================================================
|
||||
*/
|
||||
constexpr double pio2_hi =
|
||||
1.57079632679489655800e+00; /* 0x3FF921FB, 0x54442D18 */
|
||||
constexpr double pio2_lo =
|
||||
6.12323399573676603587e-17; /* 0x3C91A626, 0x33145C07 */
|
||||
constexpr double pS0 =
|
||||
1.66666666666666657415e-01; /* 0x3FC55555, 0x55555555 */
|
||||
constexpr double pS1 =
|
||||
-3.25565818622400915405e-01; /* 0xBFD4D612, 0x03EB6F7D */
|
||||
constexpr double pS2 =
|
||||
2.01212532134862925881e-01; /* 0x3FC9C155, 0x0E884455 */
|
||||
constexpr double pS3 =
|
||||
-4.00555345006794114027e-02; /* 0xBFA48228, 0xB5688F3B */
|
||||
constexpr double pS4 =
|
||||
7.91534994289814532176e-04; /* 0x3F49EFE0, 0x7501B288 */
|
||||
constexpr double pS5 =
|
||||
3.47933107596021167570e-05; /* 0x3F023DE1, 0x0DFDF709 */
|
||||
constexpr double qS1 =
|
||||
-2.40339491173441421878e+00; /* 0xC0033A27, 0x1C8A2D4B */
|
||||
constexpr double qS2 =
|
||||
2.02094576023350569471e+00; /* 0x40002AE5, 0x9C598AC8 */
|
||||
constexpr double qS3 =
|
||||
-6.88283971605453293030e-01; /* 0xBFE6066C, 0x1B8D0159 */
|
||||
constexpr double qS4 =
|
||||
7.70381505559019352791e-02; /* 0x3FB3B8C5, 0xB12E9282 */
|
||||
auto R = [=](double z) {
|
||||
double p, q;
|
||||
p = z * (pS0 + z * (pS1 + z * (pS2 + z * (pS3 + z * (pS4 + z * pS5)))));
|
||||
q = 1.0 + z * (qS1 + z * (qS2 + z * (qS3 + z * qS4)));
|
||||
return p / q;
|
||||
};
|
||||
double z, w, s, c, df;
|
||||
uint64_t xx;
|
||||
uint32_t hx, lx, ix;
|
||||
memcpy(&xx, &x, sizeof(xx));
|
||||
hx = xx >> 32;
|
||||
ix = hx & 0x7fffffff;
|
||||
/* |x| >= 1 or nan */
|
||||
if (ix >= 0x3ff00000) {
|
||||
lx = xx;
|
||||
if (((ix - 0x3ff00000) | lx) == 0) {
|
||||
/* acos(1)=0, acos(-1)=pi */
|
||||
if (hx >> 31) return 2 * pio2_hi + 0x1p-120f;
|
||||
return 0;
|
||||
}
|
||||
return 0 / (x - x);
|
||||
}
|
||||
/* |x| < 0.5 */
|
||||
if (ix < 0x3fe00000) {
|
||||
if (ix <= 0x3c600000) /* |x| < 2**-57 */
|
||||
return pio2_hi + 0x1p-120f;
|
||||
return pio2_hi - (x - (pio2_lo - x * R(x * x)));
|
||||
}
|
||||
/* x < -0.5 */
|
||||
if (hx >> 31) {
|
||||
z = (1.0 + x) * 0.5;
|
||||
s = sqrt(z);
|
||||
w = R(z) * s - pio2_lo;
|
||||
return 2 * (pio2_hi - (s + w));
|
||||
}
|
||||
/* x > 0.5 */
|
||||
z = (1.0 - x) * 0.5;
|
||||
s = sqrt(z);
|
||||
memcpy(&xx, &s, sizeof(xx));
|
||||
xx &= 0xffffffff00000000;
|
||||
memcpy(&df, &xx, sizeof(xx));
|
||||
c = (z - df * df) / (s + df);
|
||||
w = R(z) * s + c;
|
||||
return 2 * (df + w);
|
||||
}
|
||||
|
||||
struct Transform4x3 {
|
||||
|
@ -52,143 +127,12 @@ struct Transform4x3 {
|
|||
vec3 operator()(vec3 position) { return transform * vec4(position, 1.0); }
|
||||
};
|
||||
|
||||
template <bool calculateTriNormal>
|
||||
struct AssignNormals {
|
||||
VecView<vec3> faceNormal;
|
||||
VecView<vec3> vertNormal;
|
||||
VecView<const vec3> vertPos;
|
||||
VecView<const Halfedge> halfedges;
|
||||
|
||||
void operator()(const int face) {
|
||||
vec3& triNormal = faceNormal[face];
|
||||
|
||||
ivec3 triVerts;
|
||||
for (int i : {0, 1, 2}) triVerts[i] = halfedges[3 * face + i].startVert;
|
||||
|
||||
vec3 edge[3];
|
||||
for (int i : {0, 1, 2}) {
|
||||
const int j = (i + 1) % 3;
|
||||
edge[i] = la::normalize(vertPos[triVerts[j]] - vertPos[triVerts[i]]);
|
||||
}
|
||||
|
||||
if (calculateTriNormal) {
|
||||
triNormal = la::normalize(la::cross(edge[0], edge[1]));
|
||||
if (std::isnan(triNormal.x)) triNormal = vec3(0, 0, 1);
|
||||
}
|
||||
|
||||
// corner angles
|
||||
vec3 phi;
|
||||
double dot = -la::dot(edge[2], edge[0]);
|
||||
phi[0] = dot >= 1 ? 0 : (dot <= -1 ? kPi : std::acos(dot));
|
||||
dot = -la::dot(edge[0], edge[1]);
|
||||
phi[1] = dot >= 1 ? 0 : (dot <= -1 ? kPi : std::acos(dot));
|
||||
phi[2] = kPi - phi[0] - phi[1];
|
||||
|
||||
// assign weighted sum
|
||||
for (int i : {0, 1, 2}) {
|
||||
AtomicAddVec3(vertNormal[triVerts[i]], phi[i] * triNormal);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
struct UpdateMeshID {
|
||||
const HashTableD<uint32_t> meshIDold2new;
|
||||
|
||||
void operator()(TriRef& ref) { ref.meshID = meshIDold2new[ref.meshID]; }
|
||||
};
|
||||
|
||||
struct CoplanarEdge {
|
||||
VecView<std::pair<int, int>> face2face;
|
||||
VecView<double> triArea;
|
||||
VecView<const Halfedge> halfedge;
|
||||
VecView<const vec3> vertPos;
|
||||
VecView<const TriRef> triRef;
|
||||
VecView<const ivec3> triProp;
|
||||
const int numProp;
|
||||
const double epsilon;
|
||||
const double tolerance;
|
||||
|
||||
void operator()(const int edgeIdx) {
|
||||
const Halfedge edge = halfedge[edgeIdx];
|
||||
const Halfedge pair = halfedge[edge.pairedHalfedge];
|
||||
const int edgeFace = edgeIdx / 3;
|
||||
const int pairFace = edge.pairedHalfedge / 3;
|
||||
|
||||
if (triRef[edgeFace].meshID != triRef[pairFace].meshID) return;
|
||||
|
||||
const vec3 base = vertPos[edge.startVert];
|
||||
const int baseNum = edgeIdx - 3 * edgeFace;
|
||||
const int jointNum = edge.pairedHalfedge - 3 * pairFace;
|
||||
|
||||
if (numProp > 0) {
|
||||
if (triProp[edgeFace][baseNum] != triProp[pairFace][Next3(jointNum)] ||
|
||||
triProp[edgeFace][Next3(baseNum)] != triProp[pairFace][jointNum])
|
||||
return;
|
||||
}
|
||||
|
||||
if (!edge.IsForward()) return;
|
||||
|
||||
const int edgeNum = baseNum == 0 ? 2 : baseNum - 1;
|
||||
const int pairNum = jointNum == 0 ? 2 : jointNum - 1;
|
||||
const vec3 jointVec = vertPos[pair.startVert] - base;
|
||||
const vec3 edgeVec =
|
||||
vertPos[halfedge[3 * edgeFace + edgeNum].startVert] - base;
|
||||
const vec3 pairVec =
|
||||
vertPos[halfedge[3 * pairFace + pairNum].startVert] - base;
|
||||
|
||||
const double length = std::max(la::length(jointVec), la::length(edgeVec));
|
||||
const double lengthPair =
|
||||
std::max(la::length(jointVec), la::length(pairVec));
|
||||
vec3 normal = la::cross(jointVec, edgeVec);
|
||||
const double area = la::length(normal);
|
||||
const double areaPair = la::length(la::cross(pairVec, jointVec));
|
||||
|
||||
// make sure we only write this once
|
||||
if (edgeIdx % 3 == 0) triArea[edgeFace] = area;
|
||||
// Don't link degenerate triangles
|
||||
if (area < length * epsilon || areaPair < lengthPair * epsilon) return;
|
||||
|
||||
const double volume = std::abs(la::dot(normal, pairVec));
|
||||
// Only operate on coplanar triangles
|
||||
if (volume > std::max(area, areaPair) * tolerance) return;
|
||||
|
||||
face2face[edgeIdx] = std::make_pair(edgeFace, pairFace);
|
||||
}
|
||||
};
|
||||
|
||||
struct CheckCoplanarity {
|
||||
VecView<int> comp2tri;
|
||||
VecView<const Halfedge> halfedge;
|
||||
VecView<const vec3> vertPos;
|
||||
std::vector<int>* components;
|
||||
const double tolerance;
|
||||
|
||||
void operator()(int tri) {
|
||||
const int component = (*components)[tri];
|
||||
const int referenceTri =
|
||||
reinterpret_cast<std::atomic<int>*>(&comp2tri[component])
|
||||
->load(std::memory_order_relaxed);
|
||||
if (referenceTri < 0 || referenceTri == tri) return;
|
||||
|
||||
const vec3 origin = vertPos[halfedge[3 * referenceTri].startVert];
|
||||
const vec3 normal = la::normalize(
|
||||
la::cross(vertPos[halfedge[3 * referenceTri + 1].startVert] - origin,
|
||||
vertPos[halfedge[3 * referenceTri + 2].startVert] - origin));
|
||||
|
||||
for (const int i : {0, 1, 2}) {
|
||||
const vec3 vert = vertPos[halfedge[3 * tri + i].startVert];
|
||||
// If any component vertex is not coplanar with the component's reference
|
||||
// triangle, unmark the entire component so that none of its triangles are
|
||||
// marked coplanar.
|
||||
if (std::abs(la::dot(normal, vert - origin)) > tolerance) {
|
||||
reinterpret_cast<std::atomic<int>*>(&comp2tri[component])
|
||||
->store(-1, std::memory_order_relaxed);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
int GetLabels(std::vector<int>& components,
|
||||
const Vec<std::pair<int, int>>& edges, int numNodes) {
|
||||
UnionFind<> uf(numNodes);
|
||||
|
@ -199,19 +143,6 @@ int GetLabels(std::vector<int>& components,
|
|||
|
||||
return uf.connectedComponents(components);
|
||||
}
|
||||
|
||||
void DedupePropVerts(manifold::Vec<ivec3>& triProp,
|
||||
const Vec<std::pair<int, int>>& vert2vert,
|
||||
size_t numPropVert) {
|
||||
ZoneScoped;
|
||||
std::vector<int> vertLabels;
|
||||
const int numLabels = GetLabels(vertLabels, vert2vert, numPropVert);
|
||||
|
||||
std::vector<int> label2vert(numLabels);
|
||||
for (size_t v = 0; v < numPropVert; ++v) label2vert[vertLabels[v]] = v;
|
||||
for (auto& prop : triProp)
|
||||
for (int i : {0, 1, 2}) prop[i] = label2vert[vertLabels[prop[i]]];
|
||||
}
|
||||
} // namespace
|
||||
|
||||
namespace manifold {
|
||||
|
@ -271,7 +202,7 @@ Manifold::Impl::Impl(Shape shape, const mat3x4 m) {
|
|||
CreateHalfedges(triVerts);
|
||||
Finish();
|
||||
InitializeOriginal();
|
||||
CreateFaces();
|
||||
MarkCoplanar();
|
||||
}
|
||||
|
||||
void Manifold::Impl::RemoveUnreferencedVerts() {
|
||||
|
@ -297,124 +228,165 @@ void Manifold::Impl::InitializeOriginal(bool keepFaceID) {
|
|||
const int meshID = ReserveIDs(1);
|
||||
meshRelation_.originalID = meshID;
|
||||
auto& triRef = meshRelation_.triRef;
|
||||
triRef.resize(NumTri());
|
||||
triRef.resize_nofill(NumTri());
|
||||
for_each_n(autoPolicy(NumTri(), 1e5), countAt(0), NumTri(),
|
||||
[meshID, keepFaceID, &triRef](const int tri) {
|
||||
triRef[tri] = {meshID, meshID, tri,
|
||||
keepFaceID ? triRef[tri].faceID : tri};
|
||||
triRef[tri] = {meshID, meshID, -1,
|
||||
keepFaceID ? triRef[tri].coplanarID : tri};
|
||||
});
|
||||
meshRelation_.meshIDtransform.clear();
|
||||
meshRelation_.meshIDtransform[meshID] = {meshID};
|
||||
}
|
||||
|
||||
void Manifold::Impl::CreateFaces() {
|
||||
void Manifold::Impl::MarkCoplanar() {
|
||||
ZoneScoped;
|
||||
Vec<std::pair<int, int>> face2face(halfedge_.size(), {-1, -1});
|
||||
Vec<std::pair<int, int>> vert2vert(halfedge_.size(), {-1, -1});
|
||||
Vec<double> triArea(NumTri());
|
||||
const int numTri = NumTri();
|
||||
struct TriPriority {
|
||||
double area2;
|
||||
int tri;
|
||||
};
|
||||
Vec<TriPriority> triPriority(numTri);
|
||||
for_each_n(autoPolicy(numTri), countAt(0), numTri,
|
||||
[&triPriority, this](int tri) {
|
||||
meshRelation_.triRef[tri].coplanarID = -1;
|
||||
if (halfedge_[3 * tri].startVert < 0) {
|
||||
triPriority[tri] = {0, tri};
|
||||
return;
|
||||
}
|
||||
const vec3 v = vertPos_[halfedge_[3 * tri].startVert];
|
||||
triPriority[tri] = {
|
||||
length2(cross(vertPos_[halfedge_[3 * tri].endVert] - v,
|
||||
vertPos_[halfedge_[3 * tri + 1].endVert] - v)),
|
||||
tri};
|
||||
});
|
||||
|
||||
const size_t numProp = NumProp();
|
||||
if (numProp > 0) {
|
||||
for_each_n(
|
||||
autoPolicy(halfedge_.size(), 1e4), countAt(0), halfedge_.size(),
|
||||
[&vert2vert, numProp, this](const int edgeIdx) {
|
||||
const Halfedge edge = halfedge_[edgeIdx];
|
||||
const Halfedge pair = halfedge_[edge.pairedHalfedge];
|
||||
const int edgeFace = edgeIdx / 3;
|
||||
const int pairFace = edge.pairedHalfedge / 3;
|
||||
stable_sort(triPriority.begin(), triPriority.end(),
|
||||
[](auto a, auto b) { return a.area2 > b.area2; });
|
||||
|
||||
if (meshRelation_.triRef[edgeFace].meshID !=
|
||||
meshRelation_.triRef[pairFace].meshID)
|
||||
return;
|
||||
Vec<int> interiorHalfedges;
|
||||
for (const auto tp : triPriority) {
|
||||
if (meshRelation_.triRef[tp.tri].coplanarID >= 0) continue;
|
||||
|
||||
const int baseNum = edgeIdx - 3 * edgeFace;
|
||||
const int jointNum = edge.pairedHalfedge - 3 * pairFace;
|
||||
meshRelation_.triRef[tp.tri].coplanarID = tp.tri;
|
||||
if (halfedge_[3 * tp.tri].startVert < 0) continue;
|
||||
const vec3 base = vertPos_[halfedge_[3 * tp.tri].startVert];
|
||||
const vec3 normal = faceNormal_[tp.tri];
|
||||
interiorHalfedges.resize(3);
|
||||
interiorHalfedges[0] = 3 * tp.tri;
|
||||
interiorHalfedges[1] = 3 * tp.tri + 1;
|
||||
interiorHalfedges[2] = 3 * tp.tri + 2;
|
||||
while (!interiorHalfedges.empty()) {
|
||||
const int h =
|
||||
NextHalfedge(halfedge_[interiorHalfedges.back()].pairedHalfedge);
|
||||
interiorHalfedges.pop_back();
|
||||
if (meshRelation_.triRef[h / 3].coplanarID >= 0) continue;
|
||||
|
||||
const int prop0 = meshRelation_.triProperties[edgeFace][baseNum];
|
||||
const int prop1 =
|
||||
meshRelation_
|
||||
.triProperties[pairFace][jointNum == 2 ? 0 : jointNum + 1];
|
||||
if (prop0 == prop1) return;
|
||||
const vec3 v = vertPos_[halfedge_[h].endVert];
|
||||
if (std::abs(dot(v - base, normal)) < tolerance_) {
|
||||
meshRelation_.triRef[h / 3].coplanarID = tp.tri;
|
||||
|
||||
bool propEqual = true;
|
||||
for (size_t p = 0; p < numProp; ++p) {
|
||||
if (meshRelation_.properties[numProp * prop0 + p] !=
|
||||
meshRelation_.properties[numProp * prop1 + p]) {
|
||||
propEqual = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (propEqual) {
|
||||
vert2vert[edgeIdx] = std::make_pair(prop0, prop1);
|
||||
}
|
||||
});
|
||||
DedupePropVerts(meshRelation_.triProperties, vert2vert, NumPropVert());
|
||||
}
|
||||
|
||||
for_each_n(autoPolicy(halfedge_.size(), 1e4), countAt(0), halfedge_.size(),
|
||||
CoplanarEdge({face2face, triArea, halfedge_, vertPos_,
|
||||
meshRelation_.triRef, meshRelation_.triProperties,
|
||||
meshRelation_.numProp, epsilon_, tolerance_}));
|
||||
|
||||
std::vector<int> components;
|
||||
const int numComponent = GetLabels(components, face2face, NumTri());
|
||||
|
||||
Vec<int> comp2tri(numComponent, -1);
|
||||
for (size_t tri = 0; tri < NumTri(); ++tri) {
|
||||
const int comp = components[tri];
|
||||
const int current = comp2tri[comp];
|
||||
if (current < 0 || triArea[tri] > triArea[current]) {
|
||||
comp2tri[comp] = tri;
|
||||
triArea[comp] = triArea[tri];
|
||||
}
|
||||
}
|
||||
|
||||
for_each_n(autoPolicy(halfedge_.size(), 1e4), countAt(0), NumTri(),
|
||||
CheckCoplanarity(
|
||||
{comp2tri, halfedge_, vertPos_, &components, tolerance_}));
|
||||
|
||||
Vec<TriRef>& triRef = meshRelation_.triRef;
|
||||
for (size_t tri = 0; tri < NumTri(); ++tri) {
|
||||
const int referenceTri = comp2tri[components[tri]];
|
||||
if (referenceTri >= 0) {
|
||||
triRef[tri].faceID = referenceTri;
|
||||
if (interiorHalfedges.empty() ||
|
||||
h != halfedge_[interiorHalfedges.back()].pairedHalfedge) {
|
||||
interiorHalfedges.push_back(h);
|
||||
} else {
|
||||
interiorHalfedges.pop_back();
|
||||
}
|
||||
const int hNext = NextHalfedge(h);
|
||||
interiorHalfedges.push_back(hNext);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
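// Illustrative sketch (not upstream code): the plane test MarkCoplanar applies
// while growing a coplanar patch from its seed triangle above. A neighbouring
// corner joins the patch when it lies within `tolerance` of the seed plane;
// the helper name and the free-standing Vec3d type are hypothetical.
#include <cmath>

struct Vec3d {
  double x, y, z;
};

static double Dot3(const Vec3d& a, const Vec3d& b) {
  return a.x * b.x + a.y * b.y + a.z * b.z;
}

// base and normal describe the seed plane; normal is assumed unit length.
static bool WithinPlaneTolerance(const Vec3d& vert, const Vec3d& base,
                                 const Vec3d& normal, double tolerance) {
  const Vec3d d{vert.x - base.x, vert.y - base.y, vert.z - base.z};
  return std::abs(Dot3(d, normal)) < tolerance;
}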
|
||||
|
||||
/**
|
||||
* Create the halfedge_ data structure from an input triVerts array like Mesh.
|
||||
* Dereference duplicate property vertices if they are exactly floating-point
|
||||
* equal. These unreferenced properties are then removed by CompactProps.
|
||||
*/
|
||||
void Manifold::Impl::CreateHalfedges(const Vec<ivec3>& triVerts) {
|
||||
void Manifold::Impl::DedupePropVerts() {
|
||||
ZoneScoped;
|
||||
const size_t numTri = triVerts.size();
|
||||
const size_t numProp = NumProp();
|
||||
if (numProp == 0) return;
|
||||
|
||||
Vec<std::pair<int, int>> vert2vert(halfedge_.size(), {-1, -1});
|
||||
for_each_n(autoPolicy(halfedge_.size(), 1e4), countAt(0), halfedge_.size(),
|
||||
[&vert2vert, numProp, this](const int edgeIdx) {
|
||||
const Halfedge edge = halfedge_[edgeIdx];
|
||||
const int edgeFace = edgeIdx / 3;
|
||||
const int pairFace = edge.pairedHalfedge / 3;
|
||||
|
||||
if (meshRelation_.triRef[edgeFace].meshID !=
|
||||
meshRelation_.triRef[pairFace].meshID)
|
||||
return;
|
||||
|
||||
const int prop0 = halfedge_[edgeIdx].propVert;
|
||||
const int prop1 =
|
||||
halfedge_[NextHalfedge(edge.pairedHalfedge)].propVert;
|
||||
bool propEqual = true;
|
||||
for (size_t p = 0; p < numProp; ++p) {
|
||||
if (properties_[numProp * prop0 + p] !=
|
||||
properties_[numProp * prop1 + p]) {
|
||||
propEqual = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (propEqual) {
|
||||
vert2vert[edgeIdx] = std::make_pair(prop0, prop1);
|
||||
}
|
||||
});
|
||||
|
||||
std::vector<int> vertLabels;
|
||||
const size_t numPropVert = NumPropVert();
|
||||
const int numLabels = GetLabels(vertLabels, vert2vert, numPropVert);
|
||||
|
||||
std::vector<int> label2vert(numLabels);
|
||||
for (size_t v = 0; v < numPropVert; ++v) label2vert[vertLabels[v]] = v;
|
||||
for (Halfedge& edge : halfedge_)
|
||||
edge.propVert = label2vert[vertLabels[edge.propVert]];
|
||||
}
|
||||
|
||||
constexpr int kRemovedHalfedge = -2;
|
||||
|
||||
/**
|
||||
* Create the halfedge_ data structure from a list of triangles. If the optional
|
||||
* prop2vert array is missing, it's assumed these triangles are pointing to
|
||||
* both vert and propVert indices. If prop2vert is present, the triangles are
|
||||
* assumed to be pointing to propVert indices only. The prop2vert array is used
|
||||
* to map the propVert indices to vert indices.
|
||||
*/
|
||||
void Manifold::Impl::CreateHalfedges(const Vec<ivec3>& triProp,
|
||||
const Vec<ivec3>& triVert) {
|
||||
ZoneScoped;
|
||||
const size_t numTri = triProp.size();
|
||||
const int numHalfedge = 3 * numTri;
|
||||
// drop the old value first to avoid copy
|
||||
halfedge_.resize(0);
|
||||
halfedge_.resize(numHalfedge);
|
||||
halfedge_.clear(true);
|
||||
halfedge_.resize_nofill(numHalfedge);
|
||||
Vec<uint64_t> edge(numHalfedge);
|
||||
Vec<int> ids(numHalfedge);
|
||||
auto policy = autoPolicy(numTri, 1e5);
|
||||
sequence(ids.begin(), ids.end());
|
||||
for_each_n(policy, countAt(0), numTri,
|
||||
[this, &edge, &triVerts](const int tri) {
|
||||
const ivec3& verts = triVerts[tri];
|
||||
[this, &edge, &triProp, &triVert](const int tri) {
|
||||
const ivec3& props = triProp[tri];
|
||||
for (const int i : {0, 1, 2}) {
|
||||
const int j = (i + 1) % 3;
|
||||
const int e = 3 * tri + i;
|
||||
halfedge_[e] = {verts[i], verts[j], -1};
|
||||
const int v0 = triVert.empty() ? props[i] : triVert[tri][i];
|
||||
const int v1 = triVert.empty() ? props[j] : triVert[tri][j];
|
||||
DEBUG_ASSERT(v0 != v1, logicErr, "topological degeneracy");
|
||||
halfedge_[e] = {v0, v1, -1, props[i]};
|
||||
// Sort the forward halfedges in front of the backward ones
|
||||
// by setting the highest-order bit.
|
||||
edge[e] = uint64_t(verts[i] < verts[j] ? 1 : 0) << 63 |
|
||||
((uint64_t)std::min(verts[i], verts[j])) << 32 |
|
||||
std::max(verts[i], verts[j]);
|
||||
edge[e] = uint64_t(v0 < v1 ? 1 : 0) << 63 |
|
||||
((uint64_t)std::min(v0, v1)) << 32 |
|
||||
std::max(v0, v1);
|
||||
}
|
||||
});
|
||||
// Stable sort is required here so that halfedges from the same face are
|
||||
// paired together (the triangles were created in face order). In some
|
||||
// degenerate situations the triangulator can add the same internal edge in
|
||||
// two different faces, causing this edge to not be 2-manifold. These are
|
||||
// fixed by duplicating verts in SimplifyTopology.
|
||||
// fixed by duplicating verts in CleanupTopology.
|
||||
stable_sort(ids.begin(), ids.end(), [&edge](const int& a, const int& b) {
|
||||
return edge[a] < edge[b];
|
||||
});
|
||||
|
@ -422,26 +394,62 @@ void Manifold::Impl::CreateHalfedges(const Vec<ivec3>& triVerts) {
|
|||
// Mark opposed triangles for removal - this may strand unreferenced verts
|
||||
// which are removed later by RemoveUnreferencedVerts() and Finish().
|
||||
const int numEdge = numHalfedge / 2;
|
||||
for (int i = 0; i < numEdge; ++i) {
|
||||
|
||||
const auto body = [&](int i, int consecutiveStart, int segmentEnd) {
|
||||
const int pair0 = ids[i];
|
||||
Halfedge h0 = halfedge_[pair0];
|
||||
int k = i + numEdge;
|
||||
Halfedge& h0 = halfedge_[pair0];
|
||||
int k = consecutiveStart + numEdge;
|
||||
while (1) {
|
||||
const int pair1 = ids[k];
|
||||
Halfedge h1 = halfedge_[pair1];
|
||||
Halfedge& h1 = halfedge_[pair1];
|
||||
if (h0.startVert != h1.endVert || h0.endVert != h1.startVert) break;
|
||||
if (halfedge_[NextHalfedge(pair0)].endVert ==
|
||||
halfedge_[NextHalfedge(pair1)].endVert) {
|
||||
h0 = {-1, -1, -1};
|
||||
h1 = {-1, -1, -1};
|
||||
h0.pairedHalfedge = h1.pairedHalfedge = kRemovedHalfedge;
|
||||
// Reorder so that remaining edges pair up
|
||||
if (k != i + numEdge) std::swap(ids[i + numEdge], ids[k]);
|
||||
break;
|
||||
}
|
||||
++k;
|
||||
if (k >= numHalfedge) break;
|
||||
if (k >= segmentEnd + numEdge) break;
|
||||
}
|
||||
if (i + 1 == segmentEnd) return consecutiveStart;
|
||||
Halfedge& h1 = halfedge_[ids[i + 1]];
|
||||
if (h0.startVert == h1.startVert && h0.endVert == h1.endVert)
|
||||
return consecutiveStart;
|
||||
return i + 1;
|
||||
};
|
||||
|
||||
#if MANIFOLD_PAR == 1
|
||||
Vec<std::pair<int, int>> ranges;
|
||||
const int increment = std::min(
|
||||
std::max(numEdge / tbb::this_task_arena::max_concurrency() / 2, 1024),
|
||||
numEdge);
|
||||
const auto duplicated = [&](int a, int b) {
|
||||
const Halfedge& h0 = halfedge_[ids[a]];
|
||||
const Halfedge& h1 = halfedge_[ids[b]];
|
||||
return h0.startVert == h1.startVert && h0.endVert == h1.endVert;
|
||||
};
|
||||
int end = 0;
|
||||
while (end < numEdge) {
|
||||
const int start = end;
|
||||
end = std::min(end + increment, numEdge);
|
||||
// make sure duplicated halfedges are in the same partition
|
||||
while (end < numEdge && duplicated(end - 1, end)) end++;
|
||||
ranges.push_back(std::make_pair(start, end));
|
||||
}
|
||||
for_each(ExecutionPolicy::Par, ranges.begin(), ranges.end(),
|
||||
[&](const std::pair<int, int>& range) {
|
||||
const auto [start, end] = range;
|
||||
int consecutiveStart = start;
|
||||
for (int i = start; i < end; ++i)
|
||||
consecutiveStart = body(i, consecutiveStart, end);
|
||||
});
|
||||
#else
|
||||
int consecutiveStart = 0;
|
||||
for (int i = 0; i < numEdge; ++i)
|
||||
consecutiveStart = body(i, consecutiveStart, numEdge);
|
||||
#endif
|
||||
|
||||
// Once sorted, the first half of the range is the forward halfedges, which
|
||||
// correspond to their backward pair at the same offset in the second half
|
||||
|
@ -449,9 +457,11 @@ void Manifold::Impl::CreateHalfedges(const Vec<ivec3>& triVerts) {
|
|||
for_each_n(policy, countAt(0), numEdge, [this, &ids, numEdge](int i) {
|
||||
const int pair0 = ids[i];
|
||||
const int pair1 = ids[i + numEdge];
|
||||
if (halfedge_[pair0].startVert >= 0) {
|
||||
if (halfedge_[pair0].pairedHalfedge != kRemovedHalfedge) {
|
||||
halfedge_[pair0].pairedHalfedge = pair1;
|
||||
halfedge_[pair1].pairedHalfedge = pair0;
|
||||
} else {
|
||||
halfedge_[pair0] = halfedge_[pair1] = {-1, -1, -1};
|
||||
}
|
||||
});
|
||||
}
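// Illustrative sketch (not upstream code): the 64-bit key CreateHalfedges
// builds per halfedge before sorting. The larger vertex index sits in the low
// 32 bits, the smaller one in bits 32-62, and the direction flag in bit 63, so
// the two halfedges of one undirected edge end up a fixed offset apart once
// the keys are sorted and the two halves are paired, as in the loop above.
#include <algorithm>
#include <cstdint>

static uint64_t HalfedgeSortKey(int v0, int v1) {
  return (uint64_t(v0 < v1 ? 1 : 0) << 63) |
         (uint64_t(std::min(v0, v1)) << 32) | uint64_t(std::max(v0, v1));
}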
|
||||
|
@ -468,13 +478,13 @@ void Manifold::Impl::Update() {
|
|||
collider_.UpdateBoxes(faceBox);
|
||||
}
|
||||
|
||||
void Manifold::Impl::MarkFailure(Error status) {
|
||||
void Manifold::Impl::MakeEmpty(Error status) {
|
||||
bBox_ = Box();
|
||||
vertPos_.resize(0);
|
||||
halfedge_.resize(0);
|
||||
vertNormal_.resize(0);
|
||||
faceNormal_.resize(0);
|
||||
halfedgeTangent_.resize(0);
|
||||
vertPos_.clear();
|
||||
halfedge_.clear();
|
||||
vertNormal_.clear();
|
||||
faceNormal_.clear();
|
||||
halfedgeTangent_.clear();
|
||||
meshRelation_ = MeshRelationD();
|
||||
status_ = status;
|
||||
}
|
||||
|
@ -489,15 +499,14 @@ void Manifold::Impl::WarpBatch(std::function<void(VecView<vec3>)> warpFunc) {
|
|||
warpFunc(vertPos_.view());
|
||||
CalculateBBox();
|
||||
if (!IsFinite()) {
|
||||
MarkFailure(Error::NonFiniteVertex);
|
||||
MakeEmpty(Error::NonFiniteVertex);
|
||||
return;
|
||||
}
|
||||
Update();
|
||||
faceNormal_.resize(0); // force recalculation of triNormal
|
||||
CalculateNormals();
|
||||
faceNormal_.clear(); // force recalculation of triNormal
|
||||
SetEpsilon();
|
||||
Finish();
|
||||
CreateFaces();
|
||||
MarkCoplanar();
|
||||
meshRelation_.originalID = -1;
|
||||
}
|
||||
|
||||
|
@ -511,13 +520,15 @@ Manifold::Impl Manifold::Impl::Transform(const mat3x4& transform_) const {
|
|||
return result;
|
||||
}
|
||||
if (!all(la::isfinite(transform_))) {
|
||||
result.MarkFailure(Error::NonFiniteVertex);
|
||||
result.MakeEmpty(Error::NonFiniteVertex);
|
||||
return result;
|
||||
}
|
||||
result.collider_ = collider_;
|
||||
result.meshRelation_ = meshRelation_;
|
||||
result.epsilon_ = epsilon_;
|
||||
result.tolerance_ = tolerance_;
|
||||
result.numProp_ = numProp_;
|
||||
result.properties_ = properties_;
|
||||
result.bBox_ = bBox_;
|
||||
result.halfedge_ = halfedge_;
|
||||
result.halfedgeTangent_.resize(halfedgeTangent_.size());
|
||||
|
@ -592,23 +603,73 @@ void Manifold::Impl::SetEpsilon(double minEpsilon, bool useSingle) {
|
|||
void Manifold::Impl::CalculateNormals() {
|
||||
ZoneScoped;
|
||||
vertNormal_.resize(NumVert());
|
||||
auto policy = autoPolicy(NumTri(), 1e4);
|
||||
fill(vertNormal_.begin(), vertNormal_.end(), vec3(0.0));
|
||||
bool calculateTriNormal = false;
|
||||
auto policy = autoPolicy(NumTri());
|
||||
|
||||
std::vector<std::atomic<int>> vertHalfedgeMap(NumVert());
|
||||
for_each_n(policy, countAt(0), NumVert(), [&](const size_t vert) {
|
||||
vertHalfedgeMap[vert] = std::numeric_limits<int>::max();
|
||||
});
|
||||
|
||||
auto atomicMin = [&vertHalfedgeMap](int value, int vert) {
|
||||
if (vert < 0) return;
|
||||
int old = std::numeric_limits<int>::max();
|
||||
while (!vertHalfedgeMap[vert].compare_exchange_strong(old, value))
|
||||
if (old < value) break;
|
||||
};
|
||||
if (faceNormal_.size() != NumTri()) {
|
||||
faceNormal_.resize(NumTri());
|
||||
calculateTriNormal = true;
|
||||
for_each_n(policy, countAt(0), NumTri(), [&](const int face) {
|
||||
vec3& triNormal = faceNormal_[face];
|
||||
if (halfedge_[3 * face].startVert < 0) {
|
||||
triNormal = vec3(0, 0, 1);
|
||||
return;
|
||||
}
|
||||
|
||||
ivec3 triVerts;
|
||||
for (int i : {0, 1, 2}) {
|
||||
int v = halfedge_[3 * face + i].startVert;
|
||||
triVerts[i] = v;
|
||||
atomicMin(3 * face + i, v);
|
||||
}
|
||||
|
||||
vec3 edge[3];
|
||||
for (int i : {0, 1, 2}) {
|
||||
const int j = (i + 1) % 3;
|
||||
edge[i] = la::normalize(vertPos_[triVerts[j]] - vertPos_[triVerts[i]]);
|
||||
}
|
||||
triNormal = la::normalize(la::cross(edge[0], edge[1]));
|
||||
if (std::isnan(triNormal.x)) triNormal = vec3(0, 0, 1);
|
||||
});
|
||||
} else {
|
||||
for_each_n(policy, countAt(0), halfedge_.size(),
|
||||
[&](const int i) { atomicMin(i, halfedge_[i].startVert); });
|
||||
}
|
||||
if (calculateTriNormal)
|
||||
for_each_n(
|
||||
policy, countAt(0), NumTri(),
|
||||
AssignNormals<true>({faceNormal_, vertNormal_, vertPos_, halfedge_}));
|
||||
else
|
||||
for_each_n(
|
||||
policy, countAt(0), NumTri(),
|
||||
AssignNormals<false>({faceNormal_, vertNormal_, vertPos_, halfedge_}));
|
||||
for_each(policy, vertNormal_.begin(), vertNormal_.end(),
|
||||
[](vec3& v) { v = SafeNormalize(v); });
|
||||
|
||||
for_each_n(policy, countAt(0), NumVert(), [&](const size_t vert) {
|
||||
int firstEdge = vertHalfedgeMap[vert].load();
|
||||
// not referenced
|
||||
if (firstEdge == std::numeric_limits<int>::max()) {
|
||||
vertNormal_[vert] = vec3(0.0);
|
||||
return;
|
||||
}
|
||||
vec3 normal = vec3(0.0);
|
||||
ForVert(firstEdge, [&](int edge) {
|
||||
ivec3 triVerts = {halfedge_[edge].startVert, halfedge_[edge].endVert,
|
||||
halfedge_[NextHalfedge(edge)].endVert};
|
||||
vec3 currEdge =
|
||||
la::normalize(vertPos_[triVerts[1]] - vertPos_[triVerts[0]]);
|
||||
vec3 prevEdge =
|
||||
la::normalize(vertPos_[triVerts[0]] - vertPos_[triVerts[2]]);
|
||||
|
||||
// if it is not finite, this means that the triangle is degenerate, and we
|
||||
// should just exclude it from the normal calculation...
|
||||
if (!la::isfinite(currEdge[0]) || !la::isfinite(prevEdge[0])) return;
|
||||
double dot = -la::dot(prevEdge, currEdge);
|
||||
double phi = dot >= 1 ? 0 : (dot <= -1 ? kPi : sun_acos(dot));
|
||||
normal += phi * faceNormal_[edge / 3];
|
||||
});
|
||||
vertNormal_[vert] = SafeNormalize(normal);
|
||||
});
|
||||
}
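// Illustrative sketch (not upstream code): the per-corner weight accumulated in
// the ForVert loop above. For unit edge vectors the interior angle at a corner
// is acos(clamp(-dot(prevEdge, currEdge), -1, 1)), and that angle scales the
// incident face normal before the final normalization. CornerAngle is a
// hypothetical helper working on plain double[3] vectors.
#include <algorithm>
#include <cmath>

static double CornerAngle(const double prevEdge[3], const double currEdge[3]) {
  double dot = 0;
  for (int i = 0; i < 3; ++i) dot += prevEdge[i] * currEdge[i];
  const double clamped = std::min(1.0, std::max(-1.0, -dot));
  return std::acos(clamped);
}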
|
||||
|
||||
/**
|
||||
|
@ -632,55 +693,13 @@ void Manifold::Impl::IncrementMeshIDs() {
|
|||
UpdateMeshID({meshIDold2new.D()}));
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a sparse array of the bounding box overlaps between the edges of
|
||||
* the input manifold, Q and the faces of this manifold. Returned indices only
|
||||
* point to forward halfedges.
|
||||
*/
|
||||
SparseIndices Manifold::Impl::EdgeCollisions(const Impl& Q,
|
||||
bool inverted) const {
|
||||
ZoneScoped;
|
||||
Vec<TmpEdge> edges = CreateTmpEdges(Q.halfedge_);
|
||||
const size_t numEdge = edges.size();
|
||||
Vec<Box> QedgeBB(numEdge);
|
||||
const auto& vertPos = Q.vertPos_;
|
||||
auto policy = autoPolicy(numEdge, 1e5);
|
||||
for_each_n(
|
||||
policy, countAt(0), numEdge, [&QedgeBB, &edges, &vertPos](const int e) {
|
||||
QedgeBB[e] = Box(vertPos[edges[e].first], vertPos[edges[e].second]);
|
||||
});
|
||||
|
||||
SparseIndices q1p2(0);
|
||||
if (inverted)
|
||||
q1p2 = collider_.Collisions<false, true>(QedgeBB.cview());
|
||||
else
|
||||
q1p2 = collider_.Collisions<false, false>(QedgeBB.cview());
|
||||
|
||||
if (inverted)
|
||||
for_each(policy, countAt(0_uz), countAt(q1p2.size()),
|
||||
ReindexEdge<true>({edges, q1p2}));
|
||||
else
|
||||
for_each(policy, countAt(0_uz), countAt(q1p2.size()),
|
||||
ReindexEdge<false>({edges, q1p2}));
|
||||
return q1p2;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a sparse array of the input vertices that project inside the XY
|
||||
* bounding boxes of the faces of this manifold.
|
||||
*/
|
||||
SparseIndices Manifold::Impl::VertexCollisionsZ(VecView<const vec3> vertsIn,
|
||||
bool inverted) const {
|
||||
ZoneScoped;
|
||||
if (inverted)
|
||||
return collider_.Collisions<false, true>(vertsIn);
|
||||
else
|
||||
return collider_.Collisions<false, false>(vertsIn);
|
||||
}
|
||||
|
||||
#ifdef MANIFOLD_DEBUG
|
||||
/**
|
||||
* Debugging output using high precision OBJ files with specialized comments
|
||||
*/
|
||||
std::ostream& operator<<(std::ostream& stream, const Manifold::Impl& impl) {
|
||||
stream << std::setprecision(17); // for double precision
|
||||
stream << std::setprecision(19); // for double precision
|
||||
stream << std::fixed; // for uniformity in output numbers
|
||||
stream << "# ======= begin mesh ======" << std::endl;
|
||||
stream << "# tolerance = " << impl.tolerance_ << std::endl;
|
||||
stream << "# epsilon = " << impl.epsilon_ << std::endl;
|
||||
|
@ -699,13 +718,19 @@ std::ostream& operator<<(std::ostream& stream, const Manifold::Impl& impl) {
|
|||
stream << "# ======== end mesh =======" << std::endl;
|
||||
return stream;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef MANIFOLD_EXPORT
|
||||
Manifold Manifold::ImportMeshGL64(std::istream& stream) {
|
||||
/**
|
||||
* Import a mesh from a Wavefront OBJ file that was exported with WriteOBJ. This
|
||||
* function is the counterpart to WriteOBJ and should be used with it. This
|
||||
* function is not guaranteed to be able to import OBJ files not written by the
|
||||
* WriteOBJ function.
|
||||
*/
|
||||
Manifold Manifold::ReadOBJ(std::istream& stream) {
|
||||
if (!stream.good()) return Invalid();
|
||||
|
||||
MeshGL64 mesh;
|
||||
std::optional<double> epsilon;
|
||||
stream.precision(17);
|
||||
stream >> std::setprecision(19);
|
||||
while (true) {
|
||||
char c = stream.get();
|
||||
if (stream.eof()) break;
|
||||
|
@ -718,7 +743,7 @@ Manifold Manifold::ImportMeshGL64(std::istream& stream) {
|
|||
stream.get(tmp.data(), SIZE, '\n');
|
||||
if (strncmp(tmp.data(), "tolerance", SIZE) == 0) {
|
||||
// skip 3 letters
|
||||
for (int i : {0, 1, 2}) stream.get();
|
||||
for (int _ : {0, 1, 2}) stream.get();
|
||||
stream >> mesh.tolerance;
|
||||
} else if (strncmp(tmp.data(), "epsilon =", SIZE) == 0) {
|
||||
double tmp;
|
||||
|
@ -727,7 +752,7 @@ Manifold Manifold::ImportMeshGL64(std::istream& stream) {
|
|||
} else {
|
||||
// add it back because it is not what we want
|
||||
int end = 0;
|
||||
while (tmp[end] != 0 && end < SIZE) end++;
|
||||
while (end < SIZE && tmp[end] != 0) end++;
|
||||
while (--end > -1) stream.putback(tmp[end]);
|
||||
}
|
||||
c = stream.get();
|
||||
|
@ -739,29 +764,42 @@ Manifold Manifold::ImportMeshGL64(std::istream& stream) {
|
|||
break;
|
||||
}
|
||||
case 'v':
|
||||
for (int i : {0, 1, 2}) {
|
||||
for (int _ : {0, 1, 2}) {
|
||||
double x;
|
||||
stream >> x;
|
||||
mesh.vertProperties.push_back(x);
|
||||
}
|
||||
break;
|
||||
case 'f':
|
||||
for (int i : {0, 1, 2}) {
|
||||
for (int _ : {0, 1, 2}) {
|
||||
uint64_t x;
|
||||
stream >> x;
|
||||
mesh.triVerts.push_back(x - 1);
|
||||
}
|
||||
break;
|
||||
case '\r':
|
||||
case '\n':
|
||||
break;
|
||||
default:
|
||||
DEBUG_ASSERT(false, userErr, "unexpected character in MeshGL64 import");
|
||||
DEBUG_ASSERT(false, userErr, "unexpected character in Manifold import");
|
||||
}
|
||||
}
|
||||
auto m = std::make_shared<Manifold::Impl>(mesh);
|
||||
if (epsilon) m->SetEpsilon(*epsilon);
|
||||
return Manifold(m);
|
||||
}
|
||||
|
||||
/**
|
||||
* Export the mesh to a Wavefront OBJ file in a way that preserves the full
|
||||
* 64-bit precision of the vertex positions, as well as storing metadata such as
|
||||
* the tolerance and epsilon. Useful for debugging and testing. Files written
|
||||
* by WriteOBJ should be read back in with ReadOBJ.
|
||||
*/
|
||||
bool Manifold::WriteOBJ(std::ostream& stream) const {
|
||||
if (!stream.good()) return false;
|
||||
stream << *this->GetCsgLeafNode().GetImpl();
|
||||
return true;
|
||||
}
|
||||
#endif
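// Illustrative usage sketch (not upstream code; assumes a build with
// MANIFOLD_EXPORT enabled): round-trip a manifold through the debug OBJ format
// using an in-memory stream, matching the WriteOBJ/ReadOBJ pairing documented
// above. ReadOBJ is used as a static factory here, as its definition suggests.
#include <sstream>

#include "manifold/manifold.h"

inline void RoundTripExample() {
  using namespace manifold;
  const Manifold sphere = Manifold::Sphere(1.0, 32);
  std::stringstream ss;
  if (!sphere.WriteOBJ(ss)) return;  // full precision plus tolerance/epsilon
  const Manifold restored = Manifold::ReadOBJ(ss);
  (void)restored;  // should describe the same mesh as the original
}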
|
||||
|
||||
} // namespace manifold
|
||||
|
|
230
thirdparty/manifold/src/impl.h
vendored
|
@ -15,12 +15,11 @@
|
|||
#pragma once
|
||||
#include <map>
|
||||
|
||||
#include "./collider.h"
|
||||
#include "./shared.h"
|
||||
#include "./sparse.h"
|
||||
#include "./vec.h"
|
||||
#include "collider.h"
|
||||
#include "manifold/common.h"
|
||||
#include "manifold/manifold.h"
|
||||
#include "manifold/polygon.h"
|
||||
#include "shared.h"
|
||||
#include "vec.h"
|
||||
|
||||
namespace manifold {
|
||||
|
||||
|
@ -34,11 +33,8 @@ struct Manifold::Impl {
|
|||
struct MeshRelationD {
|
||||
/// The originalID of this Manifold if it is an original; -1 otherwise.
|
||||
int originalID = -1;
|
||||
int numProp = 0;
|
||||
Vec<double> properties;
|
||||
std::map<int, Relation> meshIDtransform;
|
||||
Vec<TriRef> triRef;
|
||||
Vec<ivec3> triProperties;
|
||||
};
|
||||
struct BaryIndices {
|
||||
int tri, start4, end4;
|
||||
|
@ -47,9 +43,13 @@ struct Manifold::Impl {
|
|||
Box bBox_;
|
||||
double epsilon_ = -1;
|
||||
double tolerance_ = -1;
|
||||
int numProp_ = 0;
|
||||
Error status_ = Error::NoError;
|
||||
Vec<vec3> vertPos_;
|
||||
Vec<Halfedge> halfedge_;
|
||||
Vec<double> properties_;
|
||||
// Note that vertNormal_ is not precise due to the use of an approximated acos
|
||||
// function
|
||||
Vec<vec3> vertNormal_;
|
||||
Vec<vec3> faceNormal_;
|
||||
Vec<vec4> halfedgeTangent_;
|
||||
|
@ -69,153 +69,185 @@ struct Manifold::Impl {
|
|||
const uint32_t numTri = meshGL.NumTri();
|
||||
|
||||
if (meshGL.numProp < 3) {
|
||||
MarkFailure(Error::MissingPositionProperties);
|
||||
MakeEmpty(Error::MissingPositionProperties);
|
||||
return;
|
||||
}
|
||||
|
||||
if (meshGL.mergeFromVert.size() != meshGL.mergeToVert.size()) {
|
||||
MarkFailure(Error::MergeVectorsDifferentLengths);
|
||||
MakeEmpty(Error::MergeVectorsDifferentLengths);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!meshGL.runTransform.empty() &&
|
||||
12 * meshGL.runOriginalID.size() != meshGL.runTransform.size()) {
|
||||
MarkFailure(Error::TransformWrongLength);
|
||||
MakeEmpty(Error::TransformWrongLength);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!meshGL.runOriginalID.empty() && !meshGL.runIndex.empty() &&
|
||||
meshGL.runOriginalID.size() + 1 != meshGL.runIndex.size() &&
|
||||
meshGL.runOriginalID.size() != meshGL.runIndex.size()) {
|
||||
MarkFailure(Error::RunIndexWrongLength);
|
||||
MakeEmpty(Error::RunIndexWrongLength);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!meshGL.faceID.empty() && meshGL.faceID.size() != meshGL.NumTri()) {
|
||||
MarkFailure(Error::FaceIDWrongLength);
|
||||
MakeEmpty(Error::FaceIDWrongLength);
|
||||
return;
|
||||
}
|
||||
|
||||
std::vector<int> prop2vert(numVert);
|
||||
std::iota(prop2vert.begin(), prop2vert.end(), 0);
|
||||
for (size_t i = 0; i < meshGL.mergeFromVert.size(); ++i) {
|
||||
const uint32_t from = meshGL.mergeFromVert[i];
|
||||
const uint32_t to = meshGL.mergeToVert[i];
|
||||
if (from >= numVert || to >= numVert) {
|
||||
MarkFailure(Error::MergeIndexOutOfBounds);
|
||||
return;
|
||||
if (!manifold::all_of(meshGL.vertProperties.begin(),
|
||||
meshGL.vertProperties.end(),
|
||||
[](Precision x) { return std::isfinite(x); })) {
|
||||
MakeEmpty(Error::NonFiniteVertex);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!manifold::all_of(meshGL.runTransform.begin(),
|
||||
meshGL.runTransform.end(),
|
||||
[](Precision x) { return std::isfinite(x); })) {
|
||||
MakeEmpty(Error::InvalidConstruction);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!manifold::all_of(meshGL.halfedgeTangent.begin(),
|
||||
meshGL.halfedgeTangent.end(),
|
||||
[](Precision x) { return std::isfinite(x); })) {
|
||||
MakeEmpty(Error::InvalidConstruction);
|
||||
return;
|
||||
}
|
||||
|
||||
std::vector<int> prop2vert;
|
||||
if (!meshGL.mergeFromVert.empty()) {
|
||||
prop2vert.resize(numVert);
|
||||
std::iota(prop2vert.begin(), prop2vert.end(), 0);
|
||||
for (size_t i = 0; i < meshGL.mergeFromVert.size(); ++i) {
|
||||
const uint32_t from = meshGL.mergeFromVert[i];
|
||||
const uint32_t to = meshGL.mergeToVert[i];
|
||||
if (from >= numVert || to >= numVert) {
|
||||
MakeEmpty(Error::MergeIndexOutOfBounds);
|
||||
return;
|
||||
}
|
||||
prop2vert[from] = to;
|
||||
}
|
||||
prop2vert[from] = to;
|
||||
}
|
||||
|
||||
const auto numProp = meshGL.numProp - 3;
|
||||
meshRelation_.numProp = numProp;
|
||||
meshRelation_.properties.resize(meshGL.NumVert() * numProp);
|
||||
numProp_ = numProp;
|
||||
properties_.resize_nofill(meshGL.NumVert() * numProp);
|
||||
tolerance_ = meshGL.tolerance;
|
||||
// This will have unreferenced duplicate positions that will be removed by
|
||||
// Impl::RemoveUnreferencedVerts().
|
||||
vertPos_.resize(meshGL.NumVert());
|
||||
vertPos_.resize_nofill(meshGL.NumVert());
|
||||
|
||||
for (size_t i = 0; i < meshGL.NumVert(); ++i) {
|
||||
for (const int j : {0, 1, 2})
|
||||
vertPos_[i][j] = meshGL.vertProperties[meshGL.numProp * i + j];
|
||||
for (size_t j = 0; j < numProp; ++j)
|
||||
meshRelation_.properties[i * numProp + j] =
|
||||
properties_[i * numProp + j] =
|
||||
meshGL.vertProperties[meshGL.numProp * i + 3 + j];
|
||||
}
|
||||
|
||||
halfedgeTangent_.resize(meshGL.halfedgeTangent.size() / 4);
|
||||
halfedgeTangent_.resize_nofill(meshGL.halfedgeTangent.size() / 4);
|
||||
for (size_t i = 0; i < halfedgeTangent_.size(); ++i) {
|
||||
for (const int j : {0, 1, 2, 3})
|
||||
halfedgeTangent_[i][j] = meshGL.halfedgeTangent[4 * i + j];
|
||||
}
|
||||
|
||||
Vec<TriRef> triRef;
|
||||
if (!meshGL.runOriginalID.empty()) {
|
||||
auto runIndex = meshGL.runIndex;
|
||||
const auto runEnd = meshGL.triVerts.size();
|
||||
if (runIndex.empty()) {
|
||||
runIndex = {0, static_cast<I>(runEnd)};
|
||||
} else if (runIndex.size() == meshGL.runOriginalID.size()) {
|
||||
runIndex.push_back(runEnd);
|
||||
}
|
||||
triRef.resize(meshGL.NumTri());
|
||||
const auto startID = Impl::ReserveIDs(meshGL.runOriginalID.size());
|
||||
for (size_t i = 0; i < meshGL.runOriginalID.size(); ++i) {
|
||||
const int meshID = startID + i;
|
||||
const int originalID = meshGL.runOriginalID[i];
|
||||
for (size_t tri = runIndex[i] / 3; tri < runIndex[i + 1] / 3; ++tri) {
|
||||
TriRef& ref = triRef[tri];
|
||||
ref.meshID = meshID;
|
||||
ref.originalID = originalID;
|
||||
ref.tri = meshGL.faceID.empty() ? tri : meshGL.faceID[tri];
|
||||
ref.faceID = tri;
|
||||
}
|
||||
triRef.resize_nofill(meshGL.NumTri());
|
||||
|
||||
if (meshGL.runTransform.empty()) {
|
||||
meshRelation_.meshIDtransform[meshID] = {originalID};
|
||||
} else {
|
||||
const Precision* m = meshGL.runTransform.data() + 12 * i;
|
||||
meshRelation_.meshIDtransform[meshID] = {originalID,
|
||||
{{m[0], m[1], m[2]},
|
||||
{m[3], m[4], m[5]},
|
||||
{m[6], m[7], m[8]},
|
||||
{m[9], m[10], m[11]}}};
|
||||
}
|
||||
auto runIndex = meshGL.runIndex;
|
||||
const auto runEnd = meshGL.triVerts.size();
|
||||
if (runIndex.empty()) {
|
||||
runIndex = {0, static_cast<I>(runEnd)};
|
||||
} else if (runIndex.size() == meshGL.runOriginalID.size()) {
|
||||
runIndex.push_back(runEnd);
|
||||
} else if (runIndex.size() == 1) {
|
||||
runIndex.push_back(runEnd);
|
||||
}
|
||||
|
||||
const auto startID = Impl::ReserveIDs(meshGL.runOriginalID.size());
|
||||
auto runOriginalID = meshGL.runOriginalID;
|
||||
if (runOriginalID.empty()) {
|
||||
runOriginalID.push_back(startID);
|
||||
}
|
||||
for (size_t i = 0; i < runOriginalID.size(); ++i) {
|
||||
const int meshID = startID + i;
|
||||
const int originalID = runOriginalID[i];
|
||||
for (size_t tri = runIndex[i] / 3; tri < runIndex[i + 1] / 3; ++tri) {
|
||||
TriRef& ref = triRef[tri];
|
||||
ref.meshID = meshID;
|
||||
ref.originalID = originalID;
|
||||
ref.faceID = meshGL.faceID.empty() ? -1 : meshGL.faceID[tri];
|
||||
ref.coplanarID = tri;
|
||||
}
|
||||
|
||||
if (meshGL.runTransform.empty()) {
|
||||
meshRelation_.meshIDtransform[meshID] = {originalID};
|
||||
} else {
|
||||
const Precision* m = meshGL.runTransform.data() + 12 * i;
|
||||
meshRelation_.meshIDtransform[meshID] = {originalID,
|
||||
{{m[0], m[1], m[2]},
|
||||
{m[3], m[4], m[5]},
|
||||
{m[6], m[7], m[8]},
|
||||
{m[9], m[10], m[11]}}};
|
||||
}
|
||||
}
|
||||
|
||||
Vec<ivec3> triVerts;
|
||||
triVerts.reserve(numTri);
|
||||
Vec<ivec3> triProp;
|
||||
triProp.reserve(numTri);
|
||||
Vec<ivec3> triVert;
|
||||
const bool needsPropMap = numProp > 0 && !prop2vert.empty();
|
||||
if (needsPropMap) triVert.reserve(numTri);
|
||||
if (triRef.size() > 0) meshRelation_.triRef.reserve(numTri);
|
||||
for (size_t i = 0; i < numTri; ++i) {
|
||||
ivec3 tri;
|
||||
ivec3 triP, triV;
|
||||
for (const size_t j : {0, 1, 2}) {
|
||||
uint32_t vert = (uint32_t)meshGL.triVerts[3 * i + j];
|
||||
if (vert >= numVert) {
|
||||
MarkFailure(Error::VertexOutOfBounds);
|
||||
MakeEmpty(Error::VertexOutOfBounds);
|
||||
return;
|
||||
}
|
||||
tri[j] = prop2vert[vert];
|
||||
triP[j] = vert;
|
||||
triV[j] = prop2vert.empty() ? vert : prop2vert[vert];
|
||||
}
|
||||
if (tri[0] != tri[1] && tri[1] != tri[2] && tri[2] != tri[0]) {
|
||||
triVerts.push_back(tri);
|
||||
if (triV[0] != triV[1] && triV[1] != triV[2] && triV[2] != triV[0]) {
|
||||
if (needsPropMap) {
|
||||
triProp.push_back(triP);
|
||||
triVert.push_back(triV);
|
||||
} else {
|
||||
triProp.push_back(triV);
|
||||
}
|
||||
if (triRef.size() > 0) {
|
||||
meshRelation_.triRef.push_back(triRef[i]);
|
||||
}
|
||||
if (numProp > 0) {
|
||||
meshRelation_.triProperties.push_back(
|
||||
ivec3(static_cast<uint32_t>(meshGL.triVerts[3 * i]),
|
||||
static_cast<uint32_t>(meshGL.triVerts[3 * i + 1]),
|
||||
static_cast<uint32_t>(meshGL.triVerts[3 * i + 2])));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
CreateHalfedges(triVerts);
|
||||
CreateHalfedges(triProp, triVert);
|
||||
if (!IsManifold()) {
|
||||
MarkFailure(Error::NotManifold);
|
||||
MakeEmpty(Error::NotManifold);
|
||||
return;
|
||||
}
|
||||
|
||||
CalculateBBox();
|
||||
SetEpsilon(-1, std::is_same<Precision, float>::value);
|
||||
|
||||
SplitPinchedVerts();
|
||||
|
||||
// we need to split pinched verts before calculating vertex normals, because
|
||||
// the algorithm doesn't work with pinched verts
|
||||
CleanupTopology();
|
||||
CalculateNormals();
|
||||
|
||||
if (meshGL.runOriginalID.empty()) {
|
||||
InitializeOriginal();
|
||||
}
|
||||
DedupePropVerts();
|
||||
MarkCoplanar();
|
||||
|
||||
CreateFaces();
|
||||
|
||||
SimplifyTopology();
|
||||
RemoveDegenerates();
|
||||
RemoveUnreferencedVerts();
|
||||
Finish();
|
||||
|
||||
if (!IsFinite()) {
|
||||
MarkFailure(Error::NonFiniteVertex);
|
||||
MakeEmpty(Error::NonFiniteVertex);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -224,7 +256,8 @@ struct Manifold::Impl {
|
|||
meshRelation_.originalID = -1;
|
||||
}
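// Illustrative standalone usage sketch (not upstream code): building a
// Manifold from a minimal MeshGL, the input the constructor above validates.
// The geometry is a single tetrahedron with positions only (numProp = 3) and
// no merge vectors.
#include "manifold/manifold.h"

inline manifold::Manifold TetFromMeshGL() {
  using namespace manifold;
  MeshGL mesh;
  mesh.numProp = 3;  // x, y, z only
  mesh.vertProperties = {0, 0, 0,  1, 0, 0,  0, 1, 0,  0, 0, 1};
  // Four outward-facing (CCW) triangles.
  mesh.triVerts = {0, 2, 1,  0, 1, 3,  0, 3, 2,  1, 2, 3};
  return Manifold(mesh);
}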
|
||||
|
||||
inline void ForVert(int halfedge, std::function<void(int halfedge)> func) {
|
||||
template <typename F>
|
||||
inline void ForVert(int halfedge, F func) {
|
||||
int current = halfedge;
|
||||
do {
|
||||
current = NextHalfedge(halfedge_[current].pairedHalfedge);
|
||||
|
@ -247,30 +280,28 @@ struct Manifold::Impl {
|
|||
} while (current != halfedge);
|
||||
}
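// Illustrative sketch (hypothetical helper, not part of this change): ForVert
// visits every halfedge leaving the start vertex of `startEdge` exactly once,
// so counting callbacks yields the vertex valence.
int CountValence(int startEdge) {
  int valence = 0;
  ForVert(startEdge, [&valence](int) { ++valence; });
  return valence;
}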
|
||||
|
||||
void CreateFaces();
|
||||
void MarkCoplanar();
|
||||
void DedupePropVerts();
|
||||
void RemoveUnreferencedVerts();
|
||||
void InitializeOriginal(bool keepFaceID = false);
|
||||
void CreateHalfedges(const Vec<ivec3>& triVerts);
|
||||
void CreateHalfedges(const Vec<ivec3>& triProp,
|
||||
const Vec<ivec3>& triVert = {});
|
||||
void CalculateNormals();
|
||||
void IncrementMeshIDs();
|
||||
|
||||
void Update();
|
||||
void MarkFailure(Error status);
|
||||
void MakeEmpty(Error status);
|
||||
void Warp(std::function<void(vec3&)> warpFunc);
|
||||
void WarpBatch(std::function<void(VecView<vec3>)> warpFunc);
|
||||
Impl Transform(const mat3x4& transform) const;
|
||||
SparseIndices EdgeCollisions(const Impl& B, bool inverted = false) const;
|
||||
SparseIndices VertexCollisionsZ(VecView<const vec3> vertsIn,
|
||||
bool inverted = false) const;
|
||||
|
||||
bool IsEmpty() const { return NumTri() == 0; }
|
||||
size_t NumVert() const { return vertPos_.size(); }
|
||||
size_t NumEdge() const { return halfedge_.size() / 2; }
|
||||
size_t NumTri() const { return halfedge_.size() / 3; }
|
||||
size_t NumProp() const { return meshRelation_.numProp; }
|
||||
size_t NumProp() const { return numProp_; }
|
||||
size_t NumPropVert() const {
|
||||
return NumProp() == 0 ? NumVert()
|
||||
: meshRelation_.properties.size() / NumProp();
|
||||
return NumProp() == 0 ? NumVert() : properties_.size() / NumProp();
|
||||
}
|
||||
|
||||
// properties.cpp
|
||||
|
@ -299,18 +330,20 @@ struct Manifold::Impl {
|
|||
void GatherFaces(const Impl& old, const Vec<int>& faceNew2Old);
|
||||
|
||||
// face_op.cpp
|
||||
void Face2Tri(const Vec<int>& faceEdge, const Vec<TriRef>& halfedgeRef);
|
||||
PolygonsIdx Face2Polygons(VecView<Halfedge>::IterC start,
|
||||
VecView<Halfedge>::IterC end,
|
||||
mat2x3 projection) const;
|
||||
void Face2Tri(const Vec<int>& faceEdge, const Vec<TriRef>& halfedgeRef,
|
||||
bool allowConvex = false);
|
||||
Polygons Slice(double height) const;
|
||||
Polygons Project() const;
|
||||
|
||||
// edge_op.cpp
|
||||
void CleanupTopology();
|
||||
void SimplifyTopology();
|
||||
void SimplifyTopology(int firstNewVert = 0);
|
||||
void RemoveDegenerates(int firstNewVert = 0);
|
||||
void CollapseShortEdges(int firstNewVert = 0);
|
||||
void CollapseColinearEdges(int firstNewVert = 0);
|
||||
void SwapDegenerates(int firstNewVert = 0);
|
||||
void DedupeEdge(int edge);
|
||||
void CollapseEdge(int edge, std::vector<int>& edges);
|
||||
bool CollapseEdge(int edge, std::vector<int>& edges);
|
||||
void RecursiveEdgeSwap(int edge, int& tag, std::vector<int>& visited,
|
||||
std::vector<int>& edgeSwapStack,
|
||||
std::vector<int>& edges);
|
||||
|
@ -320,6 +353,7 @@ struct Manifold::Impl {
|
|||
void FormLoop(int current, int end);
|
||||
void CollapseTri(const ivec3& triEdge);
|
||||
void SplitPinchedVerts();
|
||||
void DedupeEdges();
|
||||
|
||||
// subdivision.cpp
|
||||
int GetNeighbor(int tri) const;
|
||||
|
@ -353,8 +387,6 @@ struct Manifold::Impl {
|
|||
void Hull(VecView<vec3> vertPos);
|
||||
};
|
||||
|
||||
#ifdef MANIFOLD_DEBUG
|
||||
extern std::mutex dump_lock;
|
||||
std::ostream& operator<<(std::ostream& stream, const Manifold::Impl& impl);
|
||||
#endif
|
||||
} // namespace manifold
|
||||
|
|
27
thirdparty/manifold/src/iters.h
vendored
|
@ -14,6 +14,7 @@
|
|||
#pragma once
|
||||
|
||||
#include <iterator>
|
||||
#include <optional>
|
||||
#include <type_traits>
|
||||
|
||||
namespace manifold {
|
||||
|
@ -22,7 +23,7 @@ template <typename F, typename Iter>
|
|||
struct TransformIterator {
|
||||
private:
|
||||
Iter iter;
|
||||
F f;
|
||||
std::optional<F> f;
|
||||
|
||||
public:
|
||||
using pointer = void;
|
||||
|
@ -36,16 +37,28 @@ struct TransformIterator {
|
|||
|
||||
constexpr TransformIterator(Iter iter, F f) : iter(iter), f(f) {}
|
||||
|
||||
TransformIterator(const TransformIterator& other)
|
||||
: iter(other.iter), f(other.f) {}
|
||||
|
||||
TransformIterator(TransformIterator&& other) : iter(other.iter), f(other.f) {}
|
||||
|
||||
TransformIterator& operator=(const TransformIterator& other) {
|
||||
if (this == &other) return *this;
|
||||
// don't copy function, should be the same
|
||||
iter = other.iter;
|
||||
f.emplace(*other.f);
|
||||
return *this;
|
||||
}
|
||||
|
||||
constexpr reference operator*() const { return f(*iter); }
|
||||
TransformIterator& operator=(TransformIterator&& other) {
|
||||
if (this == &other) return *this;
|
||||
iter = other.iter;
|
||||
f.emplace(*other.f);
|
||||
return *this;
|
||||
}
|
||||
|
||||
constexpr reference operator[](size_t i) const { return f(iter[i]); }
|
||||
constexpr reference operator*() const { return (*f)(*iter); }
|
||||
|
||||
constexpr reference operator[](size_t i) const { return (*f)(iter[i]); }
|
||||
|
||||
// prefix increment
|
||||
TransformIterator& operator++() {
|
||||
|
@ -74,7 +87,7 @@ struct TransformIterator {
|
|||
}
|
||||
|
||||
constexpr TransformIterator operator+(size_t n) const {
|
||||
return TransformIterator(iter + n, f);
|
||||
return TransformIterator(iter + n, *f);
|
||||
}
|
||||
|
||||
TransformIterator& operator+=(size_t n) {
|
||||
|
@ -83,7 +96,7 @@ struct TransformIterator {
|
|||
}
|
||||
|
||||
constexpr TransformIterator operator-(size_t n) const {
|
||||
return TransformIterator(iter - n, f);
|
||||
return TransformIterator(iter - n, *f);
|
||||
}
|
||||
|
||||
TransformIterator& operator-=(size_t n) {
|
||||
|
@ -108,7 +121,7 @@ struct TransformIterator {
|
|||
}
|
||||
|
||||
constexpr operator TransformIterator<F, const Iter>() const {
|
||||
return TransformIterator(f, iter);
|
||||
return TransformIterator(*f, iter);
|
||||
}
|
||||
};
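// Illustrative sketch (not upstream code): why the functor above is held in a
// std::optional. Capturing lambdas are copy-constructible but not
// copy-assignable, so a plain `F f;` member would delete the iterator's
// assignment operators; destroying and re-constructing the functor through
// optional::emplace keeps the iterator assignable, which the parallel
// algorithms require.
#include <optional>
#include <utility>

template <typename F>
struct AssignableFunctor {
  std::optional<F> f;
  explicit AssignableFunctor(F fn) : f(std::move(fn)) {}
  AssignableFunctor(const AssignableFunctor&) = default;
  AssignableFunctor& operator=(const AssignableFunctor& other) {
    f.emplace(*other.f);  // reconstruct instead of assigning
    return *this;
  }
};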
|
||||
|
||||
|
|
169
thirdparty/manifold/src/manifold.cpp
vendored
|
@ -16,36 +16,17 @@
|
|||
#include <map>
|
||||
#include <numeric>
|
||||
|
||||
#include "./boolean3.h"
|
||||
#include "./csg_tree.h"
|
||||
#include "./impl.h"
|
||||
#include "./parallel.h"
|
||||
#include "boolean3.h"
|
||||
#include "csg_tree.h"
|
||||
#include "impl.h"
|
||||
#include "parallel.h"
|
||||
#include "shared.h"
|
||||
|
||||
namespace {
|
||||
using namespace manifold;
|
||||
|
||||
ExecutionParams manifoldParams;
|
||||
|
||||
struct UpdateProperties {
|
||||
double* properties;
|
||||
const int numProp;
|
||||
const double* oldProperties;
|
||||
const int numOldProp;
|
||||
const vec3* vertPos;
|
||||
const ivec3* triProperties;
|
||||
const Halfedge* halfedges;
|
||||
std::function<void(double*, vec3, const double*)> propFunc;
|
||||
|
||||
void operator()(int tri) {
|
||||
for (int i : {0, 1, 2}) {
|
||||
const int vert = halfedges[3 * tri + i].startVert;
|
||||
const int propVert = triProperties[tri][i];
|
||||
propFunc(properties + numProp * propVert, vertPos[vert],
|
||||
oldProperties + numOldProp * propVert);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
Manifold Halfspace(Box bBox, vec3 normal, double originOffset) {
|
||||
normal = la::normalize(normal);
|
||||
Manifold cutter = Manifold::Cube(vec3(2.0), true).Translate({1.0, 0.0, 0.0});
|
||||
|
@ -94,11 +75,12 @@ MeshGLP<Precision, I> GetMeshGLImpl(const manifold::Manifold::Impl& impl,
|
|||
VecView<const TriRef> triRef = impl.meshRelation_.triRef;
|
||||
// Don't sort originals - keep them in order
|
||||
if (!isOriginal) {
|
||||
std::sort(triNew2Old.begin(), triNew2Old.end(), [triRef](int a, int b) {
|
||||
return triRef[a].originalID == triRef[b].originalID
|
||||
? triRef[a].meshID < triRef[b].meshID
|
||||
: triRef[a].originalID < triRef[b].originalID;
|
||||
});
|
||||
std::stable_sort(triNew2Old.begin(), triNew2Old.end(),
|
||||
[triRef](int a, int b) {
|
||||
return triRef[a].originalID == triRef[b].originalID
|
||||
? triRef[a].meshID < triRef[b].meshID
|
||||
: triRef[a].originalID < triRef[b].originalID;
|
||||
});
|
||||
}
|
||||
|
||||
std::vector<mat3> runNormalTransform;
|
||||
|
@ -128,7 +110,7 @@ MeshGLP<Precision, I> GetMeshGLImpl(const manifold::Manifold::Impl& impl,
|
|||
const auto ref = triRef[oldTri];
|
||||
const int meshID = ref.meshID;
|
||||
|
||||
out.faceID[tri] = ref.tri;
|
||||
out.faceID[tri] = ref.faceID >= 0 ? ref.faceID : ref.coplanarID;
|
||||
for (const int i : {0, 1, 2})
|
||||
out.triVerts[3 * tri + i] = impl.halfedge_[3 * oldTri + i].startVert;
|
||||
|
||||
|
@ -166,9 +148,8 @@ MeshGLP<Precision, I> GetMeshGLImpl(const manifold::Manifold::Impl& impl,
|
|||
for (size_t run = 0; run < out.runOriginalID.size(); ++run) {
|
||||
for (size_t tri = out.runIndex[run] / 3; tri < out.runIndex[run + 1] / 3;
|
||||
++tri) {
|
||||
const ivec3 triProp = impl.meshRelation_.triProperties[triNew2Old[tri]];
|
||||
for (const int i : {0, 1, 2}) {
|
||||
const int prop = triProp[i];
|
||||
const int prop = impl.halfedge_[3 * triNew2Old[tri] + i].propVert;
|
||||
const int vert = out.triVerts[3 * tri + i];
|
||||
|
||||
auto& bin = vertPropPair[vert];
|
||||
|
@ -189,8 +170,7 @@ MeshGLP<Precision, I> GetMeshGLImpl(const manifold::Manifold::Impl& impl,
|
|||
out.vertProperties.push_back(impl.vertPos_[vert][p]);
|
||||
}
|
||||
for (int p = 0; p < numProp; ++p) {
|
||||
out.vertProperties.push_back(
|
||||
impl.meshRelation_.properties[prop * numProp + p]);
|
||||
out.vertProperties.push_back(impl.properties_[prop * numProp + p]);
|
||||
}
|
||||
|
||||
if (updateNormals) {
|
||||
|
@ -332,11 +312,9 @@ MeshGL64 Manifold::GetMeshGL64(int normalIdx) const {
|
|||
bool Manifold::IsEmpty() const { return GetCsgLeafNode().GetImpl()->IsEmpty(); }
|
||||
/**
|
||||
* Returns the reason for an input Mesh producing an empty Manifold. This Status
|
||||
* only applies to Manifolds newly-created from an input Mesh - once they are
|
||||
* combined into a new Manifold via operations, the status reverts to NoError,
|
||||
* simply processing the problem mesh as empty. Likewise, empty meshes may still
|
||||
* show NoError, for instance if they are small enough relative to their
|
||||
* tolerance to be collapsed to nothing.
|
||||
* will carry on through operations like NaN propagation, ensuring an errored
|
||||
* mesh doesn't get mysteriously lost. Empty meshes may still show
|
||||
* NoError, for instance the intersection of non-overlapping meshes.
|
||||
*/
|
||||
Manifold::Error Manifold::Status() const {
|
||||
return GetCsgLeafNode().GetImpl()->status_;
|
||||
|
@ -404,7 +382,7 @@ Manifold Manifold::SetTolerance(double tolerance) const {
|
|||
auto impl = std::make_shared<Impl>(*GetCsgLeafNode().GetImpl());
|
||||
if (tolerance > impl->tolerance_) {
|
||||
impl->tolerance_ = tolerance;
|
||||
impl->CreateFaces();
|
||||
impl->MarkCoplanar();
|
||||
impl->SimplifyTopology();
|
||||
impl->Finish();
|
||||
} else {
|
||||
|
@ -415,6 +393,27 @@ Manifold Manifold::SetTolerance(double tolerance) const {
|
|||
return Manifold(impl);
|
||||
}
|
||||
|
||||
/**
|
||||
* Return a copy of the manifold simplified to the given tolerance, but with its
|
||||
* actual tolerance value unchanged. If the tolerance is not given or is less
|
||||
* than the current tolerance, the current tolerance is used for simplification.
|
||||
* The result will contain a subset of the original verts and all surfaces will
|
||||
* have moved by less than tolerance.
|
||||
*/
|
||||
Manifold Manifold::Simplify(double tolerance) const {
|
||||
auto impl = std::make_shared<Impl>(*GetCsgLeafNode().GetImpl());
|
||||
const double oldTolerance = impl->tolerance_;
|
||||
if (tolerance == 0) tolerance = oldTolerance;
|
||||
if (tolerance > oldTolerance) {
|
||||
impl->tolerance_ = tolerance;
|
||||
impl->MarkCoplanar();
|
||||
}
|
||||
impl->SimplifyTopology();
|
||||
impl->Finish();
|
||||
impl->tolerance_ = oldTolerance;
|
||||
return Manifold(impl);
|
||||
}
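// Illustrative usage sketch (not upstream code): Simplify collapses detail
// finer than the given tolerance while leaving the stored tolerance unchanged,
// whereas SetTolerance both loosens the tolerance and simplifies, as shown
// above. The 0.01 value is only an example.
#include "manifold/manifold.h"

inline void SimplifyExample(const manifold::Manifold& scanned) {
  using namespace manifold;
  Manifold light = scanned.Simplify(0.01);      // tolerance metadata unchanged
  Manifold loose = scanned.SetTolerance(0.01);  // tolerance raised to 0.01
  (void)light;
  (void)loose;
}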
|
||||
|
||||
/**
|
||||
* The genus is a topological property of the manifold, representing the number
|
||||
* of "handles". A sphere is 0, torus 1, etc. It is only meaningful for a single
|
||||
|
@ -450,9 +449,10 @@ int Manifold::OriginalID() const {
|
|||
|
||||
/**
|
||||
* This removes all relations (originalID, faceID, transform) to ancestor meshes
|
||||
* and this new Manifold is marked an original. It also collapses colinear edges
|
||||
* - these don't get collapsed at boundaries where originalID changes, so the
|
||||
* reset may allow flat faces to be further simplified.
|
||||
* and this new Manifold is marked an original. It also recreates faces
|
||||
* - these don't get joined at boundaries where originalID changes, so the
|
||||
* reset may allow triangles of flat faces to be further collapsed with
|
||||
* Simplify().
|
||||
*/
|
||||
Manifold Manifold::AsOriginal() const {
|
||||
auto oldImpl = GetCsgLeafNode().GetImpl();
|
||||
|
@ -463,9 +463,7 @@ Manifold Manifold::AsOriginal() const {
|
|||
}
|
||||
auto newImpl = std::make_shared<Impl>(*oldImpl);
|
||||
newImpl->InitializeOriginal();
|
||||
newImpl->CreateFaces();
|
||||
newImpl->SimplifyTopology();
|
||||
newImpl->Finish();
|
||||
newImpl->MarkCoplanar();
|
||||
newImpl->InitializeOriginal(true);
|
||||
return Manifold(std::make_shared<CsgLeafNode>(newImpl));
|
||||
}
|
||||
|
@ -497,22 +495,6 @@ size_t Manifold::NumDegenerateTris() const {
|
|||
return GetCsgLeafNode().GetImpl()->NumDegenerateTris();
|
||||
}
|
||||
|
||||
/**
|
||||
* This is a checksum-style verification of the collider, simply returning the
|
||||
* total number of edge-face bounding box overlaps between this and other.
|
||||
*
|
||||
* @param other A Manifold to overlap with.
|
||||
*/
|
||||
size_t Manifold::NumOverlaps(const Manifold& other) const {
|
||||
SparseIndices overlaps = GetCsgLeafNode().GetImpl()->EdgeCollisions(
|
||||
*other.GetCsgLeafNode().GetImpl());
|
||||
int num_overlaps = overlaps.size();
|
||||
|
||||
overlaps = other.GetCsgLeafNode().GetImpl()->EdgeCollisions(
|
||||
*GetCsgLeafNode().GetImpl());
|
||||
return num_overlaps + overlaps.size();
|
||||
}
|
||||
|
||||
/**
|
||||
* Move this Manifold in space. This operation can be chained. Transforms are
|
||||
* combined and applied lazily.
|
||||
|
@ -635,38 +617,33 @@ Manifold Manifold::SetProperties(
|
|||
propFunc) const {
|
||||
auto pImpl = std::make_shared<Impl>(*GetCsgLeafNode().GetImpl());
|
||||
const int oldNumProp = NumProp();
|
||||
const Vec<double> oldProperties = pImpl->meshRelation_.properties;
|
||||
const Vec<double> oldProperties = pImpl->properties_;
|
||||
|
||||
auto& triProperties = pImpl->meshRelation_.triProperties;
|
||||
if (numProp == 0) {
|
||||
triProperties.resize(0);
|
||||
pImpl->meshRelation_.properties.resize(0);
|
||||
pImpl->properties_.clear();
|
||||
} else {
|
||||
if (triProperties.size() == 0) {
|
||||
const int numTri = NumTri();
|
||||
triProperties.resize(numTri);
|
||||
for (int i = 0; i < numTri; ++i) {
|
||||
for (const int j : {0, 1, 2}) {
|
||||
triProperties[i][j] = pImpl->halfedge_[3 * i + j].startVert;
|
||||
}
|
||||
}
|
||||
pImpl->meshRelation_.properties = Vec<double>(numProp * NumVert(), 0);
|
||||
} else {
|
||||
pImpl->meshRelation_.properties = Vec<double>(numProp * NumPropVert(), 0);
|
||||
}
|
||||
pImpl->properties_ = Vec<double>(numProp * NumPropVert(), 0);
|
||||
for_each_n(
|
||||
propFunc == nullptr ? ExecutionPolicy::Par : ExecutionPolicy::Seq,
|
||||
countAt(0), NumTri(),
|
||||
UpdateProperties(
|
||||
{pImpl->meshRelation_.properties.data(), numProp,
|
||||
oldProperties.data(), oldNumProp, pImpl->vertPos_.data(),
|
||||
triProperties.data(), pImpl->halfedge_.data(),
|
||||
propFunc == nullptr ? [](double* newProp, vec3 position,
|
||||
const double* oldProp) { *newProp = 0; }
|
||||
: propFunc}));
|
||||
countAt(0), NumTri(), [&](int tri) {
|
||||
for (int i : {0, 1, 2}) {
|
||||
const Halfedge& edge = pImpl->halfedge_[3 * tri + i];
|
||||
const int vert = edge.startVert;
|
||||
const int propVert = edge.propVert;
|
||||
if (propFunc == nullptr) {
|
||||
for (int p = 0; p < numProp; ++p) {
|
||||
pImpl->properties_[numProp * propVert + p] = 0;
|
||||
}
|
||||
} else {
|
||||
propFunc(&pImpl->properties_[numProp * propVert],
|
||||
pImpl->vertPos_[vert],
|
||||
oldProperties.data() + oldNumProp * propVert);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
pImpl->meshRelation_.numProp = numProp;
|
||||
pImpl->numProp_ = numProp;
|
||||
return Manifold(std::make_shared<CsgLeafNode>(pImpl));
|
||||
}
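// Illustrative usage sketch (not upstream code): setting three per-vertex
// property channels derived from position via SetProperties. The first
// argument is the new property count and the callback signature matches the
// propFunc used above; the color mapping itself is an arbitrary example.
#include "manifold/manifold.h"

inline manifold::Manifold AddPositionAsColor(const manifold::Manifold& in) {
  using namespace manifold;
  return in.SetProperties(
      3, [](double* newProp, vec3 pos, const double* /*oldProp*/) {
        // Map position into a simple RGB-like triple.
        for (int i : {0, 1, 2}) newProp[i] = 0.5 + 0.5 * pos[i];
      });
}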
|
||||
|
||||
|
@ -754,14 +731,15 @@ Manifold Manifold::SmoothOut(double minSharpAngle, double minSmoothness) const {
|
|||
auto pImpl = std::make_shared<Impl>(*GetCsgLeafNode().GetImpl());
|
||||
if (!IsEmpty()) {
|
||||
if (minSmoothness == 0) {
|
||||
const int numProp = pImpl->meshRelation_.numProp;
|
||||
Vec<double> properties = pImpl->meshRelation_.properties;
|
||||
Vec<ivec3> triProperties = pImpl->meshRelation_.triProperties;
|
||||
const int numProp = pImpl->numProp_;
|
||||
Vec<double> properties = pImpl->properties_;
|
||||
Vec<Halfedge> halfedge = pImpl->halfedge_;
|
||||
pImpl->SetNormals(0, minSharpAngle);
|
||||
pImpl->CreateTangents(0);
|
||||
pImpl->meshRelation_.numProp = numProp;
|
||||
pImpl->meshRelation_.properties.swap(properties);
|
||||
pImpl->meshRelation_.triProperties.swap(triProperties);
|
||||
// Reset the properties to the original values, removing temporary normals
|
||||
pImpl->numProp_ = numProp;
|
||||
pImpl->properties_.swap(properties);
|
||||
pImpl->halfedge_.swap(halfedge);
|
||||
} else {
|
||||
pImpl->CreateTangents(pImpl->SharpenEdges(minSharpAngle, minSmoothness));
|
||||
}
|
||||
|
@ -783,8 +761,7 @@ Manifold Manifold::SmoothOut(double minSharpAngle, double minSmoothness) const {
|
|||
Manifold Manifold::Refine(int n) const {
|
||||
auto pImpl = std::make_shared<Impl>(*GetCsgLeafNode().GetImpl());
|
||||
if (n > 1) {
|
||||
pImpl->Refine(
|
||||
[n](vec3 edge, vec4 tangentStart, vec4 tangentEnd) { return n - 1; });
|
||||
pImpl->Refine([n](vec3, vec4, vec4) { return n - 1; });
|
||||
}
|
||||
return Manifold(std::make_shared<CsgLeafNode>(pImpl));
|
||||
}
|
||||
|
@ -802,7 +779,7 @@ Manifold Manifold::Refine(int n) const {
|
|||
Manifold Manifold::RefineToLength(double length) const {
|
||||
length = std::abs(length);
|
||||
auto pImpl = std::make_shared<Impl>(*GetCsgLeafNode().GetImpl());
|
||||
pImpl->Refine([length](vec3 edge, vec4 tangentStart, vec4 tangentEnd) {
|
||||
pImpl->Refine([length](vec3 edge, vec4, vec4) {
|
||||
return static_cast<int>(la::length(edge) / length);
|
||||
});
|
||||
return Manifold(std::make_shared<CsgLeafNode>(pImpl));
|
||||
|
|
2
thirdparty/manifold/src/mesh_fixes.h
vendored
|
@ -12,7 +12,7 @@
|
|||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
#pragma once
|
||||
#include "./shared.h"
|
||||
#include "shared.h"
|
||||
|
||||
namespace {
|
||||
using namespace manifold;
|
||||
|
|
214
thirdparty/manifold/src/parallel.h
vendored
|
@ -17,7 +17,7 @@
|
|||
|
||||
#pragma once
|
||||
|
||||
#include "./iters.h"
|
||||
#include "iters.h"
|
||||
#if (MANIFOLD_PAR == 1)
|
||||
#include <tbb/combinable.h>
|
||||
#include <tbb/parallel_for.h>
|
||||
|
@ -114,20 +114,20 @@ template <typename T, typename InputIter, typename OutputIter, typename BinOp>
|
|||
struct ScanBody {
|
||||
T sum;
|
||||
T identity;
|
||||
BinOp &f;
|
||||
BinOp& f;
|
||||
InputIter input;
|
||||
OutputIter output;
|
||||
|
||||
ScanBody(T sum, T identity, BinOp &f, InputIter input, OutputIter output)
|
||||
ScanBody(T sum, T identity, BinOp& f, InputIter input, OutputIter output)
|
||||
: sum(sum), identity(identity), f(f), input(input), output(output) {}
|
||||
ScanBody(ScanBody &b, tbb::split)
|
||||
ScanBody(ScanBody& b, tbb::split)
|
||||
: sum(b.identity),
|
||||
identity(b.identity),
|
||||
f(b.f),
|
||||
input(b.input),
|
||||
output(b.output) {}
|
||||
template <typename Tag>
|
||||
void operator()(const tbb::blocked_range<size_t> &r, Tag) {
|
||||
void operator()(const tbb::blocked_range<size_t>& r, Tag) {
|
||||
T temp = sum;
|
||||
for (size_t i = r.begin(); i < r.end(); ++i) {
|
||||
T inputTmp = input[i];
|
||||
|
@ -137,23 +137,23 @@ struct ScanBody {
|
|||
sum = temp;
|
||||
}
|
||||
T get_sum() const { return sum; }
|
||||
void reverse_join(ScanBody &a) { sum = f(a.sum, sum); }
|
||||
void assign(ScanBody &b) { sum = b.sum; }
|
||||
void reverse_join(ScanBody& a) { sum = f(a.sum, sum); }
|
||||
void assign(ScanBody& b) { sum = b.sum; }
|
||||
};
|
||||
|
||||
template <typename InputIter, typename OutputIter, typename P>
|
||||
struct CopyIfScanBody {
|
||||
size_t sum;
|
||||
P &pred;
|
||||
P& pred;
|
||||
InputIter input;
|
||||
OutputIter output;
|
||||
|
||||
CopyIfScanBody(P &pred, InputIter input, OutputIter output)
|
||||
CopyIfScanBody(P& pred, InputIter input, OutputIter output)
|
||||
: sum(0), pred(pred), input(input), output(output) {}
|
||||
CopyIfScanBody(CopyIfScanBody &b, tbb::split)
|
||||
CopyIfScanBody(CopyIfScanBody& b, tbb::split)
|
||||
: sum(0), pred(b.pred), input(b.input), output(b.output) {}
|
||||
template <typename Tag>
|
||||
void operator()(const tbb::blocked_range<size_t> &r, Tag) {
|
||||
void operator()(const tbb::blocked_range<size_t>& r, Tag) {
|
||||
size_t temp = sum;
|
||||
for (size_t i = r.begin(); i < r.end(); ++i) {
|
||||
if (pred(i)) {
|
||||
|
@ -164,8 +164,8 @@ struct CopyIfScanBody {
|
|||
sum = temp;
|
||||
}
|
||||
size_t get_sum() const { return sum; }
|
||||
void reverse_join(CopyIfScanBody &a) { sum = a.sum + sum; }
|
||||
void assign(CopyIfScanBody &b) { sum = b.sum; }
|
||||
void reverse_join(CopyIfScanBody& a) { sum = a.sum + sum; }
|
||||
void assign(CopyIfScanBody& b) { sum = b.sum; }
|
||||
};
|
||||
|
||||
template <typename N, const int K>
|
||||
|
@ -173,11 +173,11 @@ struct Hist {
|
|||
using SizeType = N;
|
||||
static constexpr int k = K;
|
||||
N hist[k][256] = {{0}};
|
||||
void merge(const Hist<N, K> &other) {
|
||||
void merge(const Hist<N, K>& other) {
|
||||
for (int i = 0; i < k; ++i)
|
||||
for (int j = 0; j < 256; ++j) hist[i][j] += other.hist[i][j];
|
||||
}
|
||||
void prefixSum(N total, bool *canSkip) {
|
||||
void prefixSum(N total, bool* canSkip) {
|
||||
for (int i = 0; i < k; ++i) {
|
||||
size_t count = 0;
|
||||
for (int j = 0; j < 256; ++j) {
|
||||
|
@ -191,8 +191,8 @@ struct Hist {
|
|||
};
|
||||
|
||||
template <typename T, typename H>
|
||||
void histogram(T *ptr, typename H::SizeType n, H &hist) {
|
||||
auto worker = [](T *ptr, typename H::SizeType n, H &hist) {
|
||||
void histogram(T* ptr, typename H::SizeType n, H& hist) {
|
||||
auto worker = [](T* ptr, typename H::SizeType n, H& hist) {
|
||||
for (typename H::SizeType i = 0; i < n; ++i)
|
||||
for (int k = 0; k < hist.k; ++k)
|
||||
++hist.hist[k][(ptr[i] >> (8 * k)) & 0xFF];
|
||||
|
@ -203,21 +203,21 @@ void histogram(T *ptr, typename H::SizeType n, H &hist) {
|
|||
tbb::combinable<H> store;
|
||||
tbb::parallel_for(
|
||||
tbb::blocked_range<typename H::SizeType>(0, n, kSeqThreshold),
|
||||
[&worker, &store, ptr](const auto &r) {
|
||||
[&worker, &store, ptr](const auto& r) {
|
||||
worker(ptr + r.begin(), r.end() - r.begin(), store.local());
|
||||
});
|
||||
store.combine_each([&hist](const H &h) { hist.merge(h); });
|
||||
store.combine_each([&hist](const H& h) { hist.merge(h); });
|
||||
}
|
||||
}
|
||||
|
||||
template <typename T, typename H>
|
||||
void shuffle(T *src, T *target, typename H::SizeType n, H &hist, int k) {
|
||||
void shuffle(T* src, T* target, typename H::SizeType n, H& hist, int k) {
|
||||
for (typename H::SizeType i = 0; i < n; ++i)
|
||||
target[hist.hist[k][(src[i] >> (8 * k)) & 0xFF]++] = src[i];
|
||||
}
|
||||
|
||||
template <typename T, typename SizeType>
|
||||
bool LSB_radix_sort(T *input, T *tmp, SizeType n) {
|
||||
bool LSB_radix_sort(T* input, T* tmp, SizeType n) {
|
||||
Hist<SizeType, sizeof(T) / sizeof(char)> hist;
|
||||
if (std::is_sorted(input, input + n)) return false;
|
||||
histogram(input, n, hist);
|
||||
|
@ -240,9 +240,9 @@ struct SortedRange {
|
|||
SizeType offset = 0, length = 0;
|
||||
bool inTmp = false;
|
||||
|
||||
SortedRange(T *input, T *tmp, SizeType offset = 0, SizeType length = 0)
|
||||
SortedRange(T* input, T* tmp, SizeType offset = 0, SizeType length = 0)
|
||||
: input(input), tmp(tmp), offset(offset), length(length) {}
|
||||
SortedRange(SortedRange<T, SizeType> &r, tbb::split)
|
||||
SortedRange(SortedRange<T, SizeType>& r, tbb::split)
|
||||
: input(r.input), tmp(r.tmp) {}
|
||||
// FIXME: no idea why thread sanitizer reports data race here
|
||||
#if defined(__has_feature)
|
||||
|
@ -251,7 +251,7 @@ struct SortedRange {
|
|||
#endif
|
||||
#endif
|
||||
void
|
||||
operator()(const tbb::blocked_range<SizeType> &range) {
|
||||
operator()(const tbb::blocked_range<SizeType>& range) {
|
||||
SortedRange<T, SizeType> rhs(input, tmp, range.begin(),
|
||||
range.end() - range.begin());
|
||||
rhs.inTmp =
|
||||
|
@ -267,7 +267,7 @@ struct SortedRange {
|
|||
copy(src + offset, src + offset + length, target + offset);
|
||||
return !inTmp;
|
||||
}
|
||||
void join(const SortedRange<T, SizeType> &rhs) {
|
||||
void join(const SortedRange<T, SizeType>& rhs) {
|
||||
if (inTmp != rhs.inTmp) {
|
||||
if (length < rhs.length)
|
||||
inTmp = swapBuffer();
|
||||
|
@ -286,8 +286,8 @@ struct SortedRange {
|
|||
};
|
||||
|
||||
template <typename T, typename SizeTy>
|
||||
void radix_sort(T *input, SizeTy n) {
|
||||
T *aux = new T[n];
|
||||
void radix_sort(T* input, SizeTy n) {
|
||||
T* aux = new T[n];
|
||||
SizeTy blockSize = std::max(n / tbb::this_task_arena::max_concurrency() / 4,
|
||||
static_cast<SizeTy>(kSeqThreshold / sizeof(T)));
|
||||
SortedRange<T, SizeTy> result(input, aux);
|
||||
|
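The Hist/histogram/shuffle/LSB_radix_sort machinery above is a byte-wise least-significant-digit radix sort: one 256-bin count per byte, an exclusive prefix sum turning counts into starting offsets, then a stable scatter per byte. A serial sketch of the same idea for an illustrative uint32_t key type, leaving out the parallel histogramming and the SortedRange block merge:

    // Serial byte-wise LSB radix sort (illustrative sketch of the idea above).
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    void LsbRadixSort(std::vector<uint32_t>& keys) {
      std::vector<uint32_t> tmp(keys.size());
      for (int byte = 0; byte < 4; ++byte) {
        size_t count[256] = {0};
        for (uint32_t k : keys) ++count[(k >> (8 * byte)) & 0xFF];
        size_t offset = 0;
        for (int b = 0; b < 256; ++b) {  // exclusive prefix sum -> start offsets
          size_t c = count[b];
          count[b] = offset;
          offset += c;
        }
        for (uint32_t k : keys) tmp[count[(k >> (8 * byte)) & 0xFF]++] = k;  // stable scatter
        keys.swap(tmp);
      }  // after an even number of passes the sorted data is back in keys
    }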
@ -306,7 +306,7 @@ void mergeSort(ExecutionPolicy policy, Iterator first, Iterator last,
|
|||
// apparently this prioritizes threads inside here?
|
||||
tbb::this_task_arena::isolate([&] {
|
||||
size_t length = std::distance(first, last);
|
||||
T *tmp = new T[length];
|
||||
T* tmp = new T[length];
|
||||
copy(policy, first, last, tmp);
|
||||
details::mergeSortRec(tmp, first, 0, length, comp);
|
||||
delete[] tmp;
|
||||
|
@ -376,13 +376,16 @@ void for_each(ExecutionPolicy policy, Iter first, Iter last, F f) {
|
|||
typename std::iterator_traits<Iter>::iterator_category,
|
||||
std::random_access_iterator_tag>,
|
||||
"You can only parallelize RandomAccessIterator.");
|
||||
(void)policy;
|
||||
#if (MANIFOLD_PAR == 1)
|
||||
if (policy == ExecutionPolicy::Par) {
|
||||
tbb::parallel_for(tbb::blocked_range<Iter>(first, last),
|
||||
[&f](const tbb::blocked_range<Iter> &range) {
|
||||
for (Iter i = range.begin(); i != range.end(); i++)
|
||||
f(*i);
|
||||
});
|
||||
tbb::this_task_arena::isolate([&]() {
|
||||
tbb::parallel_for(tbb::blocked_range<Iter>(first, last),
|
||||
[&f](const tbb::blocked_range<Iter>& range) {
|
||||
for (Iter i = range.begin(); i != range.end(); i++)
|
||||
f(*i);
|
||||
});
|
||||
});
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
|
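The recurring change in these hunks wraps each TBB call in tbb::this_task_arena::isolate, so a thread that blocks inside the region cannot steal unrelated outer tasks while it waits; that matters when callers hold locks or language-runtime state, as the sdf.cpp comment later in this diff notes. A minimal sketch of the pattern with an illustrative function (not Manifold code):

    #include <tbb/blocked_range.h>
    #include <tbb/parallel_for.h>
    #include <tbb/task_arena.h>
    #include <cstddef>
    #include <vector>

    // Scale a vector in parallel inside an isolated region: work stealing is
    // confined to tasks spawned within the isolate() lambda.
    void ScaleInPlace(std::vector<double>& v, double s) {
      tbb::this_task_arena::isolate([&] {
        tbb::parallel_for(tbb::blocked_range<size_t>(0, v.size()),
                          [&](const tbb::blocked_range<size_t>& r) {
                            for (size_t i = r.begin(); i != r.end(); ++i) v[i] *= s;
                          });
      });
    }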
@ -412,16 +415,19 @@ T reduce(ExecutionPolicy policy, InputIter first, InputIter last, T init,
|
|||
typename std::iterator_traits<InputIter>::iterator_category,
|
||||
std::random_access_iterator_tag>,
|
||||
"You can only parallelize RandomAccessIterator.");
|
||||
(void)policy;
|
||||
#if (MANIFOLD_PAR == 1)
|
||||
if (policy == ExecutionPolicy::Par) {
|
||||
// should we use deterministic reduce here?
|
||||
return tbb::parallel_reduce(
|
||||
tbb::blocked_range<InputIter>(first, last, details::kSeqThreshold),
|
||||
init,
|
||||
[&f](const tbb::blocked_range<InputIter> &range, T value) {
|
||||
return std::reduce(range.begin(), range.end(), value, f);
|
||||
},
|
||||
f);
|
||||
return tbb::this_task_arena::isolate([&]() {
|
||||
return tbb::parallel_reduce(
|
||||
tbb::blocked_range<InputIter>(first, last, details::kSeqThreshold),
|
||||
init,
|
||||
[&f](const tbb::blocked_range<InputIter>& range, T value) {
|
||||
return std::reduce(range.begin(), range.end(), value, f);
|
||||
},
|
||||
f);
|
||||
});
|
||||
}
|
||||
#endif
|
||||
return std::reduce(first, last, init, f);
|
||||
|
@ -488,21 +494,24 @@ void inclusive_scan(ExecutionPolicy policy, InputIter first, InputIter last,
|
|||
typename std::iterator_traits<OutputIter>::iterator_category,
|
||||
std::random_access_iterator_tag>,
|
||||
"You can only parallelize RandomAccessIterator.");
|
||||
(void)policy;
|
||||
#if (MANIFOLD_PAR == 1)
|
||||
if (policy == ExecutionPolicy::Par) {
|
||||
tbb::parallel_scan(
|
||||
tbb::blocked_range<size_t>(0, std::distance(first, last)),
|
||||
static_cast<T>(0),
|
||||
[&](const tbb::blocked_range<size_t> &range, T sum,
|
||||
bool is_final_scan) {
|
||||
T temp = sum;
|
||||
for (size_t i = range.begin(); i < range.end(); ++i) {
|
||||
temp = temp + first[i];
|
||||
if (is_final_scan) d_first[i] = temp;
|
||||
}
|
||||
return temp;
|
||||
},
|
||||
std::plus<T>());
|
||||
tbb::this_task_arena::isolate([&]() {
|
||||
tbb::parallel_scan(
|
||||
tbb::blocked_range<size_t>(0, std::distance(first, last)),
|
||||
static_cast<T>(0),
|
||||
[&](const tbb::blocked_range<size_t>& range, T sum,
|
||||
bool is_final_scan) {
|
||||
T temp = sum;
|
||||
for (size_t i = range.begin(); i < range.end(); ++i) {
|
||||
temp = temp + first[i];
|
||||
if (is_final_scan) d_first[i] = temp;
|
||||
}
|
||||
return temp;
|
||||
},
|
||||
std::plus<T>());
|
||||
});
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
|
@ -550,12 +559,16 @@ void exclusive_scan(ExecutionPolicy policy, InputIter first, InputIter last,
|
|||
typename std::iterator_traits<OutputIter>::iterator_category,
|
||||
std::random_access_iterator_tag>,
|
||||
"You can only parallelize RandomAccessIterator.");
|
||||
(void)policy;
|
||||
(void)identity;
|
||||
#if (MANIFOLD_PAR == 1)
|
||||
if (policy == ExecutionPolicy::Par) {
|
||||
details::ScanBody<T, InputIter, OutputIter, BinOp> body(init, identity, f,
|
||||
first, d_first);
|
||||
tbb::parallel_scan(
|
||||
tbb::blocked_range<size_t>(0, std::distance(first, last)), body);
|
||||
tbb::this_task_arena::isolate([&]() {
|
||||
tbb::parallel_scan(
|
||||
tbb::blocked_range<size_t>(0, std::distance(first, last)), body);
|
||||
});
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
|
@ -603,15 +616,18 @@ void transform(ExecutionPolicy policy, InputIter first, InputIter last,
|
|||
typename std::iterator_traits<OutputIter>::iterator_category,
|
||||
std::random_access_iterator_tag>,
|
||||
"You can only parallelize RandomAccessIterator.");
|
||||
(void)policy;
|
||||
#if (MANIFOLD_PAR == 1)
|
||||
if (policy == ExecutionPolicy::Par) {
|
||||
tbb::parallel_for(tbb::blocked_range<size_t>(
|
||||
0, static_cast<size_t>(std::distance(first, last))),
|
||||
[&](const tbb::blocked_range<size_t> &range) {
|
||||
std::transform(first + range.begin(),
|
||||
first + range.end(),
|
||||
d_first + range.begin(), f);
|
||||
});
|
||||
tbb::this_task_arena::isolate([&]() {
|
||||
tbb::parallel_for(tbb::blocked_range<size_t>(
|
||||
0, static_cast<size_t>(std::distance(first, last))),
|
||||
[&](const tbb::blocked_range<size_t>& range) {
|
||||
std::transform(first + range.begin(),
|
||||
first + range.end(),
|
||||
d_first + range.begin(), f);
|
||||
});
|
||||
});
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
|
@ -647,15 +663,18 @@ void copy(ExecutionPolicy policy, InputIter first, InputIter last,
|
|||
typename std::iterator_traits<OutputIter>::iterator_category,
|
||||
std::random_access_iterator_tag>,
|
||||
"You can only parallelize RandomAccessIterator.");
|
||||
(void)policy;
|
||||
#if (MANIFOLD_PAR == 1)
|
||||
if (policy == ExecutionPolicy::Par) {
|
||||
tbb::parallel_for(tbb::blocked_range<size_t>(
|
||||
0, static_cast<size_t>(std::distance(first, last)),
|
||||
details::kSeqThreshold),
|
||||
[&](const tbb::blocked_range<size_t> &range) {
|
||||
std::copy(first + range.begin(), first + range.end(),
|
||||
d_first + range.begin());
|
||||
});
|
||||
tbb::this_task_arena::isolate([&]() {
|
||||
tbb::parallel_for(tbb::blocked_range<size_t>(
|
||||
0, static_cast<size_t>(std::distance(first, last)),
|
||||
details::kSeqThreshold),
|
||||
[&](const tbb::blocked_range<size_t>& range) {
|
||||
std::copy(first + range.begin(), first + range.end(),
|
||||
d_first + range.begin());
|
||||
});
|
||||
});
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
|
@ -704,12 +723,15 @@ void fill(ExecutionPolicy policy, OutputIter first, OutputIter last, T value) {
|
|||
typename std::iterator_traits<OutputIter>::iterator_category,
|
||||
std::random_access_iterator_tag>,
|
||||
"You can only parallelize RandomAccessIterator.");
|
||||
(void)policy;
|
||||
#if (MANIFOLD_PAR == 1)
|
||||
if (policy == ExecutionPolicy::Par) {
|
||||
tbb::parallel_for(tbb::blocked_range<OutputIter>(first, last),
|
||||
[&](const tbb::blocked_range<OutputIter> &range) {
|
||||
std::fill(range.begin(), range.end(), value);
|
||||
});
|
||||
tbb::this_task_arena::isolate([&]() {
|
||||
tbb::parallel_for(tbb::blocked_range<OutputIter>(first, last),
|
||||
[&](const tbb::blocked_range<OutputIter>& range) {
|
||||
std::fill(range.begin(), range.end(), value);
|
||||
});
|
||||
});
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
|
@ -727,6 +749,7 @@ void fill(OutputIter first, OutputIter last, T value) {
|
|||
template <typename InputIter, typename P>
|
||||
size_t count_if(ExecutionPolicy policy, InputIter first, InputIter last,
|
||||
P pred) {
|
||||
(void)policy;
|
||||
#if (MANIFOLD_PAR == 1)
|
||||
if (policy == ExecutionPolicy::Par) {
|
||||
return reduce(policy, TransformIterator(first, pred),
|
||||
|
@ -751,18 +774,21 @@ bool all_of(ExecutionPolicy policy, InputIter first, InputIter last, P pred) {
|
|||
typename std::iterator_traits<InputIter>::iterator_category,
|
||||
std::random_access_iterator_tag>,
|
||||
"You can only parallelize RandomAccessIterator.");
|
||||
(void)policy;
|
||||
#if (MANIFOLD_PAR == 1)
|
||||
if (policy == ExecutionPolicy::Par) {
|
||||
// should we use deterministic reduce here?
|
||||
return tbb::parallel_reduce(
|
||||
tbb::blocked_range<InputIter>(first, last), true,
|
||||
[&](const tbb::blocked_range<InputIter> &range, bool value) {
|
||||
if (!value) return false;
|
||||
for (InputIter i = range.begin(); i != range.end(); i++)
|
||||
if (!pred(*i)) return false;
|
||||
return true;
|
||||
},
|
||||
[](bool a, bool b) { return a && b; });
|
||||
return tbb::this_task_arena::isolate([&]() {
|
||||
return tbb::parallel_reduce(
|
||||
tbb::blocked_range<InputIter>(first, last), true,
|
||||
[&](const tbb::blocked_range<InputIter>& range, bool value) {
|
||||
if (!value) return false;
|
||||
for (InputIter i = range.begin(); i != range.end(); i++)
|
||||
if (!pred(*i)) return false;
|
||||
return true;
|
||||
},
|
||||
[](bool a, bool b) { return a && b; });
|
||||
});
|
||||
}
|
||||
#endif
|
||||
return std::all_of(first, last, pred);
|
||||
|
@ -798,13 +824,16 @@ OutputIter copy_if(ExecutionPolicy policy, InputIter first, InputIter last,
|
|||
typename std::iterator_traits<OutputIter>::iterator_category,
|
||||
std::random_access_iterator_tag>,
|
||||
"You can only parallelize RandomAccessIterator.");
|
||||
(void)policy;
|
||||
#if (MANIFOLD_PAR == 1)
|
||||
if (policy == ExecutionPolicy::Par) {
|
||||
auto pred2 = [&](size_t i) { return pred(first[i]); };
|
||||
details::CopyIfScanBody body(pred2, first, d_first);
|
||||
tbb::parallel_scan(
|
||||
tbb::blocked_range<size_t>(0, std::distance(first, last)), body);
|
||||
return d_first + body.get_sum();
|
||||
tbb::this_task_arena::isolate([&]() {
|
||||
tbb::parallel_scan(
|
||||
tbb::blocked_range<size_t>(0, std::distance(first, last)), body);
|
||||
return d_first + body.get_sum();
|
||||
});
|
||||
}
|
||||
#endif
|
||||
return std::copy_if(first, last, d_first, pred);
|
||||
|
@ -845,9 +874,10 @@ Iter remove_if(ExecutionPolicy policy, Iter first, Iter last, P pred) {
|
|||
static_assert(std::is_trivially_destructible_v<T>,
|
||||
"Our simple implementation does not support types that are "
|
||||
"not trivially destructable.");
|
||||
(void)policy;
|
||||
#if (MANIFOLD_PAR == 1)
|
||||
if (policy == ExecutionPolicy::Par) {
|
||||
T *tmp = new T[std::distance(first, last)];
|
||||
T* tmp = new T[std::distance(first, last)];
|
||||
auto back =
|
||||
copy_if(policy, first, last, tmp, [&](T v) { return !pred(v); });
|
||||
copy(policy, tmp, back, first);
|
||||
|
@ -892,9 +922,10 @@ Iter remove(ExecutionPolicy policy, Iter first, Iter last, T value) {
|
|||
static_assert(std::is_trivially_destructible_v<T>,
|
||||
"Our simple implementation does not support types that are "
|
||||
"not trivially destructable.");
|
||||
(void)policy;
|
||||
#if (MANIFOLD_PAR == 1)
|
||||
if (policy == ExecutionPolicy::Par) {
|
||||
T *tmp = new T[std::distance(first, last)];
|
||||
T* tmp = new T[std::distance(first, last)];
|
||||
auto back =
|
||||
copy_if(policy, first, last, tmp, [&](T v) { return v != value; });
|
||||
copy(policy, tmp, back, first);
|
||||
|
@ -939,13 +970,14 @@ Iter unique(ExecutionPolicy policy, Iter first, Iter last) {
|
|||
static_assert(std::is_trivially_destructible_v<T>,
|
||||
"Our simple implementation does not support types that are "
|
||||
"not trivially destructable.");
|
||||
(void)policy;
|
||||
#if (MANIFOLD_PAR == 1)
|
||||
if (policy == ExecutionPolicy::Par && first != last) {
|
||||
Iter newSrcStart = first;
|
||||
// cap the maximum buffer size, proved to be beneficial for unique with huge
|
||||
// array size
|
||||
constexpr size_t MAX_BUFFER_SIZE = 1 << 16;
|
||||
T *tmp = new T[std::min(MAX_BUFFER_SIZE,
|
||||
T* tmp = new T[std::min(MAX_BUFFER_SIZE,
|
||||
static_cast<size_t>(std::distance(first, last)))];
|
||||
auto pred = [&](size_t i) { return tmp[i] != tmp[i + 1]; };
|
||||
do {
|
||||
|
@ -957,7 +989,9 @@ Iter unique(ExecutionPolicy policy, Iter first, Iter last) {
|
|||
// this is not a typo, the index i is offset by 1, so to compare an
|
||||
// element with its predecessor we need to compare i and i + 1.
|
||||
details::CopyIfScanBody body(pred, tmp + 1, first + 1);
|
||||
tbb::parallel_scan(tbb::blocked_range<size_t>(0, length - 1), body);
|
||||
tbb::this_task_arena::isolate([&]() {
|
||||
tbb::parallel_scan(tbb::blocked_range<size_t>(0, length - 1), body);
|
||||
});
|
||||
first += body.get_sum() + 1;
|
||||
newSrcStart += length;
|
||||
} while (newSrcStart != last);
|
||||
|
@ -992,6 +1026,7 @@ Iter unique(Iter first, Iter last) {
|
|||
template <typename Iterator,
|
||||
typename T = typename std::iterator_traits<Iterator>::value_type>
|
||||
void stable_sort(ExecutionPolicy policy, Iterator first, Iterator last) {
|
||||
(void)policy;
|
||||
#if (MANIFOLD_PAR == 1)
|
||||
details::SortFunctor<Iterator, T>()(policy, first, last);
|
||||
#else
|
||||
|
@ -1023,6 +1058,7 @@ template <typename Iterator,
|
|||
typename Comp = decltype(std::less<T>())>
|
||||
void stable_sort(ExecutionPolicy policy, Iterator first, Iterator last,
|
||||
Comp comp) {
|
||||
(void)policy;
|
||||
#if (MANIFOLD_PAR == 1)
|
||||
details::mergeSort(policy, first, last, comp);
|
||||
#else
|
||||
|
|
250
thirdparty/manifold/src/polygon.cpp
vendored
|
@ -18,15 +18,20 @@
|
|||
#include <map>
|
||||
#include <set>
|
||||
|
||||
#include "./collider.h"
|
||||
#include "./parallel.h"
|
||||
#include "./utils.h"
|
||||
#include "manifold/manifold.h"
|
||||
#include "manifold/optional_assert.h"
|
||||
#include "parallel.h"
|
||||
#include "tree2d.h"
|
||||
#include "utils.h"
|
||||
|
||||
#ifdef MANIFOLD_DEBUG
|
||||
#include <iomanip>
|
||||
#endif
|
||||
|
||||
namespace {
|
||||
using namespace manifold;
|
||||
|
||||
static ExecutionParams params;
|
||||
constexpr int TRIANGULATOR_VERBOSE_LEVEL = 2;
|
||||
|
||||
constexpr double kBest = -std::numeric_limits<double>::infinity();
|
||||
|
||||
|
@ -40,9 +45,9 @@ struct PolyEdge {
|
|||
int startVert, endVert;
|
||||
};
|
||||
|
||||
std::vector<PolyEdge> Polygons2Edges(const PolygonsIdx &polys) {
|
||||
std::vector<PolyEdge> Polygons2Edges(const PolygonsIdx& polys) {
|
||||
std::vector<PolyEdge> halfedges;
|
||||
for (const auto &poly : polys) {
|
||||
for (const auto& poly : polys) {
|
||||
for (size_t i = 1; i < poly.size(); ++i) {
|
||||
halfedges.push_back({poly[i - 1].idx, poly[i].idx});
|
||||
}
|
||||
|
@ -51,10 +56,10 @@ std::vector<PolyEdge> Polygons2Edges(const PolygonsIdx &polys) {
|
|||
return halfedges;
|
||||
}
|
||||
|
||||
std::vector<PolyEdge> Triangles2Edges(const std::vector<ivec3> &triangles) {
|
||||
std::vector<PolyEdge> Triangles2Edges(const std::vector<ivec3>& triangles) {
|
||||
std::vector<PolyEdge> halfedges;
|
||||
halfedges.reserve(triangles.size() * 3);
|
||||
for (const ivec3 &tri : triangles) {
|
||||
for (const ivec3& tri : triangles) {
|
||||
halfedges.push_back({tri[0], tri[1]});
|
||||
halfedges.push_back({tri[1], tri[2]});
|
||||
halfedges.push_back({tri[2], tri[0]});
|
||||
|
@ -62,7 +67,7 @@ std::vector<PolyEdge> Triangles2Edges(const std::vector<ivec3> &triangles) {
|
|||
return halfedges;
|
||||
}
|
||||
|
||||
void CheckTopology(const std::vector<PolyEdge> &halfedges) {
|
||||
void CheckTopology(const std::vector<PolyEdge>& halfedges) {
|
||||
DEBUG_ASSERT(halfedges.size() % 2 == 0, topologyErr,
|
||||
"Odd number of halfedges.");
|
||||
size_t n_edges = halfedges.size() / 2;
|
||||
|
@ -83,8 +88,8 @@ void CheckTopology(const std::vector<PolyEdge> &halfedges) {
|
|||
backward.resize(n_edges);
|
||||
|
||||
std::for_each(backward.begin(), backward.end(),
|
||||
[](PolyEdge &e) { std::swap(e.startVert, e.endVert); });
|
||||
auto cmp = [](const PolyEdge &a, const PolyEdge &b) {
|
||||
[](PolyEdge& e) { std::swap(e.startVert, e.endVert); });
|
||||
auto cmp = [](const PolyEdge& a, const PolyEdge& b) {
|
||||
return a.startVert < b.startVert ||
|
||||
(a.startVert == b.startVert && a.endVert < b.endVert);
|
||||
};
|
||||
|
@ -97,8 +102,8 @@ void CheckTopology(const std::vector<PolyEdge> &halfedges) {
|
|||
}
|
||||
}
|
||||
|
||||
void CheckTopology(const std::vector<ivec3> &triangles,
|
||||
const PolygonsIdx &polys) {
|
||||
void CheckTopology(const std::vector<ivec3>& triangles,
|
||||
const PolygonsIdx& polys) {
|
||||
std::vector<PolyEdge> halfedges = Triangles2Edges(triangles);
|
||||
std::vector<PolyEdge> openEdges = Polygons2Edges(polys);
|
||||
for (PolyEdge e : openEdges) {
|
||||
|
@ -107,23 +112,24 @@ void CheckTopology(const std::vector<ivec3> &triangles,
|
|||
CheckTopology(halfedges);
|
||||
}
|
||||
|
||||
void CheckGeometry(const std::vector<ivec3> &triangles,
|
||||
const PolygonsIdx &polys, double epsilon) {
|
||||
void CheckGeometry(const std::vector<ivec3>& triangles,
|
||||
const PolygonsIdx& polys, double epsilon) {
|
||||
std::unordered_map<int, vec2> vertPos;
|
||||
for (const auto &poly : polys) {
|
||||
for (const auto& poly : polys) {
|
||||
for (size_t i = 0; i < poly.size(); ++i) {
|
||||
vertPos[poly[i].idx] = poly[i].pos;
|
||||
}
|
||||
}
|
||||
DEBUG_ASSERT(std::all_of(triangles.begin(), triangles.end(),
|
||||
[&vertPos, epsilon](const ivec3 &tri) {
|
||||
[&vertPos, epsilon](const ivec3& tri) {
|
||||
return CCW(vertPos[tri[0]], vertPos[tri[1]],
|
||||
vertPos[tri[2]], epsilon) >= 0;
|
||||
}),
|
||||
geometryErr, "triangulation is not entirely CCW!");
|
||||
}
|
||||
|
||||
void Dump(const PolygonsIdx &polys, double epsilon) {
|
||||
void Dump(const PolygonsIdx& polys, double epsilon) {
|
||||
std::cout << std::setprecision(19);
|
||||
std::cout << "Polygon 0 " << epsilon << " " << polys.size() << std::endl;
|
||||
for (auto poly : polys) {
|
||||
std::cout << poly.size() << std::endl;
|
||||
|
@ -141,12 +147,18 @@ void Dump(const PolygonsIdx &polys, double epsilon) {
|
|||
}
|
||||
}
|
||||
|
||||
void PrintFailure(const std::exception &e, const PolygonsIdx &polys,
|
||||
std::vector<ivec3> &triangles, double epsilon) {
|
||||
std::atomic<int> numFailures(0);
|
||||
|
||||
void PrintFailure(const std::exception& e, const PolygonsIdx& polys,
|
||||
std::vector<ivec3>& triangles, double epsilon) {
|
||||
// only print the first triangulation failure
|
||||
if (numFailures.fetch_add(1) != 0) return;
|
||||
std::cout << std::setprecision(19);
|
||||
std::cout << "-----------------------------------" << std::endl;
|
||||
std::cout << "Triangulation failed! Precision = " << epsilon << std::endl;
|
||||
std::cout << e.what() << std::endl;
|
||||
if (triangles.size() > 1000 && !PolygonParams().verbose) {
|
||||
if (triangles.size() > 1000 &&
|
||||
ManifoldParams().verbose < TRIANGULATOR_VERBOSE_LEVEL) {
|
||||
std::cout << "Output truncated due to producing " << triangles.size()
|
||||
<< " triangles." << std::endl;
|
||||
return;
|
||||
|
@ -159,8 +171,9 @@ void PrintFailure(const std::exception &e, const PolygonsIdx &polys,
|
|||
}
|
||||
}
|
||||
|
||||
#define PRINT(msg) \
|
||||
if (params.verbose) std::cout << msg << std::endl;
|
||||
#define PRINT(msg) \
|
||||
if (ManifoldParams().verbose >= TRIANGULATOR_VERBOSE_LEVEL) \
|
||||
std::cout << msg << std::endl;
|
||||
#else
|
||||
#define PRINT(msg)
|
||||
#endif
|
||||
|
@ -170,8 +183,8 @@ void PrintFailure(const std::exception &e, const PolygonsIdx &polys,
|
|||
* Exactly colinear edges and zero-length edges are treated conservatively as
|
||||
* reflex. Does not check for overlaps.
|
||||
*/
|
||||
bool IsConvex(const PolygonsIdx &polys, double epsilon) {
|
||||
for (const SimplePolygonIdx &poly : polys) {
|
||||
bool IsConvex(const PolygonsIdx& polys, double epsilon) {
|
||||
for (const SimplePolygonIdx& poly : polys) {
|
||||
const vec2 firstEdge = poly[0].pos - poly[poly.size() - 1].pos;
|
||||
// Zero-length edges comes out NaN, which won't trip the early return, but
|
||||
// it's okay because that zero-length edge will also get tested
|
||||
|
@ -193,14 +206,14 @@ bool IsConvex(const PolygonsIdx &polys, double epsilon) {
|
|||
* Triangulates a set of convex polygons by alternating instead of a fan, to
|
||||
* avoid creating high-degree vertices.
|
||||
*/
|
||||
std::vector<ivec3> TriangulateConvex(const PolygonsIdx &polys) {
|
||||
std::vector<ivec3> TriangulateConvex(const PolygonsIdx& polys) {
|
||||
const size_t numTri = manifold::transform_reduce(
|
||||
polys.begin(), polys.end(), 0_uz,
|
||||
[](size_t a, size_t b) { return a + b; },
|
||||
[](const SimplePolygonIdx &poly) { return poly.size() - 2; });
|
||||
[](const SimplePolygonIdx& poly) { return poly.size() - 2; });
|
||||
std::vector<ivec3> triangles;
|
||||
triangles.reserve(numTri);
|
||||
for (const SimplePolygonIdx &poly : polys) {
|
||||
for (const SimplePolygonIdx& poly : polys) {
|
||||
size_t i = 0;
|
||||
size_t k = poly.size() - 1;
|
||||
bool right = true;
|
||||
|
@ -222,9 +235,7 @@ std::vector<ivec3> TriangulateConvex(const PolygonsIdx &polys) {
|
|||
* Ear-clipping triangulator based on David Eberly's approach from Geometric
|
||||
* Tools, but adjusted to handle epsilon-valid polygons, and including a
|
||||
* fallback that ensures a manifold triangulation even for overlapping polygons.
|
||||
* This is an O(n^2) algorithm, but hopefully this is not a big problem as the
|
||||
* number of edges in a given polygon is generally much less than the number of
|
||||
* triangles in a mesh, and relatively few faces even need triangulation.
|
||||
* This is reduced from an O(n^2) algorithm by means of our BVH Collider.
|
||||
*
|
||||
* The main adjustments for robustness involve clipping the sharpest ears first
|
||||
* (a known technique to get higher triangle quality), and doing an exhaustive
|
||||
|
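For orientation, a bare-bones ear-clipping sketch for a single simple CCW polygon follows. It omits everything the class below actually adds (ε-validity, hole key-holing, sharpest-ear-first ordering, and the spatial query that removes the quadratic behavior), so it is illustrative only:

    #include <array>
    #include <cstddef>
    #include <vector>

    struct Vec2d { double x, y; };

    static double Cross(Vec2d o, Vec2d a, Vec2d b) {
      return (a.x - o.x) * (b.y - o.y) - (a.y - o.y) * (b.x - o.x);
    }

    static bool PointInTri(Vec2d p, Vec2d a, Vec2d b, Vec2d c) {
      return Cross(a, b, p) >= 0 && Cross(b, c, p) >= 0 && Cross(c, a, p) >= 0;
    }

    // Repeatedly clip a convex vertex whose triangle contains no other vertex.
    std::vector<std::array<int, 3>> EarClipSimple(const std::vector<Vec2d>& poly) {
      std::vector<int> idx(poly.size());
      for (size_t i = 0; i < poly.size(); ++i) idx[i] = static_cast<int>(i);
      std::vector<std::array<int, 3>> tris;
      while (idx.size() > 3) {
        bool clipped = false;
        for (size_t i = 0; i < idx.size(); ++i) {
          int a = idx[(i + idx.size() - 1) % idx.size()];
          int b = idx[i];
          int c = idx[(i + 1) % idx.size()];
          if (Cross(poly[a], poly[b], poly[c]) <= 0) continue;  // reflex or degenerate
          bool isEar = true;
          for (int v : idx) {
            if (v == a || v == b || v == c) continue;
            if (PointInTri(poly[v], poly[a], poly[b], poly[c])) { isEar = false; break; }
          }
          if (!isEar) continue;
          tris.push_back({a, b, c});
          idx.erase(idx.begin() + i);
          clipped = true;
          break;
        }
        if (!clipped) break;  // degenerate input; the real triangulator never gives up here
      }
      if (idx.size() == 3) tris.push_back({idx[0], idx[1], idx[2]});
      return tris;
    }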
@ -234,11 +245,11 @@ std::vector<ivec3> TriangulateConvex(const PolygonsIdx &polys) {
|
|||
|
||||
class EarClip {
|
||||
public:
|
||||
EarClip(const PolygonsIdx &polys, double epsilon) : epsilon_(epsilon) {
|
||||
EarClip(const PolygonsIdx& polys, double epsilon) : epsilon_(epsilon) {
|
||||
ZoneScoped;
|
||||
|
||||
size_t numVert = 0;
|
||||
for (const SimplePolygonIdx &poly : polys) {
|
||||
for (const SimplePolygonIdx& poly : polys) {
|
||||
numVert += poly.size();
|
||||
}
|
||||
polygon_.reserve(numVert + 2 * polys.size());
|
||||
|
@ -272,19 +283,19 @@ class EarClip {
|
|||
|
||||
private:
|
||||
struct Vert;
|
||||
typedef std::vector<Vert>::iterator VertItr;
|
||||
typedef std::vector<Vert>::const_iterator VertItrC;
|
||||
using VertItr = std::vector<Vert>::iterator;
|
||||
using VertItrC = std::vector<Vert>::const_iterator;
|
||||
struct MaxX {
|
||||
bool operator()(const VertItr &a, const VertItr &b) const {
|
||||
bool operator()(const VertItr& a, const VertItr& b) const {
|
||||
return a->pos.x > b->pos.x;
|
||||
}
|
||||
};
|
||||
struct MinCost {
|
||||
bool operator()(const VertItr &a, const VertItr &b) const {
|
||||
bool operator()(const VertItr& a, const VertItr& b) const {
|
||||
return a->cost < b->cost;
|
||||
}
|
||||
};
|
||||
typedef std::set<VertItr, MinCost>::iterator qItr;
|
||||
using qItr = std::set<VertItr, MinCost>::iterator;
|
||||
|
||||
// The flat list where all the Verts are stored. Not used much for traversal.
|
||||
std::vector<Vert> polygon_;
|
||||
|
@ -306,9 +317,8 @@ class EarClip {
|
|||
double epsilon_;
|
||||
|
||||
struct IdxCollider {
|
||||
Collider collider;
|
||||
Vec<PolyVert> points;
|
||||
std::vector<VertItr> itr;
|
||||
SparseIndices ind;
|
||||
};
|
||||
|
||||
// A circularly-linked list representing the polygon(s) that still need to be
|
||||
|
@ -399,20 +409,9 @@ class EarClip {
|
|||
return true;
|
||||
}
|
||||
|
||||
// A major key to robustness is to only clip convex ears, but this is
|
||||
// difficult to determine when an edge is folded back on itself. This
|
||||
// function walks down the kinks in a degenerate portion of a polygon until
|
||||
// it finds a clear geometric result. In the vast majority of cases the loop
|
||||
// will only need one or two iterations.
|
||||
// Returns true for convex or colinear ears.
|
||||
bool IsConvex(double epsilon) const {
|
||||
const int convexity = CCW(left->pos, pos, right->pos, epsilon);
|
||||
if (convexity != 0) {
|
||||
return convexity > 0;
|
||||
}
|
||||
if (la::dot(left->pos - pos, right->pos - pos) <= 0) {
|
||||
return true;
|
||||
}
|
||||
return left->InsideEdge(left->right, epsilon, true);
|
||||
return CCW(left->pos, pos, right->pos, epsilon) >= 0;
|
||||
}
|
||||
|
||||
// Subtly different from !IsConvex because IsConvex will return true for
|
||||
|
@ -489,7 +488,7 @@ class EarClip {
|
|||
// values < -epsilon so they will never affect validity. The first
|
||||
// totalCost is designed to give priority to sharper angles. Any cost < (-1
|
||||
// - epsilon) has satisfied the Delaunay condition.
|
||||
double EarCost(double epsilon, IdxCollider &collider) const {
|
||||
double EarCost(double epsilon, IdxCollider& collider) const {
|
||||
vec2 openSide = left->pos - right->pos;
|
||||
const vec2 center = 0.5 * (left->pos + right->pos);
|
||||
const double scale = 4 / la::dot(openSide, openSide);
|
||||
|
@ -502,38 +501,32 @@ class EarClip {
|
|||
return totalCost;
|
||||
}
|
||||
|
||||
Box earBox = Box{vec3(center.x - radius, center.y - radius, 0),
|
||||
vec3(center.x + radius, center.y + radius, 0)};
|
||||
earBox.Union(vec3(pos, 0));
|
||||
collider.collider.Collisions(VecView<const Box>(&earBox, 1),
|
||||
collider.ind);
|
||||
Rect earBox = Rect(vec2(center.x - radius, center.y - radius),
|
||||
vec2(center.x + radius, center.y + radius));
|
||||
earBox.Union(pos);
|
||||
earBox.min -= epsilon;
|
||||
earBox.max += epsilon;
|
||||
|
||||
const int lid = left->mesh_idx;
|
||||
const int rid = right->mesh_idx;
|
||||
|
||||
totalCost = transform_reduce(
|
||||
countAt(0), countAt(collider.ind.size()), totalCost,
|
||||
[](double a, double b) { return std::max(a, b); },
|
||||
[&](size_t i) {
|
||||
const VertItr test = collider.itr[collider.ind.Get(i, true)];
|
||||
if (!Clipped(test) && test->mesh_idx != mesh_idx &&
|
||||
test->mesh_idx != lid &&
|
||||
test->mesh_idx != rid) { // Skip duplicated verts
|
||||
double cost = Cost(test, openSide, epsilon);
|
||||
if (cost < -epsilon) {
|
||||
cost = DelaunayCost(test->pos - center, scale, epsilon);
|
||||
}
|
||||
return cost;
|
||||
}
|
||||
return std::numeric_limits<double>::lowest();
|
||||
});
|
||||
collider.ind.Clear();
|
||||
QueryTwoDTree(collider.points, earBox, [&](PolyVert point) {
|
||||
const VertItr test = collider.itr[point.idx];
|
||||
if (!Clipped(test) && test->mesh_idx != mesh_idx &&
|
||||
test->mesh_idx != lid &&
|
||||
test->mesh_idx != rid) { // Skip duplicated verts
|
||||
double cost = Cost(test, openSide, epsilon);
|
||||
if (cost < -epsilon) {
|
||||
cost = DelaunayCost(test->pos - center, scale, epsilon);
|
||||
}
|
||||
if (cost > totalCost) totalCost = cost;
|
||||
}
|
||||
});
|
||||
return totalCost;
|
||||
}
|
||||
|
||||
void PrintVert() const {
|
||||
#ifdef MANIFOLD_DEBUG
|
||||
if (!params.verbose) return;
|
||||
if (ManifoldParams().verbose < TRIANGULATOR_VERBOSE_LEVEL) return;
|
||||
std::cout << "vert: " << mesh_idx << ", left: " << left->mesh_idx
|
||||
<< ", right: " << right->mesh_idx << ", cost: " << cost
|
||||
<< std::endl;
|
||||
|
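The Delaunay condition referenced in the cost logic above is the standard empty-circumcircle test. A generic sketch of that predicate follows; it is not Manifold's DelaunayCost scaling, whose body is not shown in this diff:

    #include <array>

    using Vec2d = std::array<double, 2>;

    // Returns > 0 when d lies strictly inside the circumcircle of the CCW
    // triangle (a, b, c); 0 when cocircular; < 0 when outside.
    double InCircle(const Vec2d& a, const Vec2d& b, const Vec2d& c, const Vec2d& d) {
      const double adx = a[0] - d[0], ady = a[1] - d[1];
      const double bdx = b[0] - d[0], bdy = b[1] - d[1];
      const double cdx = c[0] - d[0], cdy = c[1] - d[1];
      const double ad2 = adx * adx + ady * ady;
      const double bd2 = bdx * bdx + bdy * bdy;
      const double cd2 = cdx * cdx + cdy * cdy;
      return adx * (bdy * cd2 - cdy * bd2) - ady * (bdx * cd2 - cdx * bd2) +
             ad2 * (bdx * cdy - cdx * bdy);
    }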
@ -603,8 +596,7 @@ class EarClip {
|
|||
|
||||
// If an ear will make a degenerate triangle, clip it early to avoid
|
||||
// difficulty in key-holing. This function is recursive, as the process of
|
||||
// clipping may cause the neighbors to degenerate. Reflex degenerates *must
|
||||
// not* be clipped, unless they have a short edge.
|
||||
// clipping may cause the neighbors to degenerate.
|
||||
void ClipIfDegenerate(VertItr ear) {
|
||||
if (Clipped(ear)) {
|
||||
return;
|
||||
|
@ -614,8 +606,7 @@ class EarClip {
|
|||
}
|
||||
if (ear->IsShort(epsilon_) ||
|
||||
(CCW(ear->left->pos, ear->pos, ear->right->pos, epsilon_) == 0 &&
|
||||
la::dot(ear->left->pos - ear->pos, ear->right->pos - ear->pos) > 0 &&
|
||||
ear->IsConvex(epsilon_))) {
|
||||
la::dot(ear->left->pos - ear->pos, ear->right->pos - ear->pos) > 0)) {
|
||||
ClipEar(ear);
|
||||
ClipIfDegenerate(ear->left);
|
||||
ClipIfDegenerate(ear->right);
|
||||
|
@ -623,11 +614,18 @@ class EarClip {
|
|||
}
|
||||
|
||||
// Build the circular list polygon structures.
|
||||
std::vector<VertItr> Initialize(const PolygonsIdx &polys) {
|
||||
std::vector<VertItr> Initialize(const PolygonsIdx& polys) {
|
||||
std::vector<VertItr> starts;
|
||||
for (const SimplePolygonIdx &poly : polys) {
|
||||
const auto invalidItr = polygon_.begin();
|
||||
for (const SimplePolygonIdx& poly : polys) {
|
||||
auto vert = poly.begin();
|
||||
polygon_.push_back({vert->idx, 0.0, earsQueue_.end(), vert->pos});
|
||||
polygon_.push_back({vert->idx,
|
||||
0.0,
|
||||
earsQueue_.end(),
|
||||
vert->pos,
|
||||
{0, 0},
|
||||
invalidItr,
|
||||
invalidItr});
|
||||
const VertItr first = std::prev(polygon_.end());
|
||||
|
||||
bBox_.Union(first->pos);
|
||||
|
@ -639,7 +637,13 @@ class EarClip {
|
|||
for (++vert; vert != poly.end(); ++vert) {
|
||||
bBox_.Union(vert->pos);
|
||||
|
||||
polygon_.push_back({vert->idx, 0.0, earsQueue_.end(), vert->pos});
|
||||
polygon_.push_back({vert->idx,
|
||||
0.0,
|
||||
earsQueue_.end(),
|
||||
vert->pos,
|
||||
{0, 0},
|
||||
invalidItr,
|
||||
invalidItr});
|
||||
VertItr next = std::prev(polygon_.end());
|
||||
|
||||
Link(last, next);
|
||||
|
@ -740,7 +744,7 @@ class EarClip {
|
|||
JoinPolygons(start, connector);
|
||||
|
||||
#ifdef MANIFOLD_DEBUG
|
||||
if (params.verbose) {
|
||||
if (ManifoldParams().verbose >= TRIANGULATOR_VERBOSE_LEVEL) {
|
||||
std::cout << "connected " << start->mesh_idx << " to "
|
||||
<< connector->mesh_idx << std::endl;
|
||||
}
|
||||
|
@ -767,7 +771,8 @@ class EarClip {
|
|||
above * CCW(start->pos, vert->pos, connector->pos, epsilon_);
|
||||
if (vert->pos.x > start->pos.x - epsilon_ &&
|
||||
vert->pos.y * above > start->pos.y * above - epsilon_ &&
|
||||
(inside > 0 || (inside == 0 && vert->pos.x < connector->pos.x)) &&
|
||||
(inside > 0 || (inside == 0 && vert->pos.x < connector->pos.x &&
|
||||
vert->pos.y * above < connector->pos.y * above)) &&
|
||||
vert->InsideEdge(edge, epsilon_, true) && vert->IsReflex(epsilon_)) {
|
||||
connector = vert;
|
||||
}
|
||||
|
@ -803,7 +808,7 @@ class EarClip {
|
|||
|
||||
// Recalculate the cost of the Vert v ear, updating it in the queue by
|
||||
// removing and reinserting it.
|
||||
void ProcessEar(VertItr v, IdxCollider &collider) {
|
||||
void ProcessEar(VertItr v, IdxCollider& collider) {
|
||||
if (v->ear != earsQueue_.end()) {
|
||||
earsQueue_.erase(v->ear);
|
||||
v->ear = earsQueue_.end();
|
||||
|
@ -823,35 +828,16 @@ class EarClip {
|
|||
// epsilon_. Each ear uses this BVH to quickly find a subset of vertices to
|
||||
// check for cost.
|
||||
IdxCollider VertCollider(VertItr start) const {
|
||||
Vec<Box> vertBox;
|
||||
Vec<uint32_t> vertMorton;
|
||||
ZoneScoped;
|
||||
std::vector<VertItr> itr;
|
||||
const Box box(vec3(bBox_.min, 0), vec3(bBox_.max, 0));
|
||||
|
||||
Loop(start, [&vertBox, &vertMorton, &itr, &box, this](VertItr v) {
|
||||
Vec<PolyVert> points;
|
||||
Loop(start, [&itr, &points](VertItr v) {
|
||||
points.push_back({v->pos, static_cast<int>(itr.size())});
|
||||
itr.push_back(v);
|
||||
const vec3 pos(v->pos, 0);
|
||||
vertBox.push_back({pos - epsilon_, pos + epsilon_});
|
||||
vertMorton.push_back(Collider::MortonCode(pos, box));
|
||||
});
|
||||
|
||||
if (itr.empty()) {
|
||||
return {Collider(), itr};
|
||||
}
|
||||
|
||||
const int numVert = itr.size();
|
||||
Vec<int> vertNew2Old(numVert);
|
||||
sequence(vertNew2Old.begin(), vertNew2Old.end());
|
||||
|
||||
stable_sort(vertNew2Old.begin(), vertNew2Old.end(),
|
||||
[&vertMorton](const int a, const int b) {
|
||||
return vertMorton[a] < vertMorton[b];
|
||||
});
|
||||
Permute(vertMorton, vertNew2Old);
|
||||
Permute(vertBox, vertNew2Old);
|
||||
Permute(itr, vertNew2Old);
|
||||
|
||||
return {Collider(vertBox, vertMorton), itr};
|
||||
BuildTwoDTree(points);
|
||||
return {std::move(points), std::move(itr)};
|
||||
}
|
||||
|
||||
// The main ear-clipping loop. This is called once for each simple polygon -
|
||||
|
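VertCollider now stores the loop's vertices as PolyVert points and queries them with BuildTwoDTree/QueryTwoDTree from the new tree2d files, replacing the Morton-sorted BVH. The tree2d implementation itself is not part of this excerpt; a generic sketch of the kind of median-split build and rectangle query this API suggests:

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    struct Pt2 { double x, y; int idx; };

    // Arrange points into k-d order in place: median split, alternating axis.
    void BuildKd(std::vector<Pt2>& pts, size_t lo, size_t hi, bool byX = true) {
      if (hi - lo <= 1) return;
      size_t mid = (lo + hi) / 2;
      std::nth_element(pts.begin() + lo, pts.begin() + mid, pts.begin() + hi,
                       [byX](const Pt2& a, const Pt2& b) {
                         return byX ? a.x < b.x : a.y < b.y;
                       });
      BuildKd(pts, lo, mid, !byX);
      BuildKd(pts, mid + 1, hi, !byX);
    }

    // Visit every point inside the axis-aligned rectangle [minX,maxX] x [minY,maxY].
    template <typename F>
    void QueryKd(const std::vector<Pt2>& pts, size_t lo, size_t hi, bool byX,
                 double minX, double minY, double maxX, double maxY, F visit) {
      if (lo >= hi) return;
      size_t mid = (lo + hi) / 2;
      const Pt2& p = pts[mid];
      if (p.x >= minX && p.x <= maxX && p.y >= minY && p.y <= maxY) visit(p);
      const double split = byX ? p.x : p.y;
      if ((byX ? minX : minY) <= split)
        QueryKd(pts, lo, mid, !byX, minX, minY, maxX, maxY, visit);
      if ((byX ? maxX : maxY) >= split)
        QueryKd(pts, mid + 1, hi, !byX, minX, minY, maxX, maxY, visit);
    }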
@ -907,7 +893,7 @@ class EarClip {
|
|||
|
||||
void Dump(VertItrC start) const {
|
||||
#ifdef MANIFOLD_DEBUG
|
||||
if (!params.verbose) return;
|
||||
if (ManifoldParams().verbose < TRIANGULATOR_VERBOSE_LEVEL) return;
|
||||
VertItrC v = start;
|
||||
std::cout << "show(array([" << std::setprecision(15) << std::endl;
|
||||
do {
|
||||
|
@ -944,16 +930,21 @@ namespace manifold {
|
|||
* references back to the original vertices.
|
||||
* @param epsilon The value of ε, bounding the uncertainty of the
|
||||
* input.
|
||||
* @param allowConvex If true (default), the triangulator will use a fast
|
||||
* triangulation if the input is convex, falling back to ear-clipping if not.
|
||||
* The triangle quality may be lower, so set to false to disable this
|
||||
* optimization.
|
||||
* @return std::vector<ivec3> The triangles, referencing the original
|
||||
* vertex indices.
|
||||
*/
|
||||
std::vector<ivec3> TriangulateIdx(const PolygonsIdx &polys, double epsilon) {
|
||||
std::vector<ivec3> TriangulateIdx(const PolygonsIdx& polys, double epsilon,
|
||||
bool allowConvex) {
|
||||
std::vector<ivec3> triangles;
|
||||
double updatedEpsilon = epsilon;
|
||||
#ifdef MANIFOLD_DEBUG
|
||||
try {
|
||||
#endif
|
||||
if (IsConvex(polys, epsilon)) { // fast path
|
||||
if (allowConvex && IsConvex(polys, epsilon)) { // fast path
|
||||
triangles = TriangulateConvex(polys);
|
||||
} else {
|
||||
EarClip triangulator(polys, epsilon);
|
||||
|
@ -961,18 +952,18 @@ std::vector<ivec3> TriangulateIdx(const PolygonsIdx &polys, double epsilon) {
|
|||
updatedEpsilon = triangulator.GetPrecision();
|
||||
}
|
||||
#ifdef MANIFOLD_DEBUG
|
||||
if (params.intermediateChecks) {
|
||||
if (ManifoldParams().intermediateChecks) {
|
||||
CheckTopology(triangles, polys);
|
||||
if (!params.processOverlaps) {
|
||||
if (!ManifoldParams().processOverlaps) {
|
||||
CheckGeometry(triangles, polys, 2 * updatedEpsilon);
|
||||
}
|
||||
}
|
||||
} catch (const geometryErr &e) {
|
||||
if (!params.suppressErrors) {
|
||||
} catch (const geometryErr& e) {
|
||||
if (!ManifoldParams().suppressErrors) {
|
||||
PrintFailure(e, polys, triangles, updatedEpsilon);
|
||||
}
|
||||
throw;
|
||||
} catch (const std::exception &e) {
|
||||
} catch (const std::exception& e) {
|
||||
PrintFailure(e, polys, triangles, updatedEpsilon);
|
||||
throw;
|
||||
}
|
||||
|
@ -989,22 +980,25 @@ std::vector<ivec3> TriangulateIdx(const PolygonsIdx &polys, double epsilon) {
|
|||
* polygons and/or holes.
|
||||
* @param epsilon The value of ε, bounding the uncertainty of the
|
||||
* input.
|
||||
* @param allowConvex If true (default), the triangulator will use a fast
|
||||
* triangulation if the input is convex, falling back to ear-clipping if not.
|
||||
* The triangle quality may be lower, so set to false to disable this
|
||||
* optimization.
|
||||
* @return std::vector<ivec3> The triangles, referencing the original
|
||||
* polygon points in order.
|
||||
*/
|
||||
std::vector<ivec3> Triangulate(const Polygons &polygons, double epsilon) {
|
||||
std::vector<ivec3> Triangulate(const Polygons& polygons, double epsilon,
|
||||
bool allowConvex) {
|
||||
int idx = 0;
|
||||
PolygonsIdx polygonsIndexed;
|
||||
for (const auto &poly : polygons) {
|
||||
for (const auto& poly : polygons) {
|
||||
SimplePolygonIdx simpleIndexed;
|
||||
for (const vec2 &polyVert : poly) {
|
||||
for (const vec2& polyVert : poly) {
|
||||
simpleIndexed.push_back({polyVert, idx++});
|
||||
}
|
||||
polygonsIndexed.push_back(simpleIndexed);
|
||||
}
|
||||
return TriangulateIdx(polygonsIndexed, epsilon);
|
||||
return TriangulateIdx(polygonsIndexed, epsilon, allowConvex);
|
||||
}
|
||||
|
||||
ExecutionParams &PolygonParams() { return params; }
|
||||
|
||||
} // namespace manifold
|
||||
|
|
235
thirdparty/manifold/src/properties.cpp
vendored
|
@ -14,9 +14,13 @@
|
|||
|
||||
#include <limits>
|
||||
|
||||
#include "./impl.h"
|
||||
#include "./parallel.h"
|
||||
#include "./tri_dist.h"
|
||||
#if MANIFOLD_PAR == 1
|
||||
#include <tbb/combinable.h>
|
||||
#endif
|
||||
|
||||
#include "impl.h"
|
||||
#include "parallel.h"
|
||||
#include "tri_dist.h"
|
||||
|
||||
namespace {
|
||||
using namespace manifold;
|
||||
|
@ -64,48 +68,6 @@ struct CurvatureAngles {
|
|||
}
|
||||
};
|
||||
|
||||
struct UpdateProperties {
|
||||
VecView<ivec3> triProp;
|
||||
VecView<double> properties;
|
||||
VecView<uint8_t> counters;
|
||||
|
||||
VecView<const double> oldProperties;
|
||||
VecView<const Halfedge> halfedge;
|
||||
VecView<const double> meanCurvature;
|
||||
VecView<const double> gaussianCurvature;
|
||||
const int oldNumProp;
|
||||
const int numProp;
|
||||
const int gaussianIdx;
|
||||
const int meanIdx;
|
||||
|
||||
void operator()(const size_t tri) {
|
||||
for (const int i : {0, 1, 2}) {
|
||||
const int vert = halfedge[3 * tri + i].startVert;
|
||||
if (oldNumProp == 0) {
|
||||
triProp[tri][i] = vert;
|
||||
}
|
||||
const int propVert = triProp[tri][i];
|
||||
|
||||
auto old = std::atomic_exchange(
|
||||
reinterpret_cast<std::atomic<uint8_t>*>(&counters[propVert]),
|
||||
static_cast<uint8_t>(1));
|
||||
if (old == 1) continue;
|
||||
|
||||
for (int p = 0; p < oldNumProp; ++p) {
|
||||
properties[numProp * propVert + p] =
|
||||
oldProperties[oldNumProp * propVert + p];
|
||||
}
|
||||
|
||||
if (gaussianIdx >= 0) {
|
||||
properties[numProp * propVert + gaussianIdx] = gaussianCurvature[vert];
|
||||
}
|
||||
if (meanIdx >= 0) {
|
||||
properties[numProp * propVert + meanIdx] = meanCurvature[vert];
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
struct CheckHalfedges {
|
||||
VecView<const Halfedge> halfedges;
|
||||
|
||||
|
@ -138,33 +100,8 @@ struct CheckCCW {
|
|||
for (int i : {0, 1, 2})
|
||||
v[i] = projection * vertPos[halfedges[3 * face + i].startVert];
|
||||
|
||||
int ccw = CCW(v[0], v[1], v[2], std::abs(tol));
|
||||
bool check = tol > 0 ? ccw >= 0 : ccw == 0;
|
||||
|
||||
#ifdef MANIFOLD_DEBUG
|
||||
if (tol > 0 && !check) {
|
||||
vec2 v1 = v[1] - v[0];
|
||||
vec2 v2 = v[2] - v[0];
|
||||
double area = v1.x * v2.y - v1.y * v2.x;
|
||||
double base2 = std::max(la::dot(v1, v1), la::dot(v2, v2));
|
||||
double base = std::sqrt(base2);
|
||||
vec3 V0 = vertPos[halfedges[3 * face].startVert];
|
||||
vec3 V1 = vertPos[halfedges[3 * face + 1].startVert];
|
||||
vec3 V2 = vertPos[halfedges[3 * face + 2].startVert];
|
||||
vec3 norm = la::cross(V1 - V0, V2 - V0);
|
||||
printf(
|
||||
"Tri %ld does not match normal, approx height = %g, base = %g\n"
|
||||
"tol = %g, area2 = %g, base2*tol2 = %g\n"
|
||||
"normal = %g, %g, %g\n"
|
||||
"norm = %g, %g, %g\nverts: %d, %d, %d\n",
|
||||
static_cast<long>(face), area / base, base, tol, area * area,
|
||||
base2 * tol * tol, triNormal[face].x, triNormal[face].y,
|
||||
triNormal[face].z, norm.x, norm.y, norm.z,
|
||||
halfedges[3 * face].startVert, halfedges[3 * face + 1].startVert,
|
||||
halfedges[3 * face + 2].startVert);
|
||||
}
|
||||
#endif
|
||||
return check;
|
||||
const int ccw = CCW(v[0], v[1], v[2], std::abs(tol));
|
||||
return tol > 0 ? ccw >= 0 : ccw == 0;
|
||||
}
|
||||
};
|
||||
} // namespace
|
||||
|
@ -211,55 +148,58 @@ std::mutex dump_lock;
|
|||
* Note that this is not checking for epsilon-validity.
|
||||
*/
|
||||
bool Manifold::Impl::IsSelfIntersecting() const {
|
||||
const double epsilonSq = epsilon_ * epsilon_;
|
||||
const double ep = 2 * epsilon_;
|
||||
const double epsilonSq = ep * ep;
|
||||
Vec<Box> faceBox;
|
||||
Vec<uint32_t> faceMorton;
|
||||
GetFaceBoxMorton(faceBox, faceMorton);
|
||||
SparseIndices collisions = collider_.Collisions<true>(faceBox.cview());
|
||||
|
||||
const bool verbose = ManifoldParams().verbose;
|
||||
return !all_of(countAt(0), countAt(collisions.size()), [&](size_t i) {
|
||||
size_t x = collisions.Get(i, false);
|
||||
size_t y = collisions.Get(i, true);
|
||||
std::array<vec3, 3> tri_x, tri_y;
|
||||
std::atomic<bool> intersecting(false);
|
||||
|
||||
auto f = [&](int tri0, int tri1) {
|
||||
std::array<vec3, 3> triVerts0, triVerts1;
|
||||
for (int i : {0, 1, 2}) {
|
||||
tri_x[i] = vertPos_[halfedge_[3 * x + i].startVert];
|
||||
tri_y[i] = vertPos_[halfedge_[3 * y + i].startVert];
|
||||
triVerts0[i] = vertPos_[halfedge_[3 * tri0 + i].startVert];
|
||||
triVerts1[i] = vertPos_[halfedge_[3 * tri1 + i].startVert];
|
||||
}
|
||||
// if triangles x and y share a vertex, return true to skip the
|
||||
// if triangles tri0 and tri1 share a vertex, return true to skip the
|
||||
// check. we relax the sharing criteria a bit to allow for at most
|
||||
// distance epsilon squared
|
||||
for (int i : {0, 1, 2})
|
||||
for (int j : {0, 1, 2})
|
||||
if (distance2(tri_x[i], tri_y[j]) <= epsilonSq) return true;
|
||||
if (distance2(triVerts0[i], triVerts1[j]) <= epsilonSq) return;
|
||||
|
||||
if (DistanceTriangleTriangleSquared(tri_x, tri_y) == 0.0) {
|
||||
if (DistanceTriangleTriangleSquared(triVerts0, triVerts1) == 0.0) {
|
||||
// try to move the triangles around the normal of the other face
|
||||
std::array<vec3, 3> tmp_x, tmp_y;
|
||||
for (int i : {0, 1, 2}) tmp_x[i] = tri_x[i] + epsilon_ * faceNormal_[y];
|
||||
if (DistanceTriangleTriangleSquared(tmp_x, tri_y) > 0.0) return true;
|
||||
for (int i : {0, 1, 2}) tmp_x[i] = tri_x[i] - epsilon_ * faceNormal_[y];
|
||||
if (DistanceTriangleTriangleSquared(tmp_x, tri_y) > 0.0) return true;
|
||||
for (int i : {0, 1, 2}) tmp_y[i] = tri_y[i] + epsilon_ * faceNormal_[x];
|
||||
if (DistanceTriangleTriangleSquared(tri_x, tmp_y) > 0.0) return true;
|
||||
for (int i : {0, 1, 2}) tmp_y[i] = tri_y[i] - epsilon_ * faceNormal_[x];
|
||||
if (DistanceTriangleTriangleSquared(tri_x, tmp_y) > 0.0) return true;
|
||||
std::array<vec3, 3> tmp0, tmp1;
|
||||
for (int i : {0, 1, 2}) tmp0[i] = triVerts0[i] + ep * faceNormal_[tri1];
|
||||
if (DistanceTriangleTriangleSquared(tmp0, triVerts1) > 0.0) return;
|
||||
for (int i : {0, 1, 2}) tmp0[i] = triVerts0[i] - ep * faceNormal_[tri1];
|
||||
if (DistanceTriangleTriangleSquared(tmp0, triVerts1) > 0.0) return;
|
||||
for (int i : {0, 1, 2}) tmp1[i] = triVerts1[i] + ep * faceNormal_[tri0];
|
||||
if (DistanceTriangleTriangleSquared(triVerts0, tmp1) > 0.0) return;
|
||||
for (int i : {0, 1, 2}) tmp1[i] = triVerts1[i] - ep * faceNormal_[tri0];
|
||||
if (DistanceTriangleTriangleSquared(triVerts0, tmp1) > 0.0) return;
|
||||
|
||||
#ifdef MANIFOLD_DEBUG
|
||||
if (verbose) {
|
||||
if (ManifoldParams().verbose > 0) {
|
||||
dump_lock.lock();
|
||||
std::cout << "intersecting:" << std::endl;
|
||||
for (int i : {0, 1, 2}) std::cout << tri_x[i] << " ";
|
||||
for (int i : {0, 1, 2}) std::cout << triVerts0[i] << " ";
|
||||
std::cout << std::endl;
|
||||
for (int i : {0, 1, 2}) std::cout << tri_y[i] << " ";
|
||||
for (int i : {0, 1, 2}) std::cout << triVerts1[i] << " ";
|
||||
std::cout << std::endl;
|
||||
dump_lock.unlock();
|
||||
}
|
||||
#endif
|
||||
return false;
|
||||
intersecting.store(true);
|
||||
}
|
||||
return true;
|
||||
});
|
||||
};
|
||||
|
||||
auto recorder = MakeSimpleRecorder(f);
|
||||
collider_.Collisions<true>(faceBox.cview(), recorder);
|
||||
|
||||
return intersecting.load();
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -335,20 +275,36 @@ void Manifold::Impl::CalculateCurvature(int gaussianIdx, int meanIdx) {
|
|||
|
||||
const int oldNumProp = NumProp();
|
||||
const int numProp = std::max(oldNumProp, std::max(gaussianIdx, meanIdx) + 1);
|
||||
const Vec<double> oldProperties = meshRelation_.properties;
|
||||
meshRelation_.properties = Vec<double>(numProp * NumPropVert(), 0);
|
||||
meshRelation_.numProp = numProp;
|
||||
if (meshRelation_.triProperties.size() == 0) {
|
||||
meshRelation_.triProperties.resize(NumTri());
|
||||
}
|
||||
const Vec<double> oldProperties = properties_;
|
||||
properties_ = Vec<double>(numProp * NumPropVert(), 0);
|
||||
numProp_ = numProp;
|
||||
|
||||
const Vec<uint8_t> counters(NumPropVert(), 0);
|
||||
for_each_n(
|
||||
policy, countAt(0_uz), NumTri(),
|
||||
UpdateProperties({meshRelation_.triProperties, meshRelation_.properties,
|
||||
counters, oldProperties, halfedge_, vertMeanCurvature,
|
||||
vertGaussianCurvature, oldNumProp, numProp, gaussianIdx,
|
||||
meanIdx}));
|
||||
Vec<uint8_t> counters(NumPropVert(), 0);
|
||||
for_each_n(policy, countAt(0_uz), NumTri(), [&](const size_t tri) {
|
||||
for (const int i : {0, 1, 2}) {
|
||||
const Halfedge& edge = halfedge_[3 * tri + i];
|
||||
const int vert = edge.startVert;
|
||||
const int propVert = edge.propVert;
|
||||
|
||||
auto old = std::atomic_exchange(
|
||||
reinterpret_cast<std::atomic<uint8_t>*>(&counters[propVert]),
|
||||
static_cast<uint8_t>(1));
|
||||
if (old == 1) continue;
|
||||
|
||||
for (int p = 0; p < oldNumProp; ++p) {
|
||||
properties_[numProp * propVert + p] =
|
||||
oldProperties[oldNumProp * propVert + p];
|
||||
}
|
||||
|
||||
if (gaussianIdx >= 0) {
|
||||
properties_[numProp * propVert + gaussianIdx] =
|
||||
vertGaussianCurvature[vert];
|
||||
}
|
||||
if (meanIdx >= 0) {
|
||||
properties_[numProp * propVert + meanIdx] = vertMeanCurvature[vert];
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -404,6 +360,36 @@ bool Manifold::Impl::IsIndexInBounds(VecView<const ivec3> triVerts) const {
|
|||
return minmax[0] >= 0 && minmax[1] < static_cast<int>(NumVert());
|
||||
}
|
||||
|
||||
struct MinDistanceRecorder {
|
||||
using Local = double;
|
||||
const Manifold::Impl &self, &other;
|
||||
#if MANIFOLD_PAR == 1
|
||||
tbb::combinable<double> store = tbb::combinable<double>(
|
||||
[]() { return std::numeric_limits<double>::infinity(); });
|
||||
Local& local() { return store.local(); }
|
||||
double get() {
|
||||
double result = std::numeric_limits<double>::infinity();
|
||||
store.combine_each([&](double& val) { result = std::min(result, val); });
|
||||
return result;
|
||||
}
|
||||
#else
|
||||
double result = std::numeric_limits<double>::infinity();
|
||||
Local& local() { return result; }
|
||||
double get() { return result; }
|
||||
#endif
|
||||
|
||||
void record(int triOther, int tri, double& minDistance) {
|
||||
std::array<vec3, 3> p;
|
||||
std::array<vec3, 3> q;
|
||||
|
||||
for (const int j : {0, 1, 2}) {
|
||||
p[j] = self.vertPos_[self.halfedge_[3 * tri + j].startVert];
|
||||
q[j] = other.vertPos_[other.halfedge_[3 * triOther + j].startVert];
|
||||
}
|
||||
minDistance = std::min(minDistance, DistanceTriangleTriangleSquared(p, q));
|
||||
}
|
||||
};
|
||||
|
||||
/*
|
||||
* Returns the minimum gap between two manifolds. Returns a double between
|
||||
* 0 and searchLength.
|
||||
|
@ -422,26 +408,11 @@ double Manifold::Impl::MinGap(const Manifold::Impl& other,
|
|||
box.max + vec3(searchLength));
|
||||
});
|
||||
|
||||
SparseIndices collisions = collider_.Collisions(faceBoxOther.cview());
|
||||
|
||||
double minDistanceSquared = transform_reduce(
|
||||
countAt(0_uz), countAt(collisions.size()), searchLength * searchLength,
|
||||
[](double a, double b) { return std::min(a, b); },
|
||||
[&collisions, this, &other](int i) {
|
||||
const int tri = collisions.Get(i, 1);
|
||||
const int triOther = collisions.Get(i, 0);
|
||||
|
||||
std::array<vec3, 3> p;
|
||||
std::array<vec3, 3> q;
|
||||
|
||||
for (const int j : {0, 1, 2}) {
|
||||
p[j] = vertPos_[halfedge_[3 * tri + j].startVert];
|
||||
q[j] = other.vertPos_[other.halfedge_[3 * triOther + j].startVert];
|
||||
}
|
||||
|
||||
return DistanceTriangleTriangleSquared(p, q);
|
||||
});
|
||||
|
||||
MinDistanceRecorder recorder{*this, other};
|
||||
collider_.Collisions<false, Box, MinDistanceRecorder>(faceBoxOther.cview(),
|
||||
recorder, false);
|
||||
double minDistanceSquared =
|
||||
std::min(recorder.get(), searchLength * searchLength);
|
||||
return sqrt(minDistanceSquared);
|
||||
};
|
||||
|
||||
|
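MinDistanceRecorder above keeps one running minimum per thread in a tbb::combinable slot and combines them at the end. A standalone sketch of that thread-local-minimum reduction, driven here by a plain parallel_for over an illustrative array rather than the BVH collider:

    #include <tbb/blocked_range.h>
    #include <tbb/combinable.h>
    #include <tbb/parallel_for.h>
    #include <algorithm>
    #include <cmath>
    #include <cstddef>
    #include <limits>
    #include <vector>

    // Minimum absolute value of a vector, accumulated per thread then combined.
    double MinAbs(const std::vector<double>& v) {
      tbb::combinable<double> local(
          [] { return std::numeric_limits<double>::infinity(); });
      tbb::parallel_for(tbb::blocked_range<size_t>(0, v.size()),
                        [&](const tbb::blocked_range<size_t>& r) {
                          double& m = local.local();
                          for (size_t i = r.begin(); i != r.end(); ++i)
                            m = std::min(m, std::fabs(v[i]));
                        });
      double result = std::numeric_limits<double>::infinity();
      local.combine_each([&](double m) { result = std::min(result, m); });
      return result;
    }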
|
9
thirdparty/manifold/src/quickhull.cpp
vendored
|
@ -20,7 +20,7 @@
|
|||
#include <algorithm>
|
||||
#include <limits>
|
||||
|
||||
#include "./impl.h"
|
||||
#include "impl.h"
|
||||
|
||||
namespace manifold {
|
||||
|
||||
|
@ -289,7 +289,7 @@ std::pair<Vec<Halfedge>, Vec<vec3>> QuickHull::buildMesh(double epsilon) {
|
|||
for_each(
|
||||
autoPolicy(halfedges.size()), halfedges.begin(), halfedges.end(),
|
||||
[&](Halfedge& he) { he.pairedHalfedge = mapping[he.pairedHalfedge]; });
|
||||
counts.resize(originalVertexData.size() + 1);
|
||||
counts.resize_nofill(originalVertexData.size() + 1);
|
||||
fill(counts.begin(), counts.end(), 0);
|
||||
|
||||
// remove unused vertices
|
||||
|
@ -804,7 +804,7 @@ void QuickHull::setupInitialTetrahedron() {
|
|||
|
||||
std::unique_ptr<Vec<size_t>> QuickHull::getIndexVectorFromPool() {
|
||||
auto r = indexVectorPool.get();
|
||||
r->resize(0);
|
||||
r->clear();
|
||||
return r;
|
||||
}
|
||||
|
||||
|
@ -851,10 +851,9 @@ void Manifold::Impl::Hull(VecView<vec3> vertPos) {
|
|||
std::tie(halfedge_, vertPos_) = qh.buildMesh();
|
||||
CalculateBBox();
|
||||
SetEpsilon();
|
||||
CalculateNormals();
|
||||
InitializeOriginal();
|
||||
Finish();
|
||||
CreateFaces();
|
||||
MarkCoplanar();
|
||||
}
|
||||
|
||||
} // namespace manifold
|
||||
|
|
4
thirdparty/manifold/src/quickhull.h
vendored
|
@ -58,8 +58,8 @@
|
|||
#include <deque>
|
||||
#include <vector>
|
||||
|
||||
#include "./shared.h"
|
||||
#include "./vec.h"
|
||||
#include "shared.h"
|
||||
#include "vec.h"
|
||||
|
||||
namespace manifold {
|
||||
|
||||
|
|
38
thirdparty/manifold/src/sdf.cpp
vendored
|
@ -12,12 +12,12 @@
|
|||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "./hashtable.h"
|
||||
#include "./impl.h"
|
||||
#include "./parallel.h"
|
||||
#include "./utils.h"
|
||||
#include "./vec.h"
|
||||
#include "hashtable.h"
|
||||
#include "impl.h"
|
||||
#include "manifold/manifold.h"
|
||||
#include "parallel.h"
|
||||
#include "utils.h"
|
||||
#include "vec.h"
|
||||
|
||||
namespace {
|
||||
using namespace manifold;
|
||||
|
@ -95,13 +95,14 @@ ivec4 Neighbor(ivec4 base, int i) {
|
|||
return neighborIndex;
|
||||
}
|
||||
|
||||
Uint64 EncodeIndex(ivec4 gridPos, ivec3 gridPow) {
|
||||
return static_cast<Uint64>(gridPos.w) | static_cast<Uint64>(gridPos.z) << 1 |
|
||||
static_cast<Uint64>(gridPos.y) << (1 + gridPow.z) |
|
||||
static_cast<Uint64>(gridPos.x) << (1 + gridPow.z + gridPow.y);
|
||||
uint64_t EncodeIndex(ivec4 gridPos, ivec3 gridPow) {
|
||||
return static_cast<uint64_t>(gridPos.w) |
|
||||
static_cast<uint64_t>(gridPos.z) << 1 |
|
||||
static_cast<uint64_t>(gridPos.y) << (1 + gridPow.z) |
|
||||
static_cast<uint64_t>(gridPos.x) << (1 + gridPow.z + gridPow.y);
|
||||
}
|
||||
|
||||
ivec4 DecodeIndex(Uint64 idx, ivec3 gridPow) {
|
||||
ivec4 DecodeIndex(uint64_t idx, ivec3 gridPow) {
|
||||
ivec4 gridPos;
|
||||
gridPos.w = idx & 1;
|
||||
idx = idx >> 1;
|
||||
|
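EncodeIndex packs a 4-component grid position into one uint64_t: w in the low bit, then z, y, and x in fields whose widths come from gridPow. A round-trip sketch with illustrative field widths (the tail of the real DecodeIndex is truncated in this excerpt, so the decode below is an assumption that simply mirrors the encode):

    #include <cassert>
    #include <cstdint>

    struct GridIdx { uint32_t x, y, z, w; };

    // Pack: w | z << 1 | y << (1 + powZ) | x << (1 + powZ + powY).
    uint64_t Encode(GridIdx g, int powY, int powZ) {
      return static_cast<uint64_t>(g.w) | static_cast<uint64_t>(g.z) << 1 |
             static_cast<uint64_t>(g.y) << (1 + powZ) |
             static_cast<uint64_t>(g.x) << (1 + powZ + powY);
    }

    // Unpack by peeling fields off the low end in the same order.
    GridIdx Decode(uint64_t idx, int powY, int powZ) {
      GridIdx g;
      g.w = idx & 1;
      idx >>= 1;
      g.z = idx & ((uint64_t(1) << powZ) - 1);
      idx >>= powZ;
      g.y = idx & ((uint64_t(1) << powY) - 1);
      idx >>= powY;
      g.x = static_cast<uint32_t>(idx);
      return g;
    }

    int main() {
      GridIdx g{5, 9, 3, 1};
      GridIdx r = Decode(Encode(g, /*powY=*/4, /*powZ=*/2), 4, 2);
      assert(r.x == g.x && r.y == g.y && r.z == g.z && r.w == g.w);
    }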
@ -211,7 +212,7 @@ struct NearSurface {
|
|||
const double level;
|
||||
const double tol;
|
||||
|
||||
inline void operator()(Uint64 index) {
|
||||
inline void operator()(uint64_t index) {
|
||||
ZoneScoped;
|
||||
if (gridVerts.Full()) return;
|
||||
|
||||
|
@ -296,7 +297,7 @@ struct ComputeVerts {
|
|||
|
||||
void operator()(int idx) {
|
||||
ZoneScoped;
|
||||
Uint64 baseKey = gridVerts.KeyAt(idx);
|
||||
uint64_t baseKey = gridVerts.KeyAt(idx);
|
||||
if (baseKey == kOpen) return;
|
||||
|
||||
GridVert& gridVert = gridVerts.At(idx);
|
||||
|
@ -358,7 +359,7 @@ struct BuildTris {
|
|||
|
||||
void operator()(int idx) {
|
||||
ZoneScoped;
|
||||
Uint64 baseKey = gridVerts.KeyAt(idx);
|
||||
uint64_t baseKey = gridVerts.KeyAt(idx);
|
||||
if (baseKey == kOpen) return;
|
||||
|
||||
const GridVert& base = gridVerts.At(idx);
|
||||
|
@ -467,7 +468,7 @@ Manifold Manifold::LevelSet(std::function<double(vec3)> sdf, Box bounds,
|
|||
const vec3 spacing = dim / (vec3(gridSize - 1));
|
||||
|
||||
const ivec3 gridPow(la::log2(gridSize + 2) + 1);
|
||||
const Uint64 maxIndex = EncodeIndex(ivec4(gridSize + 2, 1), gridPow);
|
||||
const uint64_t maxIndex = EncodeIndex(ivec4(gridSize + 2, 1), gridPow);
|
||||
|
||||
// Parallel policy violations will crash language runtimes with runtime locks
|
||||
// that expect to not be called back by unregistered threads. This allows
|
||||
|
@ -479,15 +480,15 @@ Manifold Manifold::LevelSet(std::function<double(vec3)> sdf, Box bounds,
|
|||
Vec<double> voxels(maxIndex);
|
||||
for_each_n(
|
||||
pol, countAt(0_uz), maxIndex,
|
||||
[&voxels, sdf, level, origin, spacing, gridSize, gridPow](Uint64 idx) {
|
||||
[&voxels, sdf, level, origin, spacing, gridSize, gridPow](uint64_t idx) {
|
||||
voxels[idx] = BoundedSDF(DecodeIndex(idx, gridPow) - kVoxelOffset,
|
||||
origin, spacing, gridSize, level, sdf);
|
||||
});
|
||||
|
||||
size_t tableSize = std::min(
|
||||
2 * maxIndex, static_cast<Uint64>(10 * la::pow(maxIndex, 0.667)));
|
||||
2 * maxIndex, static_cast<uint64_t>(10 * la::pow(maxIndex, 0.667)));
|
||||
HashTable<GridVert> gridVerts(tableSize);
|
||||
vertPos.resize(gridVerts.Size() * 7);
|
||||
vertPos.resize_nofill(gridVerts.Size() * 7);
|
||||
|
||||
while (1) {
|
||||
Vec<int> index(1, 0);
|
||||
|
@ -497,7 +498,7 @@ Manifold Manifold::LevelSet(std::function<double(vec3)> sdf, Box bounds,
|
|||
|
||||
if (gridVerts.Full()) { // Resize HashTable
|
||||
const vec3 lastVert = vertPos[index[0] - 1];
|
||||
const Uint64 lastIndex =
|
||||
const uint64_t lastIndex =
|
||||
EncodeIndex(ivec4(ivec3((lastVert - origin) / spacing), 1), gridPow);
|
||||
const double ratio = static_cast<double>(maxIndex) / lastIndex;
|
||||
|
||||
|
@ -529,6 +530,7 @@ Manifold Manifold::LevelSet(std::function<double(vec3)> sdf, Box bounds,
|
|||
pImpl_->RemoveUnreferencedVerts();
|
||||
pImpl_->Finish();
|
||||
pImpl_->InitializeOriginal();
|
||||
pImpl_->MarkCoplanar();
|
||||
return Manifold(pImpl_);
|
||||
}
|
||||
} // namespace manifold
|
||||
|
|
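For reference, the voxel-grid index packing that the sdf.cpp hunks above switch from the local Uint64 alias to plain uint64_t can be sketched standalone as follows: w goes in bit 0, then z, y, and x take the next gridPow.z, gridPow.y, and remaining bits. GridPow, GridPos, Encode, and Decode here are illustrative stand-ins, not the vendored API.

#include <cassert>
#include <cstdint>

struct GridPow { int x, y, z; };  // bits reserved per axis
struct GridPos { int x, y, z, w; };

uint64_t Encode(GridPos p, GridPow pow) {
  return static_cast<uint64_t>(p.w) | static_cast<uint64_t>(p.z) << 1 |
         static_cast<uint64_t>(p.y) << (1 + pow.z) |
         static_cast<uint64_t>(p.x) << (1 + pow.z + pow.y);
}

GridPos Decode(uint64_t idx, GridPow pow) {
  GridPos p;
  p.w = idx & 1;                   idx >>= 1;
  p.z = idx & ((1 << pow.z) - 1);  idx >>= pow.z;
  p.y = idx & ((1 << pow.y) - 1);  idx >>= pow.y;
  p.x = static_cast<int>(idx);
  return p;
}

int main() {
  GridPow pow{5, 6, 7};
  GridPos p{17, 40, 99, 1};
  GridPos q = Decode(Encode(p, pow), pow);
  assert(q.x == p.x && q.y == p.y && q.z == p.z && q.w == p.w);
}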
34
thirdparty/manifold/src/shared.h
vendored
|
@ -14,10 +14,9 @@
|
|||
|
||||
#pragma once
|
||||
|
||||
#include "./parallel.h"
|
||||
#include "./sparse.h"
|
||||
#include "./utils.h"
|
||||
#include "./vec.h"
|
||||
#include "parallel.h"
|
||||
#include "utils.h"
|
||||
#include "vec.h"
|
||||
|
||||
namespace manifold {
|
||||
|
||||
|
@ -120,6 +119,7 @@ inline vec3 GetBarycentric(const vec3& v, const mat3& triPos,
|
|||
struct Halfedge {
|
||||
int startVert, endVert;
|
||||
int pairedHalfedge;
|
||||
int propVert;
|
||||
bool IsForward() const { return startVert < endVert; }
|
||||
bool operator<(const Halfedge& other) const {
|
||||
return startVert == other.startVert ? endVert < other.endVert
|
||||
|
@ -142,12 +142,13 @@ struct TriRef {
|
|||
int originalID;
|
||||
/// Probably the triangle index of the original triangle this was part of:
|
||||
/// Mesh.triVerts[tri], but it's an input, so just pass it along unchanged.
|
||||
int tri;
|
||||
/// Triangles with the same face ID are coplanar.
|
||||
int faceID;
|
||||
/// Triangles with the same coplanar ID are coplanar.
|
||||
int coplanarID;
|
||||
|
||||
bool SameFace(const TriRef& other) const {
|
||||
return meshID == other.meshID && faceID == other.faceID;
|
||||
return meshID == other.meshID && coplanarID == other.coplanarID &&
|
||||
faceID == other.faceID;
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -188,22 +189,12 @@ Vec<TmpEdge> inline CreateTmpEdges(const Vec<Halfedge>& halfedge) {
|
|||
return edges;
|
||||
}
|
||||
|
||||
template <const bool inverted>
|
||||
struct ReindexEdge {
|
||||
VecView<const TmpEdge> edges;
|
||||
SparseIndices& indices;
|
||||
|
||||
void operator()(size_t i) {
|
||||
int& edge = indices.Get(i, inverted);
|
||||
edge = edges[edge].halfedgeIdx;
|
||||
}
|
||||
};
|
||||
|
||||
#ifdef MANIFOLD_DEBUG
|
||||
inline std::ostream& operator<<(std::ostream& stream, const Halfedge& edge) {
|
||||
return stream << "startVert = " << edge.startVert
|
||||
<< ", endVert = " << edge.endVert
|
||||
<< ", pairedHalfedge = " << edge.pairedHalfedge;
|
||||
<< ", pairedHalfedge = " << edge.pairedHalfedge
|
||||
<< ", propVert = " << edge.propVert;
|
||||
}
|
||||
|
||||
inline std::ostream& operator<<(std::ostream& stream, const Barycentric& bary) {
|
||||
|
@ -212,8 +203,9 @@ inline std::ostream& operator<<(std::ostream& stream, const Barycentric& bary) {
|
|||
|
||||
inline std::ostream& operator<<(std::ostream& stream, const TriRef& ref) {
|
||||
return stream << "meshID: " << ref.meshID
|
||||
<< ", originalID: " << ref.originalID << ", tri: " << ref.tri
|
||||
<< ", faceID: " << ref.faceID;
|
||||
<< ", originalID: " << ref.originalID
|
||||
<< ", faceID: " << ref.faceID
|
||||
<< ", coplanarID: " << ref.coplanarID;
|
||||
}
|
||||
#endif
|
||||
} // namespace manifold
|
||||
|
|
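The Halfedge struct above gains a propVert field (a per-corner property-vertex index that replaces the old triProperties lookups used elsewhere in this diff) and TriRef gains coplanarID. A minimal standalone sketch of the indexing convention the rest of the change relies on, three halfedges per triangle with NextHalfedge stepping around a face, is shown below; it mirrors the fields declared here but is not the vendored header itself.

struct Halfedge {
  int startVert, endVert;
  int pairedHalfedge;
  int propVert;  // per-corner property vertex, new in this update
};

inline int Next3(int i) { return (i + 1) % 3; }
// Halfedge i belongs to triangle i / 3; this steps to the next edge of that face.
inline int NextHalfedge(int current) { return 3 * (current / 3) + Next3(current % 3); }

int main() {
  // Halfedges 3, 4, 5 all belong to triangle 1; 5 wraps back around to 3.
  return NextHalfedge(3) == 4 && NextHalfedge(5) == 3 ? 0 : 1;
}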
321
thirdparty/manifold/src/smoothing.cpp
vendored
|
@ -12,8 +12,8 @@
|
|||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "./impl.h"
|
||||
#include "./parallel.h"
|
||||
#include "impl.h"
|
||||
#include "parallel.h"
|
||||
|
||||
namespace {
|
||||
using namespace manifold;
|
||||
|
@ -254,13 +254,10 @@ namespace manifold {
|
|||
* normalIdx shows the beginning of where normals are stored in the properties.
|
||||
*/
|
||||
vec3 Manifold::Impl::GetNormal(int halfedge, int normalIdx) const {
|
||||
const int tri = halfedge / 3;
|
||||
const int j = halfedge % 3;
|
||||
const int prop = meshRelation_.triProperties[tri][j];
|
||||
const int prop = halfedge_[halfedge].propVert;
|
||||
vec3 normal;
|
||||
for (const int i : {0, 1, 2}) {
|
||||
normal[i] =
|
||||
meshRelation_.properties[prop * meshRelation_.numProp + normalIdx + i];
|
||||
normal[i] = properties_[prop * numProp_ + normalIdx + i];
|
||||
}
|
||||
return normal;
|
||||
}
|
||||
|
@ -320,12 +317,12 @@ bool Manifold::Impl::IsMarkedInsideQuad(int halfedge) const {
|
|||
|
||||
// sharpenedEdges are referenced to the input Mesh, but the triangles have
|
||||
// been sorted in creating the Manifold, so the indices are converted using
|
||||
// meshRelation_.
|
||||
// meshRelation_.faceID, which temporarily holds the mapping.
|
||||
std::vector<Smoothness> Manifold::Impl::UpdateSharpenedEdges(
|
||||
const std::vector<Smoothness>& sharpenedEdges) const {
|
||||
std::unordered_map<int, int> oldHalfedge2New;
|
||||
for (size_t tri = 0; tri < NumTri(); ++tri) {
|
||||
int oldTri = meshRelation_.triRef[tri].tri;
|
||||
int oldTri = meshRelation_.triRef[tri].faceID;
|
||||
for (int i : {0, 1, 2}) oldHalfedge2New[3 * oldTri + i] = 3 * tri + i;
|
||||
}
|
||||
std::vector<Smoothness> newSharp = sharpenedEdges;
|
||||
|
@ -371,7 +368,7 @@ Vec<bool> Manifold::Impl::FlatFaces() const {
|
|||
// gets -1, and if there are more than one it gets -2.
|
||||
Vec<int> Manifold::Impl::VertFlatFace(const Vec<bool>& flatFaces) const {
|
||||
Vec<int> vertFlatFace(NumVert(), -1);
|
||||
Vec<TriRef> vertRef(NumVert(), {-1, -1, -1});
|
||||
Vec<TriRef> vertRef(NumVert(), {-1, -1, -1, -1});
|
||||
for (size_t tri = 0; tri < NumTri(); ++tri) {
|
||||
if (flatFaces[tri]) {
|
||||
for (const int j : {0, 1, 2}) {
|
||||
|
@ -439,7 +436,6 @@ void Manifold::Impl::SetNormals(int normalIdx, double minSharpAngle) {
|
|||
if (normalIdx < 0) return;
|
||||
|
||||
const int oldNumProp = NumProp();
|
||||
const int numTri = NumTri();
|
||||
|
||||
Vec<bool> triIsFlatFace = FlatFaces();
|
||||
Vec<int> vertFlatFace = VertFlatFace(triIsFlatFace);
|
||||
|
@ -470,164 +466,153 @@ void Manifold::Impl::SetNormals(int normalIdx, double minSharpAngle) {
|
|||
|
||||
const int numProp = std::max(oldNumProp, normalIdx + 3);
|
||||
Vec<double> oldProperties(numProp * NumPropVert(), 0);
|
||||
meshRelation_.properties.swap(oldProperties);
|
||||
meshRelation_.numProp = numProp;
|
||||
if (meshRelation_.triProperties.size() == 0) {
|
||||
meshRelation_.triProperties.resize(numTri);
|
||||
for_each_n(autoPolicy(numTri, 1e5), countAt(0), numTri, [this](int tri) {
|
||||
for (const int j : {0, 1, 2})
|
||||
meshRelation_.triProperties[tri][j] = halfedge_[3 * tri + j].startVert;
|
||||
});
|
||||
}
|
||||
Vec<ivec3> oldTriProp(numTri, {-1, -1, -1});
|
||||
meshRelation_.triProperties.swap(oldTriProp);
|
||||
properties_.swap(oldProperties);
|
||||
numProp_ = numProp;
|
||||
|
||||
for (int tri = 0; tri < numTri; ++tri) {
|
||||
for (const int i : {0, 1, 2}) {
|
||||
if (meshRelation_.triProperties[tri][i] >= 0) continue;
|
||||
int startEdge = 3 * tri + i;
|
||||
const int vert = halfedge_[startEdge].startVert;
|
||||
Vec<int> oldHalfedgeProp(halfedge_.size());
|
||||
for_each_n(autoPolicy(halfedge_.size(), 1e5), countAt(0), halfedge_.size(),
|
||||
[this, &oldHalfedgeProp](int i) {
|
||||
oldHalfedgeProp[i] = halfedge_[i].propVert;
|
||||
halfedge_[i].propVert = -1;
|
||||
});
|
||||
|
||||
if (vertNumSharp[vert] < 2) { // vertex has single normal
|
||||
const vec3 normal = vertFlatFace[vert] >= 0
|
||||
? faceNormal_[vertFlatFace[vert]]
|
||||
: vertNormal_[vert];
|
||||
int lastProp = -1;
|
||||
ForVert(startEdge, [&](int current) {
|
||||
const int thisTri = current / 3;
|
||||
const int j = current - 3 * thisTri;
|
||||
const int prop = oldTriProp[thisTri][j];
|
||||
meshRelation_.triProperties[thisTri][j] = prop;
|
||||
if (prop == lastProp) return;
|
||||
lastProp = prop;
|
||||
// update property vertex
|
||||
auto start = oldProperties.begin() + prop * oldNumProp;
|
||||
const int numEdge = halfedge_.size();
|
||||
for (int startEdge = 0; startEdge < numEdge; ++startEdge) {
|
||||
if (halfedge_[startEdge].propVert >= 0) continue;
|
||||
const int vert = halfedge_[startEdge].startVert;
|
||||
|
||||
if (vertNumSharp[vert] < 2) { // vertex has single normal
|
||||
const vec3 normal = vertFlatFace[vert] >= 0
|
||||
? faceNormal_[vertFlatFace[vert]]
|
||||
: vertNormal_[vert];
|
||||
int lastProp = -1;
|
||||
ForVert(startEdge, [&](int current) {
|
||||
const int prop = oldHalfedgeProp[current];
|
||||
halfedge_[current].propVert = prop;
|
||||
if (prop == lastProp) return;
|
||||
lastProp = prop;
|
||||
// update property vertex
|
||||
auto start = oldProperties.begin() + prop * oldNumProp;
|
||||
std::copy(start, start + oldNumProp,
|
||||
properties_.begin() + prop * numProp);
|
||||
for (const int i : {0, 1, 2})
|
||||
properties_[prop * numProp + normalIdx + i] = normal[i];
|
||||
});
|
||||
} else { // vertex has multiple normals
|
||||
const vec3 centerPos = vertPos_[vert];
|
||||
// Length degree
|
||||
std::vector<int> group;
|
||||
// Length number of normals
|
||||
std::vector<vec3> normals;
|
||||
int current = startEdge;
|
||||
int prevFace = current / 3;
|
||||
|
||||
do { // find a sharp edge to start on
|
||||
int next = NextHalfedge(halfedge_[current].pairedHalfedge);
|
||||
const int face = next / 3;
|
||||
|
||||
const double dihedral = degrees(
|
||||
std::acos(la::dot(faceNormal_[face], faceNormal_[prevFace])));
|
||||
if (dihedral > minSharpAngle ||
|
||||
triIsFlatFace[face] != triIsFlatFace[prevFace] ||
|
||||
(triIsFlatFace[face] && triIsFlatFace[prevFace] &&
|
||||
!meshRelation_.triRef[face].SameFace(
|
||||
meshRelation_.triRef[prevFace]))) {
|
||||
break;
|
||||
}
|
||||
current = next;
|
||||
prevFace = face;
|
||||
} while (current != startEdge);
|
||||
|
||||
const int endEdge = current;
|
||||
|
||||
struct FaceEdge {
|
||||
int face;
|
||||
vec3 edgeVec;
|
||||
};
|
||||
|
||||
// calculate pseudo-normals between each sharp edge
|
||||
ForVert<FaceEdge>(
|
||||
endEdge,
|
||||
[this, centerPos, &vertNumSharp, &vertFlatFace](int current) {
|
||||
if (IsInsideQuad(current)) {
|
||||
return FaceEdge({current / 3, vec3(NAN)});
|
||||
}
|
||||
const int vert = halfedge_[current].endVert;
|
||||
vec3 pos = vertPos_[vert];
|
||||
if (vertNumSharp[vert] < 2) {
|
||||
// opposite vert has fixed normal
|
||||
const vec3 normal = vertFlatFace[vert] >= 0
|
||||
? faceNormal_[vertFlatFace[vert]]
|
||||
: vertNormal_[vert];
|
||||
// Flare out the normal we're calculating to give the edge a
|
||||
// more constant curvature to meet the opposite normal. Achieve
|
||||
// this by pointing the tangent toward the opposite bezier
|
||||
// control point instead of the vert itself.
|
||||
pos += vec3(
|
||||
TangentFromNormal(normal, halfedge_[current].pairedHalfedge));
|
||||
}
|
||||
return FaceEdge({current / 3, SafeNormalize(pos - centerPos)});
|
||||
},
|
||||
[this, &triIsFlatFace, &normals, &group, minSharpAngle](
|
||||
int, const FaceEdge& here, FaceEdge& next) {
|
||||
const double dihedral = degrees(std::acos(
|
||||
la::dot(faceNormal_[here.face], faceNormal_[next.face])));
|
||||
if (dihedral > minSharpAngle ||
|
||||
triIsFlatFace[here.face] != triIsFlatFace[next.face] ||
|
||||
(triIsFlatFace[here.face] && triIsFlatFace[next.face] &&
|
||||
!meshRelation_.triRef[here.face].SameFace(
|
||||
meshRelation_.triRef[next.face]))) {
|
||||
normals.push_back(vec3(0.0));
|
||||
}
|
||||
group.push_back(normals.size() - 1);
|
||||
if (std::isfinite(next.edgeVec.x)) {
|
||||
normals.back() +=
|
||||
SafeNormalize(la::cross(next.edgeVec, here.edgeVec)) *
|
||||
AngleBetween(here.edgeVec, next.edgeVec);
|
||||
} else {
|
||||
next.edgeVec = here.edgeVec;
|
||||
}
|
||||
});
|
||||
|
||||
for (auto& normal : normals) {
|
||||
normal = SafeNormalize(normal);
|
||||
}
|
||||
|
||||
int lastGroup = 0;
|
||||
int lastProp = -1;
|
||||
int newProp = -1;
|
||||
int idx = 0;
|
||||
ForVert(endEdge, [&](int current1) {
|
||||
const int prop = oldHalfedgeProp[current1];
|
||||
auto start = oldProperties.begin() + prop * oldNumProp;
|
||||
|
||||
if (group[idx] != lastGroup && group[idx] != 0 && prop == lastProp) {
|
||||
// split property vertex, duplicating but with an updated normal
|
||||
lastGroup = group[idx];
|
||||
newProp = NumPropVert();
|
||||
properties_.resize(properties_.size() + numProp);
|
||||
std::copy(start, start + oldNumProp,
|
||||
meshRelation_.properties.begin() + prop * numProp);
|
||||
for (const int i : {0, 1, 2})
|
||||
meshRelation_.properties[prop * numProp + normalIdx + i] =
|
||||
normal[i];
|
||||
});
|
||||
} else { // vertex has multiple normals
|
||||
const vec3 centerPos = vertPos_[vert];
|
||||
// Length degree
|
||||
std::vector<int> group;
|
||||
// Length number of normals
|
||||
std::vector<vec3> normals;
|
||||
int current = startEdge;
|
||||
int prevFace = current / 3;
|
||||
|
||||
do { // find a sharp edge to start on
|
||||
int next = NextHalfedge(halfedge_[current].pairedHalfedge);
|
||||
const int face = next / 3;
|
||||
|
||||
const double dihedral = degrees(
|
||||
std::acos(la::dot(faceNormal_[face], faceNormal_[prevFace])));
|
||||
if (dihedral > minSharpAngle ||
|
||||
triIsFlatFace[face] != triIsFlatFace[prevFace] ||
|
||||
(triIsFlatFace[face] && triIsFlatFace[prevFace] &&
|
||||
!meshRelation_.triRef[face].SameFace(
|
||||
meshRelation_.triRef[prevFace]))) {
|
||||
break;
|
||||
properties_.begin() + newProp * numProp);
|
||||
for (const int i : {0, 1, 2}) {
|
||||
properties_[newProp * numProp + normalIdx + i] =
|
||||
normals[group[idx]][i];
|
||||
}
|
||||
current = next;
|
||||
prevFace = face;
|
||||
} while (current != startEdge);
|
||||
|
||||
const int endEdge = current;
|
||||
|
||||
struct FaceEdge {
|
||||
int face;
|
||||
vec3 edgeVec;
|
||||
};
|
||||
|
||||
// calculate pseudo-normals between each sharp edge
|
||||
ForVert<FaceEdge>(
|
||||
endEdge,
|
||||
[this, centerPos, &vertNumSharp, &vertFlatFace](int current) {
|
||||
if (IsInsideQuad(current)) {
|
||||
return FaceEdge({current / 3, vec3(NAN)});
|
||||
}
|
||||
const int vert = halfedge_[current].endVert;
|
||||
vec3 pos = vertPos_[vert];
|
||||
const vec3 edgeVec = centerPos - pos;
|
||||
if (vertNumSharp[vert] < 2) {
|
||||
// opposite vert has fixed normal
|
||||
const vec3 normal = vertFlatFace[vert] >= 0
|
||||
? faceNormal_[vertFlatFace[vert]]
|
||||
: vertNormal_[vert];
|
||||
// Flare out the normal we're calculating to give the edge a
|
||||
// more constant curvature to meet the opposite normal. Achieve
|
||||
// this by pointing the tangent toward the opposite bezier
|
||||
// control point instead of the vert itself.
|
||||
pos += vec3(TangentFromNormal(
|
||||
normal, halfedge_[current].pairedHalfedge));
|
||||
}
|
||||
return FaceEdge({current / 3, SafeNormalize(pos - centerPos)});
|
||||
},
|
||||
[this, &triIsFlatFace, &normals, &group, minSharpAngle](
|
||||
int current, const FaceEdge& here, FaceEdge& next) {
|
||||
const double dihedral = degrees(std::acos(
|
||||
la::dot(faceNormal_[here.face], faceNormal_[next.face])));
|
||||
if (dihedral > minSharpAngle ||
|
||||
triIsFlatFace[here.face] != triIsFlatFace[next.face] ||
|
||||
(triIsFlatFace[here.face] && triIsFlatFace[next.face] &&
|
||||
!meshRelation_.triRef[here.face].SameFace(
|
||||
meshRelation_.triRef[next.face]))) {
|
||||
normals.push_back(vec3(0.0));
|
||||
}
|
||||
group.push_back(normals.size() - 1);
|
||||
if (std::isfinite(next.edgeVec.x)) {
|
||||
normals.back() +=
|
||||
SafeNormalize(la::cross(next.edgeVec, here.edgeVec)) *
|
||||
AngleBetween(here.edgeVec, next.edgeVec);
|
||||
} else {
|
||||
next.edgeVec = here.edgeVec;
|
||||
}
|
||||
});
|
||||
|
||||
for (auto& normal : normals) {
|
||||
normal = SafeNormalize(normal);
|
||||
} else if (prop != lastProp) {
|
||||
// update property vertex
|
||||
lastProp = prop;
|
||||
newProp = prop;
|
||||
std::copy(start, start + oldNumProp,
|
||||
properties_.begin() + prop * numProp);
|
||||
for (const int i : {0, 1, 2})
|
||||
properties_[prop * numProp + normalIdx + i] =
|
||||
normals[group[idx]][i];
|
||||
}
|
||||
|
||||
int lastGroup = 0;
|
||||
int lastProp = -1;
|
||||
int newProp = -1;
|
||||
int idx = 0;
|
||||
ForVert(endEdge, [&](int current1) {
|
||||
const int thisTri = current1 / 3;
|
||||
const int j = current1 - 3 * thisTri;
|
||||
const int prop = oldTriProp[thisTri][j];
|
||||
auto start = oldProperties.begin() + prop * oldNumProp;
|
||||
|
||||
if (group[idx] != lastGroup && group[idx] != 0 && prop == lastProp) {
|
||||
// split property vertex, duplicating but with an updated normal
|
||||
lastGroup = group[idx];
|
||||
newProp = NumPropVert();
|
||||
meshRelation_.properties.resize(meshRelation_.properties.size() +
|
||||
numProp);
|
||||
std::copy(start, start + oldNumProp,
|
||||
meshRelation_.properties.begin() + newProp * numProp);
|
||||
for (const int i : {0, 1, 2}) {
|
||||
meshRelation_.properties[newProp * numProp + normalIdx + i] =
|
||||
normals[group[idx]][i];
|
||||
}
|
||||
} else if (prop != lastProp) {
|
||||
// update property vertex
|
||||
lastProp = prop;
|
||||
newProp = prop;
|
||||
std::copy(start, start + oldNumProp,
|
||||
meshRelation_.properties.begin() + prop * numProp);
|
||||
for (const int i : {0, 1, 2})
|
||||
meshRelation_.properties[prop * numProp + normalIdx + i] =
|
||||
normals[group[idx]][i];
|
||||
}
|
||||
|
||||
// point to updated property vertex
|
||||
meshRelation_.triProperties[thisTri][j] = newProp;
|
||||
++idx;
|
||||
});
|
||||
}
|
||||
// point to updated property vertex
|
||||
halfedge_[current1].propVert = newProp;
|
||||
++idx;
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -770,7 +755,7 @@ void Manifold::Impl::CreateTangents(int normalIdx) {
|
|||
ZoneScoped;
|
||||
const int numVert = NumVert();
|
||||
const int numHalfedge = halfedge_.size();
|
||||
halfedgeTangent_.resize(0);
|
||||
halfedgeTangent_.clear();
|
||||
Vec<vec4> tangent(numHalfedge);
|
||||
Vec<bool> fixedHalfedge(numHalfedge, false);
|
||||
|
||||
|
@ -854,7 +839,7 @@ void Manifold::Impl::CreateTangents(int normalIdx) {
|
|||
void Manifold::Impl::CreateTangents(std::vector<Smoothness> sharpenedEdges) {
|
||||
ZoneScoped;
|
||||
const int numHalfedge = halfedge_.size();
|
||||
halfedgeTangent_.resize(0);
|
||||
halfedgeTangent_.clear();
|
||||
Vec<vec4> tangent(numHalfedge);
|
||||
Vec<bool> fixedHalfedge(numHalfedge, false);
|
||||
|
||||
|
@ -994,9 +979,11 @@ void Manifold::Impl::Refine(std::function<int(vec3, vec4, vec4)> edgeDivisions,
|
|||
InterpTri({vertPos_, vertBary, &old}));
|
||||
}
|
||||
|
||||
halfedgeTangent_.resize(0);
|
||||
halfedgeTangent_.clear();
|
||||
Finish();
|
||||
CreateFaces();
|
||||
if (old.halfedgeTangent_.size() == old.halfedge_.size()) {
|
||||
MarkCoplanar();
|
||||
}
|
||||
meshRelation_.originalID = -1;
|
||||
}
|
||||
|
||||
|
|
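The sharp-edge tests repeated through SetNormals above all hinge on the dihedral angle between adjacent face normals exceeding minSharpAngle. A standalone sketch of that test, with std::array standing in for the library's vec3 and illustrative names throughout:

#include <array>
#include <cmath>
#include <iostream>

using Vec3 = std::array<double, 3>;

double Dot(const Vec3& a, const Vec3& b) {
  return a[0] * b[0] + a[1] * b[1] + a[2] * b[2];
}

// Both normals are assumed to be unit length, as the cached face normals are.
double DihedralDegrees(const Vec3& n0, const Vec3& n1) {
  const double c = std::fmin(1.0, std::fmax(-1.0, Dot(n0, n1)));
  return std::acos(c) * 180.0 / 3.14159265358979323846;
}

int main() {
  const Vec3 up{0, 0, 1};
  const Vec3 tilted{0, std::sin(0.5), std::cos(0.5)};  // about 28.6 degrees off vertical
  const double minSharpAngle = 20.0;
  std::cout << (DihedralDegrees(up, tilted) > minSharpAngle ? "sharp" : "smooth") << "\n";
}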
191
thirdparty/manifold/src/sort.cpp
vendored
|
@ -15,8 +15,9 @@
|
|||
#include <atomic>
|
||||
#include <set>
|
||||
|
||||
#include "./impl.h"
|
||||
#include "./parallel.h"
|
||||
#include "impl.h"
|
||||
#include "parallel.h"
|
||||
#include "shared.h"
|
||||
|
||||
namespace {
|
||||
using namespace manifold;
|
||||
|
@ -31,37 +32,6 @@ uint32_t MortonCode(vec3 position, Box bBox) {
|
|||
return Collider::MortonCode(position, bBox);
|
||||
}
|
||||
|
||||
struct Reindex {
|
||||
VecView<const int> indexInv;
|
||||
|
||||
void operator()(Halfedge& edge) {
|
||||
if (edge.startVert < 0) return;
|
||||
edge.startVert = indexInv[edge.startVert];
|
||||
edge.endVert = indexInv[edge.endVert];
|
||||
}
|
||||
};
|
||||
|
||||
struct MarkProp {
|
||||
VecView<int> keep;
|
||||
|
||||
void operator()(ivec3 triProp) {
|
||||
for (const int i : {0, 1, 2}) {
|
||||
reinterpret_cast<std::atomic<int>*>(&keep[triProp[i]])
|
||||
->store(1, std::memory_order_relaxed);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
struct ReindexProps {
|
||||
VecView<const int> old2new;
|
||||
|
||||
void operator()(ivec3& triProp) {
|
||||
for (const int i : {0, 1, 2}) {
|
||||
triProp[i] = old2new[triProp[i]];
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
struct ReindexFace {
|
||||
VecView<Halfedge> halfedge;
|
||||
VecView<vec4> halfedgeTangent;
|
||||
|
@ -180,18 +150,20 @@ bool MergeMeshGLP(MeshGLP<Precision, I>& mesh) {
|
|||
Permute(vertMorton, vertNew2Old);
|
||||
Permute(vertBox, vertNew2Old);
|
||||
Permute(openVerts, vertNew2Old);
|
||||
Collider collider(vertBox, vertMorton);
|
||||
SparseIndices toMerge = collider.Collisions<true>(vertBox.cview());
|
||||
|
||||
Collider collider(vertBox, vertMorton);
|
||||
UnionFind<> uf(numVert);
|
||||
|
||||
auto f = [&uf, &openVerts](int a, int b) {
|
||||
return uf.unionXY(openVerts[a], openVerts[b]);
|
||||
};
|
||||
auto recorder = MakeSimpleRecorder(f);
|
||||
collider.Collisions<true>(vertBox.cview(), recorder, false);
|
||||
|
||||
for (size_t i = 0; i < mesh.mergeFromVert.size(); ++i) {
|
||||
uf.unionXY(static_cast<int>(mesh.mergeFromVert[i]),
|
||||
static_cast<int>(mesh.mergeToVert[i]));
|
||||
}
|
||||
for (size_t i = 0; i < toMerge.size(); ++i) {
|
||||
uf.unionXY(openVerts[toMerge.Get(i, false)],
|
||||
openVerts[toMerge.Get(i, true)]);
|
||||
}
|
||||
|
||||
mesh.mergeToVert.clear();
|
||||
mesh.mergeFromVert.clear();
|
||||
|
@ -221,7 +193,7 @@ void Manifold::Impl::Finish() {
|
|||
SetEpsilon(epsilon_);
|
||||
if (!bBox_.IsFinite()) {
|
||||
// Decimated out of existence - early out.
|
||||
MarkFailure(Error::NoError);
|
||||
MakeEmpty(Error::NoError);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -237,31 +209,33 @@ void Manifold::Impl::Finish() {
|
|||
"Not an even number of faces after sorting faces!");
|
||||
|
||||
#ifdef MANIFOLD_DEBUG
|
||||
auto MaxOrMinus = [](int a, int b) {
|
||||
return std::min(a, b) < 0 ? -1 : std::max(a, b);
|
||||
};
|
||||
int face = 0;
|
||||
Halfedge extrema = {0, 0, 0};
|
||||
for (size_t i = 0; i < halfedge_.size(); i++) {
|
||||
Halfedge e = halfedge_[i];
|
||||
if (!e.IsForward()) std::swap(e.startVert, e.endVert);
|
||||
extrema.startVert = std::min(extrema.startVert, e.startVert);
|
||||
extrema.endVert = std::min(extrema.endVert, e.endVert);
|
||||
extrema.pairedHalfedge =
|
||||
MaxOrMinus(extrema.pairedHalfedge, e.pairedHalfedge);
|
||||
face = MaxOrMinus(face, i / 3);
|
||||
if (ManifoldParams().intermediateChecks) {
|
||||
auto MaxOrMinus = [](int a, int b) {
|
||||
return std::min(a, b) < 0 ? -1 : std::max(a, b);
|
||||
};
|
||||
int face = 0;
|
||||
Halfedge extrema = {0, 0, 0};
|
||||
for (size_t i = 0; i < halfedge_.size(); i++) {
|
||||
Halfedge e = halfedge_[i];
|
||||
if (!e.IsForward()) std::swap(e.startVert, e.endVert);
|
||||
extrema.startVert = std::min(extrema.startVert, e.startVert);
|
||||
extrema.endVert = std::min(extrema.endVert, e.endVert);
|
||||
extrema.pairedHalfedge =
|
||||
MaxOrMinus(extrema.pairedHalfedge, e.pairedHalfedge);
|
||||
face = MaxOrMinus(face, i / 3);
|
||||
}
|
||||
DEBUG_ASSERT(extrema.startVert >= 0, topologyErr,
|
||||
"Vertex index is negative!");
|
||||
DEBUG_ASSERT(extrema.endVert < static_cast<int>(NumVert()), topologyErr,
|
||||
"Vertex index exceeds number of verts!");
|
||||
DEBUG_ASSERT(extrema.pairedHalfedge >= 0, topologyErr,
|
||||
"Halfedge index is negative!");
|
||||
DEBUG_ASSERT(extrema.pairedHalfedge < 2 * static_cast<int>(NumEdge()),
|
||||
topologyErr, "Halfedge index exceeds number of halfedges!");
|
||||
DEBUG_ASSERT(face >= 0, topologyErr, "Face index is negative!");
|
||||
DEBUG_ASSERT(face < static_cast<int>(NumTri()), topologyErr,
|
||||
"Face index exceeds number of faces!");
|
||||
}
|
||||
DEBUG_ASSERT(extrema.startVert >= 0, topologyErr,
|
||||
"Vertex index is negative!");
|
||||
DEBUG_ASSERT(extrema.endVert < static_cast<int>(NumVert()), topologyErr,
|
||||
"Vertex index exceeds number of verts!");
|
||||
DEBUG_ASSERT(extrema.pairedHalfedge >= 0, topologyErr,
|
||||
"Halfedge index is negative!");
|
||||
DEBUG_ASSERT(extrema.pairedHalfedge < 2 * static_cast<int>(NumEdge()),
|
||||
topologyErr, "Halfedge index exceeds number of halfedges!");
|
||||
DEBUG_ASSERT(face >= 0, topologyErr, "Face index is negative!");
|
||||
DEBUG_ASSERT(face < static_cast<int>(NumTri()), topologyErr,
|
||||
"Face index exceeds number of faces!");
|
||||
#endif
|
||||
|
||||
DEBUG_ASSERT(meshRelation_.triRef.size() == NumTri() ||
|
||||
|
@ -301,11 +275,12 @@ void Manifold::Impl::SortVerts() {
|
|||
|
||||
// Verts were flagged for removal with NaNs and assigned kNoCode to sort
|
||||
// them to the end, which allows them to be removed.
|
||||
const auto newNumVert = std::find_if(vertNew2Old.begin(), vertNew2Old.end(),
|
||||
[&vertMorton](const int vert) {
|
||||
return vertMorton[vert] == kNoCode;
|
||||
}) -
|
||||
vertNew2Old.begin();
|
||||
const auto newNumVert =
|
||||
std::lower_bound(vertNew2Old.begin(), vertNew2Old.end(), kNoCode,
|
||||
[&vertMorton](const int vert, const uint32_t val) {
|
||||
return vertMorton[vert] < val;
|
||||
}) -
|
||||
vertNew2Old.begin();
|
||||
|
||||
vertNew2Old.resize(newNumVert);
|
||||
Permute(vertPos_, vertNew2Old);
|
||||
|
@ -326,31 +301,41 @@ void Manifold::Impl::ReindexVerts(const Vec<int>& vertNew2Old,
|
|||
Vec<int> vertOld2New(oldNumVert);
|
||||
scatter(countAt(0), countAt(static_cast<int>(NumVert())), vertNew2Old.begin(),
|
||||
vertOld2New.begin());
|
||||
const bool hasProp = NumProp() > 0;
|
||||
for_each(autoPolicy(oldNumVert, 1e5), halfedge_.begin(), halfedge_.end(),
|
||||
Reindex({vertOld2New}));
|
||||
[&vertOld2New, hasProp](Halfedge& edge) {
|
||||
if (edge.startVert < 0) return;
|
||||
edge.startVert = vertOld2New[edge.startVert];
|
||||
edge.endVert = vertOld2New[edge.endVert];
|
||||
if (!hasProp) {
|
||||
edge.propVert = edge.startVert;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Removes unreferenced property verts and reindexes triProperties.
|
||||
* Removes unreferenced property verts and reindexes propVerts.
|
||||
*/
|
||||
void Manifold::Impl::CompactProps() {
|
||||
ZoneScoped;
|
||||
if (meshRelation_.numProp == 0) return;
|
||||
if (numProp_ == 0) return;
|
||||
|
||||
const auto numVerts = meshRelation_.properties.size() / meshRelation_.numProp;
|
||||
const int numProp = NumProp();
|
||||
const auto numVerts = properties_.size() / numProp;
|
||||
Vec<int> keep(numVerts, 0);
|
||||
auto policy = autoPolicy(numVerts, 1e5);
|
||||
|
||||
for_each(policy, meshRelation_.triProperties.cbegin(),
|
||||
meshRelation_.triProperties.cend(), MarkProp({keep}));
|
||||
for_each(policy, halfedge_.cbegin(), halfedge_.cend(), [&keep](Halfedge h) {
|
||||
reinterpret_cast<std::atomic<int>*>(&keep[h.propVert])
|
||||
->store(1, std::memory_order_relaxed);
|
||||
});
|
||||
Vec<int> propOld2New(numVerts + 1, 0);
|
||||
inclusive_scan(keep.begin(), keep.end(), propOld2New.begin() + 1);
|
||||
|
||||
Vec<double> oldProp = meshRelation_.properties;
|
||||
Vec<double> oldProp = properties_;
|
||||
const int numVertsNew = propOld2New[numVerts];
|
||||
const int numProp = meshRelation_.numProp;
|
||||
auto& properties = meshRelation_.properties;
|
||||
properties.resize(numProp * numVertsNew);
|
||||
auto& properties = properties_;
|
||||
properties.resize_nofill(numProp * numVertsNew);
|
||||
for_each_n(
|
||||
policy, countAt(0), numVerts,
|
||||
[&properties, &oldProp, &propOld2New, &keep, &numProp](const int oldIdx) {
|
||||
|
@ -360,8 +345,10 @@ void Manifold::Impl::CompactProps() {
|
|||
oldProp[oldIdx * numProp + p];
|
||||
}
|
||||
});
|
||||
for_each_n(policy, meshRelation_.triProperties.begin(), NumTri(),
|
||||
ReindexProps({propOld2New}));
|
||||
for_each(policy, halfedge_.begin(), halfedge_.end(),
|
||||
[&propOld2New](Halfedge& edge) {
|
||||
edge.propVert = propOld2New[edge.propVert];
|
||||
});
|
||||
}
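CompactProps above follows a common compaction pattern: mark which property vertices are still referenced, take an inclusive prefix sum of the flags to assign each kept vertex its new index, then rewrite every propVert through that map. A serial standalone sketch of the same idea, with std::inclusive_scan standing in for the library's parallel scan and illustrative data:

#include <iostream>
#include <numeric>
#include <vector>

int main() {
  const std::vector<int> keep = {1, 0, 1, 1, 0};  // referenced flags per old prop vert
  std::vector<int> old2new(keep.size() + 1, 0);
  std::inclusive_scan(keep.begin(), keep.end(), old2new.begin() + 1);

  // old2new[i] is the new index of old vertex i (meaningful when keep[i] == 1).
  std::vector<int> propVert = {0, 2, 3, 2};  // halfedge propVert references
  for (int& v : propVert) v = old2new[v];
  for (int v : propVert) std::cout << v << " ";  // 0 1 2 1
  std::cout << "\n";
}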
|
||||
|
||||
/**
|
||||
|
@ -372,8 +359,9 @@ void Manifold::Impl::CompactProps() {
|
|||
void Manifold::Impl::GetFaceBoxMorton(Vec<Box>& faceBox,
|
||||
Vec<uint32_t>& faceMorton) const {
|
||||
ZoneScoped;
|
||||
faceBox.resize(NumTri());
|
||||
faceMorton.resize(NumTri());
|
||||
// faceBox should be initialized
|
||||
faceBox.resize(NumTri(), Box());
|
||||
faceMorton.resize_nofill(NumTri());
|
||||
for_each_n(autoPolicy(NumTri(), 1e5), countAt(0), NumTri(),
|
||||
[this, &faceBox, &faceMorton](const int face) {
|
||||
// Removed tris are marked by all halfedges having pairedHalfedge
|
||||
|
@ -413,11 +401,12 @@ void Manifold::Impl::SortFaces(Vec<Box>& faceBox, Vec<uint32_t>& faceMorton) {
|
|||
|
||||
// Tris were flagged for removal with pairedHalfedge = -1 and assigned kNoCode
|
||||
// to sort them to the end, which allows them to be removed.
|
||||
const int newNumTri = std::find_if(faceNew2Old.begin(), faceNew2Old.end(),
|
||||
[&faceMorton](const int face) {
|
||||
return faceMorton[face] == kNoCode;
|
||||
}) -
|
||||
faceNew2Old.begin();
|
||||
const int newNumTri =
|
||||
std::lower_bound(faceNew2Old.begin(), faceNew2Old.end(), kNoCode,
|
||||
[&faceMorton](const int face, const uint32_t val) {
|
||||
return faceMorton[face] < val;
|
||||
}) -
|
||||
faceNew2Old.begin();
|
||||
faceNew2Old.resize(newNumTri);
|
||||
|
||||
Permute(faceMorton, faceNew2Old);
|
||||
|
@ -435,8 +424,6 @@ void Manifold::Impl::GatherFaces(const Vec<int>& faceNew2Old) {
|
|||
const auto numTri = faceNew2Old.size();
|
||||
if (meshRelation_.triRef.size() == NumTri())
|
||||
Permute(meshRelation_.triRef, faceNew2Old);
|
||||
if (meshRelation_.triProperties.size() == NumTri())
|
||||
Permute(meshRelation_.triProperties, faceNew2Old);
|
||||
if (faceNormal_.size() == NumTri()) Permute(faceNormal_, faceNew2Old);
|
||||
|
||||
Vec<Halfedge> oldHalfedge(std::move(halfedge_));
|
||||
|
@ -446,8 +433,9 @@ void Manifold::Impl::GatherFaces(const Vec<int>& faceNew2Old) {
|
|||
scatter(countAt(0_uz), countAt(numTri), faceNew2Old.begin(),
|
||||
faceOld2New.begin());
|
||||
|
||||
halfedge_.resize(3 * numTri);
|
||||
if (oldHalfedgeTangent.size() != 0) halfedgeTangent_.resize(3 * numTri);
|
||||
halfedge_.resize_nofill(3 * numTri);
|
||||
if (oldHalfedgeTangent.size() != 0)
|
||||
halfedgeTangent_.resize_nofill(3 * numTri);
|
||||
for_each_n(policy, countAt(0), numTri,
|
||||
ReindexFace({halfedge_, halfedgeTangent_, oldHalfedge,
|
||||
oldHalfedgeTangent, faceNew2Old, faceOld2New}));
|
||||
|
@ -457,7 +445,7 @@ void Manifold::Impl::GatherFaces(const Impl& old, const Vec<int>& faceNew2Old) {
|
|||
ZoneScoped;
|
||||
const auto numTri = faceNew2Old.size();
|
||||
|
||||
meshRelation_.triRef.resize(numTri);
|
||||
meshRelation_.triRef.resize_nofill(numTri);
|
||||
gather(faceNew2Old.begin(), faceNew2Old.end(),
|
||||
old.meshRelation_.triRef.begin(), meshRelation_.triRef.begin());
|
||||
|
||||
|
@ -465,17 +453,13 @@ void Manifold::Impl::GatherFaces(const Impl& old, const Vec<int>& faceNew2Old) {
|
|||
meshRelation_.meshIDtransform[pair.first] = pair.second;
|
||||
}
|
||||
|
||||
if (old.meshRelation_.triProperties.size() > 0) {
|
||||
meshRelation_.triProperties.resize(numTri);
|
||||
gather(faceNew2Old.begin(), faceNew2Old.end(),
|
||||
old.meshRelation_.triProperties.begin(),
|
||||
meshRelation_.triProperties.begin());
|
||||
meshRelation_.numProp = old.meshRelation_.numProp;
|
||||
meshRelation_.properties = old.meshRelation_.properties;
|
||||
if (old.NumProp() > 0) {
|
||||
numProp_ = old.numProp_;
|
||||
properties_ = old.properties_;
|
||||
}
|
||||
|
||||
if (old.faceNormal_.size() == old.NumTri()) {
|
||||
faceNormal_.resize(numTri);
|
||||
faceNormal_.resize_nofill(numTri);
|
||||
gather(faceNew2Old.begin(), faceNew2Old.end(), old.faceNormal_.begin(),
|
||||
faceNormal_.begin());
|
||||
}
|
||||
|
@ -484,8 +468,9 @@ void Manifold::Impl::GatherFaces(const Impl& old, const Vec<int>& faceNew2Old) {
|
|||
scatter(countAt(0_uz), countAt(numTri), faceNew2Old.begin(),
|
||||
faceOld2New.begin());
|
||||
|
||||
halfedge_.resize(3 * numTri);
|
||||
if (old.halfedgeTangent_.size() != 0) halfedgeTangent_.resize(3 * numTri);
|
||||
halfedge_.resize_nofill(3 * numTri);
|
||||
if (old.halfedgeTangent_.size() != 0)
|
||||
halfedgeTangent_.resize_nofill(3 * numTri);
|
||||
for_each_n(autoPolicy(numTri, 1e5), countAt(0), numTri,
|
||||
ReindexFace({halfedge_, halfedgeTangent_, old.halfedge_,
|
||||
old.halfedgeTangent_, faceNew2Old, faceOld2New}));
|
||||
|
|
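The change from std::find_if to std::lower_bound in SortVerts and SortFaces above works because the permutation is already sorted by Morton code and the removed entries carry kNoCode, which sorts after every real code, so they form a suffix that a binary search can locate. A standalone sketch of that pattern with illustrative data (the kNoCode value here is assumed; what matters is that it sorts last):

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  constexpr uint32_t kNoCode = 0xFFFFFFFFu;
  std::vector<uint32_t> vertMorton = {7, 3, kNoCode, 1, kNoCode};
  std::vector<int> vertNew2Old = {0, 1, 2, 3, 4};

  // Sort the permutation by Morton code, as SortVerts does before Permute.
  std::stable_sort(vertNew2Old.begin(), vertNew2Old.end(),
                   [&](int a, int b) { return vertMorton[a] < vertMorton[b]; });

  const auto newNumVert =
      std::lower_bound(vertNew2Old.begin(), vertNew2Old.end(), kNoCode,
                       [&](int vert, uint32_t val) { return vertMorton[vert] < val; }) -
      vertNew2Old.begin();
  std::cout << newNumVert << "\n";  // 3: the two kNoCode verts fall off the end
}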
146
thirdparty/manifold/src/subdivision.cpp
vendored
|
@ -12,8 +12,8 @@
|
|||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "./impl.h"
|
||||
#include "./parallel.h"
|
||||
#include "impl.h"
|
||||
#include "parallel.h"
|
||||
|
||||
template <>
|
||||
struct std::hash<manifold::ivec4> {
|
||||
|
@ -113,7 +113,7 @@ class Partition {
|
|||
}
|
||||
const int offset = interiorOffset - newVerts.size();
|
||||
size_t old = newVerts.size();
|
||||
newVerts.resize(vertBary.size());
|
||||
newVerts.resize_nofill(vertBary.size());
|
||||
std::iota(newVerts.begin() + old, newVerts.end(), old + offset);
|
||||
|
||||
const int numTri = triVert.size();
|
||||
|
@ -535,7 +535,7 @@ Vec<Barycentric> Manifold::Impl::Subdivide(
|
|||
auto Added = [&edgeAdded, &half2Edge, thisAdded, this](int hIdx) {
|
||||
int longest = 0;
|
||||
int total = 0;
|
||||
for (int j : {0, 1, 2}) {
|
||||
for (int _ : {0, 1, 2}) {
|
||||
const int added = edgeAdded[half2Edge[hIdx]];
|
||||
longest = la::max(longest, added);
|
||||
total += added;
|
||||
|
@ -586,7 +586,7 @@ Vec<Barycentric> Manifold::Impl::Subdivide(
|
|||
|
||||
std::vector<Partition> subTris(numTri);
|
||||
for_each_n(policy, countAt(0), numTri,
|
||||
[this, &subTris, &half2Edge, &edgeAdded, &faceHalfedges](int tri) {
|
||||
[&subTris, &half2Edge, &edgeAdded, &faceHalfedges](int tri) {
|
||||
const ivec4 halfedges = faceHalfedges[tri];
|
||||
ivec4 divisions(0);
|
||||
for (const int i : {0, 1, 2, 3}) {
|
||||
|
@ -684,18 +684,19 @@ Vec<Barycentric> Manifold::Impl::Subdivide(
|
|||
});
|
||||
vertPos_ = newVertPos;
|
||||
|
||||
faceNormal_.resize(0);
|
||||
faceNormal_.clear();
|
||||
|
||||
if (meshRelation_.numProp > 0) {
|
||||
if (numProp_ > 0) {
|
||||
const int numPropVert = NumPropVert();
|
||||
const int addedVerts = NumVert() - numVert;
|
||||
const int propOffset = numPropVert - numVert;
|
||||
Vec<double> prop(meshRelation_.numProp *
|
||||
(numPropVert + addedVerts + totalEdgeAdded));
|
||||
// duplicate the prop verts along all new edges even though this is
|
||||
// unnecessary for edges that share the same prop verts. The duplicates will
|
||||
// be removed by CompactProps.
|
||||
Vec<double> prop(numProp_ * (numPropVert + addedVerts + totalEdgeAdded));
|
||||
|
||||
// copy retained prop verts
|
||||
copy(meshRelation_.properties.begin(), meshRelation_.properties.end(),
|
||||
prop.begin());
|
||||
copy(properties_.begin(), properties_.end(), prop.begin());
|
||||
|
||||
// copy interior prop verts and forward edge prop verts
|
||||
for_each_n(
|
||||
|
@ -705,104 +706,99 @@ Vec<Barycentric> Manifold::Impl::Subdivide(
|
|||
const int vert = numPropVert + i;
|
||||
const Barycentric bary = vertBary[numVert + i];
|
||||
const ivec4 halfedges = faceHalfedges[bary.tri];
|
||||
auto& rel = meshRelation_;
|
||||
const int numProp = NumProp();
|
||||
|
||||
for (int p = 0; p < rel.numProp; ++p) {
|
||||
for (int p = 0; p < numProp; ++p) {
|
||||
if (halfedges[3] < 0) {
|
||||
vec3 triProp;
|
||||
for (const int i : {0, 1, 2}) {
|
||||
triProp[i] = rel.properties[rel.triProperties[bary.tri][i] *
|
||||
rel.numProp +
|
||||
p];
|
||||
triProp[i] =
|
||||
properties_[halfedge_[3 * bary.tri + i].propVert * numProp +
|
||||
p];
|
||||
}
|
||||
prop[vert * rel.numProp + p] = la::dot(triProp, vec3(bary.uvw));
|
||||
prop[vert * numProp + p] = la::dot(triProp, vec3(bary.uvw));
|
||||
} else {
|
||||
vec4 quadProp;
|
||||
for (const int i : {0, 1, 2, 3}) {
|
||||
const int tri = halfedges[i] / 3;
|
||||
const int j = halfedges[i] % 3;
|
||||
quadProp[i] =
|
||||
rel.properties[rel.triProperties[tri][j] * rel.numProp + p];
|
||||
properties_[halfedge_[halfedges[i]].propVert * numProp + p];
|
||||
}
|
||||
prop[vert * rel.numProp + p] = la::dot(quadProp, bary.uvw);
|
||||
prop[vert * numProp + p] = la::dot(quadProp, bary.uvw);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// copy backward edge prop verts
|
||||
// copy backward edge prop verts, some of which will be unreferenced
|
||||
// duplicates.
|
||||
for_each_n(policy, countAt(0), numEdge,
|
||||
[this, &prop, &edges, &edgeAdded, &edgeOffset, propOffset,
|
||||
addedVerts](const int i) {
|
||||
const int n = edgeAdded[i];
|
||||
const int offset = edgeOffset[i] + propOffset + addedVerts;
|
||||
auto& rel = meshRelation_;
|
||||
const int numProp = NumProp();
|
||||
|
||||
const double frac = 1.0 / (n + 1);
|
||||
const int halfedgeIdx =
|
||||
halfedge_[edges[i].halfedgeIdx].pairedHalfedge;
|
||||
const int v0 = halfedgeIdx % 3;
|
||||
const int tri = halfedgeIdx / 3;
|
||||
const int prop0 = rel.triProperties[tri][v0];
|
||||
const int prop1 = rel.triProperties[tri][Next3(v0)];
|
||||
const int prop0 = halfedge_[halfedgeIdx].propVert;
|
||||
const int prop1 =
|
||||
halfedge_[NextHalfedge(halfedgeIdx)].propVert;
|
||||
for (int i = 0; i < n; ++i) {
|
||||
for (int p = 0; p < rel.numProp; ++p) {
|
||||
prop[(offset + i) * rel.numProp + p] =
|
||||
la::lerp(rel.properties[prop0 * rel.numProp + p],
|
||||
rel.properties[prop1 * rel.numProp + p],
|
||||
(i + 1) * frac);
|
||||
for (int p = 0; p < numProp; ++p) {
|
||||
prop[(offset + i) * numProp + p] = la::lerp(
|
||||
properties_[prop0 * numProp + p],
|
||||
properties_[prop1 * numProp + p], (i + 1) * frac);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
Vec<ivec3> triProp(triVerts.size());
|
||||
for_each_n(policy, countAt(0), numTri,
|
||||
[this, &triProp, &subTris, &edgeOffset, &half2Edge, &triOffset,
|
||||
&interiorOffset, &faceHalfedges, propOffset,
|
||||
addedVerts](const int tri) {
|
||||
const ivec4 halfedges = faceHalfedges[tri];
|
||||
if (halfedges[0] < 0) return;
|
||||
for_each_n(
|
||||
policy, countAt(0), numTri,
|
||||
[this, &triProp, &subTris, &edgeOffset, &half2Edge, &triOffset,
|
||||
&interiorOffset, &faceHalfedges, propOffset,
|
||||
addedVerts](const int tri) {
|
||||
const ivec4 halfedges = faceHalfedges[tri];
|
||||
if (halfedges[0] < 0) return;
|
||||
|
||||
auto& rel = meshRelation_;
|
||||
ivec4 tri3;
|
||||
ivec4 edgeOffsets;
|
||||
bvec4 edgeFwd(true);
|
||||
for (const int i : {0, 1, 2, 3}) {
|
||||
if (halfedges[i] < 0) {
|
||||
tri3[i] = -1;
|
||||
continue;
|
||||
}
|
||||
const int thisTri = halfedges[i] / 3;
|
||||
const int j = halfedges[i] % 3;
|
||||
const Halfedge& halfedge = halfedge_[halfedges[i]];
|
||||
tri3[i] = rel.triProperties[thisTri][j];
|
||||
edgeOffsets[i] = edgeOffset[half2Edge[halfedges[i]]];
|
||||
if (!halfedge.IsForward()) {
|
||||
const int pairTri = halfedge.pairedHalfedge / 3;
|
||||
const int k = halfedge.pairedHalfedge % 3;
|
||||
if (rel.triProperties[pairTri][k] !=
|
||||
rel.triProperties[thisTri][Next3(j)] ||
|
||||
rel.triProperties[pairTri][Next3(k)] !=
|
||||
rel.triProperties[thisTri][j]) {
|
||||
edgeOffsets[i] += addedVerts;
|
||||
} else {
|
||||
edgeFwd[i] = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
ivec4 tri3;
|
||||
ivec4 edgeOffsets;
|
||||
bvec4 edgeFwd(true);
|
||||
for (const int i : {0, 1, 2, 3}) {
|
||||
if (halfedges[i] < 0) {
|
||||
tri3[i] = -1;
|
||||
continue;
|
||||
}
|
||||
const Halfedge& halfedge = halfedge_[halfedges[i]];
|
||||
tri3[i] = halfedge.propVert;
|
||||
edgeOffsets[i] = edgeOffset[half2Edge[halfedges[i]]];
|
||||
if (!halfedge.IsForward()) {
|
||||
if (halfedge_[halfedge.pairedHalfedge].propVert !=
|
||||
halfedge_[NextHalfedge(halfedges[i])].propVert ||
|
||||
halfedge_[NextHalfedge(halfedge.pairedHalfedge)].propVert !=
|
||||
halfedge.propVert) {
|
||||
// if the edge doesn't match, point to the backward edge
|
||||
// propverts.
|
||||
edgeOffsets[i] += addedVerts;
|
||||
} else {
|
||||
edgeFwd[i] = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Vec<ivec3> newTris = subTris[tri].Reindex(
|
||||
tri3, edgeOffsets + propOffset, edgeFwd,
|
||||
interiorOffset[tri] + propOffset);
|
||||
copy(newTris.begin(), newTris.end(),
|
||||
triProp.begin() + triOffset[tri]);
|
||||
});
|
||||
Vec<ivec3> newTris =
|
||||
subTris[tri].Reindex(tri3, edgeOffsets + propOffset, edgeFwd,
|
||||
interiorOffset[tri] + propOffset);
|
||||
copy(newTris.begin(), newTris.end(),
|
||||
triProp.begin() + triOffset[tri]);
|
||||
});
|
||||
|
||||
meshRelation_.properties = prop;
|
||||
meshRelation_.triProperties = triProp;
|
||||
properties_ = prop;
|
||||
CreateHalfedges(triProp, triVerts);
|
||||
} else {
|
||||
CreateHalfedges(triVerts);
|
||||
}
|
||||
|
||||
CreateHalfedges(triVerts);
|
||||
|
||||
return vertBary;
|
||||
}
|
||||
|
||||
|
|
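The property interpolation in the Subdivide hunks above fills each new property vertex with the dot product of the corner values and the barycentric weights bary.uvw. A standalone sketch of that per-channel interpolation, with plain arrays standing in for the library's containers and illustrative values:

#include <array>
#include <iostream>

int main() {
  // One property channel (say a texture coordinate) at the three corners.
  const std::array<double, 3> cornerProp = {0.0, 1.0, 0.5};
  // Barycentric coordinates of the new vertex inside the triangle; they sum to 1.
  const std::array<double, 3> uvw = {0.25, 0.25, 0.5};

  double newProp = 0.0;
  for (int i = 0; i < 3; ++i) newProp += cornerProp[i] * uvw[i];
  std::cout << newProp << "\n";  // 0.5
}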
110
thirdparty/manifold/src/svd.h
vendored
|
@ -82,7 +82,7 @@ struct QR {
|
|||
mat3 Q, R;
|
||||
};
|
||||
// Calculates the squared norm of the vector.
|
||||
inline double Dist2(vec3 v) { return la::dot(v, v); }
|
||||
inline double Dist2(vec3 v) { return manifold::la::dot(v, v); }
|
||||
// For an explanation of the math see
|
||||
// http://pages.cs.wisc.edu/~sifakis/papers/SVD_TR1690.pdf Computing the
|
||||
// Singular Value Decomposition of 3 x 3 matrices with minimal branching and
|
||||
|
@ -101,29 +101,26 @@ inline Givens ApproximateGivensQuaternion(Symmetric3x3& A) {
|
|||
inline void JacobiConjugation(const int32_t x, const int32_t y, const int32_t z,
|
||||
Symmetric3x3& S, vec4& q) {
|
||||
auto g = ApproximateGivensQuaternion(S);
|
||||
double scale = 1.0 / fma(g.ch, g.ch, g.sh * g.sh);
|
||||
double a = fma(g.ch, g.ch, -g.sh * g.sh) * scale;
|
||||
double scale = 1.0 / (g.ch * g.ch + g.sh * g.sh);
|
||||
double a = (g.ch * g.ch - g.sh * g.sh) * scale;
|
||||
double b = 2.0 * g.sh * g.ch * scale;
|
||||
Symmetric3x3 _S = S;
|
||||
// perform conjugation S = Q'*S*Q
|
||||
S.m_00 =
|
||||
fma(a, fma(a, _S.m_00, b * _S.m_10), b * (fma(a, _S.m_10, b * _S.m_11)));
|
||||
S.m_10 = fma(a, fma(-b, _S.m_00, a * _S.m_10),
|
||||
b * (fma(-b, _S.m_10, a * _S.m_11)));
|
||||
S.m_11 = fma(-b, fma(-b, _S.m_00, a * _S.m_10),
|
||||
a * (fma(-b, _S.m_10, a * _S.m_11)));
|
||||
S.m_20 = fma(a, _S.m_20, b * _S.m_21);
|
||||
S.m_21 = fma(-b, _S.m_20, a * _S.m_21);
|
||||
S.m_00 = a * (a * _S.m_00 + b * _S.m_10) + b * (a * _S.m_10 + b * _S.m_11);
|
||||
S.m_10 = a * (-b * _S.m_00 + a * _S.m_10) + b * (-b * _S.m_10 + a * _S.m_11);
|
||||
S.m_11 = -b * (-b * _S.m_00 + a * _S.m_10) + a * (-b * _S.m_10 + a * _S.m_11);
|
||||
S.m_20 = a * _S.m_20 + b * _S.m_21;
|
||||
S.m_21 = -b * _S.m_20 + a * _S.m_21;
|
||||
S.m_22 = _S.m_22;
|
||||
// update cumulative rotation qV
|
||||
vec3 tmp = g.sh * vec3(q);
|
||||
g.sh *= q[3];
|
||||
// (x,y,z) corresponds to ((0,1,2),(1,2,0),(2,0,1)) for (p,q) =
|
||||
// ((0,1),(1,2),(0,2))
|
||||
q[z] = fma(q[z], g.ch, g.sh);
|
||||
q[3] = fma(q[3], g.ch, -tmp[z]); // w
|
||||
q[x] = fma(q[x], g.ch, tmp[y]);
|
||||
q[y] = fma(q[y], g.ch, -tmp[x]);
|
||||
q[z] = q[z] * g.ch + g.sh;
|
||||
q[3] = q[3] * g.ch + -tmp[z]; // w
|
||||
q[x] = q[x] * g.ch + tmp[y];
|
||||
q[y] = q[y] * g.ch + -tmp[x];
|
||||
// re-arrange matrix for next iteration
|
||||
_S.m_00 = S.m_11;
|
||||
_S.m_10 = S.m_21;
|
||||
|
@ -148,15 +145,15 @@ inline mat3 JacobiEigenAnalysis(Symmetric3x3 S) {
|
|||
JacobiConjugation(1, 2, 0, S, q);
|
||||
JacobiConjugation(2, 0, 1, S, q);
|
||||
}
|
||||
return mat3({1.0 - 2.0 * (fma(q.y, q.y, q.z * q.z)), //
|
||||
2.0 * fma(q.x, q.y, +q.w * q.z), //
|
||||
2.0 * fma(q.x, q.z, -q.w * q.y)}, //
|
||||
{2 * fma(q.x, q.y, -q.w * q.z), //
|
||||
1 - 2 * fma(q.x, q.x, q.z * q.z), //
|
||||
2 * fma(q.y, q.z, q.w * q.x)}, //
|
||||
{2 * fma(q.x, q.z, q.w * q.y), //
|
||||
2 * fma(q.y, q.z, -q.w * q.x), //
|
||||
1 - 2 * fma(q.x, q.x, q.y * q.y)});
|
||||
return mat3({1.0 - 2.0 * (q.y * q.y + q.z * q.z), //
|
||||
2.0 * (q.x * q.y + +q.w * q.z), //
|
||||
2.0 * (q.x * q.z + -q.w * q.y)}, //
|
||||
{2 * (q.x * q.y + -q.w * q.z), //
|
||||
1 - 2 * (q.x * q.x + q.z * q.z), //
|
||||
2 * (q.y * q.z + q.w * q.x)}, //
|
||||
{2 * (q.x * q.z + q.w * q.y), //
|
||||
2 * (q.y * q.z + -q.w * q.x), //
|
||||
1 - 2 * (q.x * q.x + q.y * q.y)});
|
||||
}
|
||||
// Implementation of Algorithm 3
|
||||
inline void SortSingularValues(mat3& B, mat3& V) {
|
||||
|
@ -207,65 +204,64 @@ inline QR QRDecomposition(mat3& B) {
|
|||
mat3 Q, R;
|
||||
// first Givens rotation (ch,0,0,sh)
|
||||
auto g1 = QRGivensQuaternion(B[0][0], B[0][1]);
|
||||
auto a = fma(-2.0, g1.sh * g1.sh, 1.0);
|
||||
auto a = -2.0 * g1.sh * g1.sh + 1.0;
|
||||
auto b = 2.0 * g1.ch * g1.sh;
|
||||
// apply B = Q' * B
|
||||
R[0][0] = fma(a, B[0][0], b * B[0][1]);
|
||||
R[1][0] = fma(a, B[1][0], b * B[1][1]);
|
||||
R[2][0] = fma(a, B[2][0], b * B[2][1]);
|
||||
R[0][1] = fma(-b, B[0][0], a * B[0][1]);
|
||||
R[1][1] = fma(-b, B[1][0], a * B[1][1]);
|
||||
R[2][1] = fma(-b, B[2][0], a * B[2][1]);
|
||||
R[0][0] = a * B[0][0] + b * B[0][1];
|
||||
R[1][0] = a * B[1][0] + b * B[1][1];
|
||||
R[2][0] = a * B[2][0] + b * B[2][1];
|
||||
R[0][1] = -b * B[0][0] + a * B[0][1];
|
||||
R[1][1] = -b * B[1][0] + a * B[1][1];
|
||||
R[2][1] = -b * B[2][0] + a * B[2][1];
|
||||
R[0][2] = B[0][2];
|
||||
R[1][2] = B[1][2];
|
||||
R[2][2] = B[2][2];
|
||||
// second Givens rotation (ch,0,-sh,0)
|
||||
auto g2 = QRGivensQuaternion(R[0][0], R[0][2]);
|
||||
a = fma(-2.0, g2.sh * g2.sh, 1.0);
|
||||
a = -2.0 * g2.sh * g2.sh + 1.0;
|
||||
b = 2.0 * g2.ch * g2.sh;
|
||||
// apply B = Q' * B;
|
||||
B[0][0] = fma(a, R[0][0], b * R[0][2]);
|
||||
B[1][0] = fma(a, R[1][0], b * R[1][2]);
|
||||
B[2][0] = fma(a, R[2][0], b * R[2][2]);
|
||||
B[0][0] = a * R[0][0] + b * R[0][2];
|
||||
B[1][0] = a * R[1][0] + b * R[1][2];
|
||||
B[2][0] = a * R[2][0] + b * R[2][2];
|
||||
B[0][1] = R[0][1];
|
||||
B[1][1] = R[1][1];
|
||||
B[2][1] = R[2][1];
|
||||
B[0][2] = fma(-b, R[0][0], a * R[0][2]);
|
||||
B[1][2] = fma(-b, R[1][0], a * R[1][2]);
|
||||
B[2][2] = fma(-b, R[2][0], a * R[2][2]);
|
||||
B[0][2] = -b * R[0][0] + a * R[0][2];
|
||||
B[1][2] = -b * R[1][0] + a * R[1][2];
|
||||
B[2][2] = -b * R[2][0] + a * R[2][2];
|
||||
// third Givens rotation (ch,sh,0,0)
|
||||
auto g3 = QRGivensQuaternion(B[1][1], B[1][2]);
|
||||
a = fma(-2.0, g3.sh * g3.sh, 1.0);
|
||||
a = -2.0 * g3.sh * g3.sh + 1.0;
|
||||
b = 2.0 * g3.ch * g3.sh;
|
||||
// R is now set to desired value
|
||||
R[0][0] = B[0][0];
|
||||
R[1][0] = B[1][0];
|
||||
R[2][0] = B[2][0];
|
||||
R[0][1] = fma(a, B[0][1], b * B[0][2]);
|
||||
R[1][1] = fma(a, B[1][1], b * B[1][2]);
|
||||
R[2][1] = fma(a, B[2][1], b * B[2][2]);
|
||||
R[0][2] = fma(-b, B[0][1], a * B[0][2]);
|
||||
R[1][2] = fma(-b, B[1][1], a * B[1][2]);
|
||||
R[2][2] = fma(-b, B[2][1], a * B[2][2]);
|
||||
R[0][1] = a * B[0][1] + b * B[0][2];
|
||||
R[1][1] = a * B[1][1] + b * B[1][2];
|
||||
R[2][1] = a * B[2][1] + b * B[2][2];
|
||||
R[0][2] = -b * B[0][1] + a * B[0][2];
|
||||
R[1][2] = -b * B[1][1] + a * B[1][2];
|
||||
R[2][2] = -b * B[2][1] + a * B[2][2];
|
||||
// construct the cumulative rotation Q=Q1 * Q2 * Q3
|
||||
// the number of floating point operations for three quaternion
|
||||
// multiplications is more or less comparable to the explicit form of the
|
||||
// joined matrix. certainly more memory-efficient!
|
||||
auto sh12 = 2.0 * fma(g1.sh, g1.sh, -0.5);
|
||||
auto sh22 = 2.0 * fma(g2.sh, g2.sh, -0.5);
|
||||
auto sh32 = 2.0 * fma(g3.sh, g3.sh, -0.5);
|
||||
auto sh12 = 2.0 * (g1.sh * g1.sh + -0.5);
|
||||
auto sh22 = 2.0 * (g2.sh * g2.sh + -0.5);
|
||||
auto sh32 = 2.0 * (g3.sh * g3.sh + -0.5);
|
||||
Q[0][0] = sh12 * sh22;
|
||||
Q[1][0] = fma(4.0 * g2.ch * g3.ch, sh12 * g2.sh * g3.sh,
|
||||
2.0 * g1.ch * g1.sh * sh32);
|
||||
Q[2][0] = fma(4.0 * g1.ch * g3.ch, g1.sh * g3.sh,
|
||||
-2.0 * g2.ch * sh12 * g2.sh * sh32);
|
||||
Q[1][0] =
|
||||
4.0 * g2.ch * g3.ch * sh12 * g2.sh * g3.sh + 2.0 * g1.ch * g1.sh * sh32;
|
||||
Q[2][0] =
|
||||
4.0 * g1.ch * g3.ch * g1.sh * g3.sh + -2.0 * g2.ch * sh12 * g2.sh * sh32;
|
||||
|
||||
Q[0][1] = -2.0 * g1.ch * g1.sh * sh22;
|
||||
Q[1][1] =
|
||||
fma(-8.0 * g1.ch * g2.ch * g3.ch, g1.sh * g2.sh * g3.sh, sh12 * sh32);
|
||||
Q[2][1] = fma(
|
||||
-2.0 * g3.ch, g3.sh,
|
||||
4.0 * g1.sh * fma(g3.ch * g1.sh, g3.sh, g1.ch * g2.ch * g2.sh * sh32));
|
||||
Q[1][1] = -8.0 * g1.ch * g2.ch * g3.ch * g1.sh * g2.sh * g3.sh + sh12 * sh32;
|
||||
Q[2][1] =
|
||||
-2.0 * g3.ch * g3.sh +
|
||||
4.0 * g1.sh * (g3.ch * g1.sh * g3.sh + g1.ch * g2.ch * g2.sh * sh32);
|
||||
|
||||
Q[0][2] = 2.0 * g2.ch * g2.sh;
|
||||
Q[1][2] = -2.0 * g3.ch * sh22 * g3.sh;
|
||||
|
|
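The svd.h hunks above rewrite fma() expressions into plain multiply-adds; the results agree up to rounding, and the recurring pattern is a 2x2 Givens-style rotation built from a (ch, sh) pair. A standalone sketch of one such rotation with illustrative values:

#include <cmath>
#include <iostream>

int main() {
  // (ch, sh) parameterize the rotation; chosen here so ch*ch + sh*sh == 1.
  const double ch = 0.8, sh = 0.6;
  const double a = ch * ch - sh * sh;  // previously fma(ch, ch, -sh * sh)
  const double b = 2.0 * sh * ch;
  // Rotate the vector (x, y); a rotation preserves its length.
  const double x = 3.0, y = 4.0;
  const double xr = a * x + b * y;
  const double yr = -b * x + a * y;
  std::cout << std::hypot(xr, yr) << "\n";  // 5, the same as hypot(3, 4)
}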
59
thirdparty/manifold/src/tree2d.cpp
vendored
Normal file
|
@ -0,0 +1,59 @@
|
|||
// Copyright 2025 The Manifold Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "tree2d.h"
|
||||
|
||||
#include "parallel.h"
|
||||
|
||||
#ifndef ZoneScoped
|
||||
#if __has_include(<tracy/Tracy.hpp>)
|
||||
#include <tracy/Tracy.hpp>
|
||||
#else
|
||||
#define FrameMarkStart(x)
|
||||
#define FrameMarkEnd(x)
|
||||
// putting ZoneScoped in a function will instrument the function execution when
|
||||
// TRACY_ENABLE is set, which allows the profiler to record more accurate
|
||||
// timing.
|
||||
#define ZoneScoped
|
||||
#define ZoneScopedN(name)
|
||||
#endif
|
||||
#endif
|
||||
|
||||
namespace manifold {
|
||||
|
||||
// Not really a proper KD-tree, but a kd tree with k = 2 and alternating x/y
|
||||
// partition.
|
||||
// Recursive sorting is not the most efficient, but simple and guaranteed to
|
||||
// result in a balanced tree.
|
||||
void BuildTwoDTreeImpl(VecView<PolyVert> points, bool sortX) {
|
||||
using CmpFn = std::function<bool(const PolyVert&, const PolyVert&)>;
|
||||
CmpFn cmpx = [](const PolyVert& a, const PolyVert& b) {
|
||||
return a.pos.x < b.pos.x;
|
||||
};
|
||||
CmpFn cmpy = [](const PolyVert& a, const PolyVert& b) {
|
||||
return a.pos.y < b.pos.y;
|
||||
};
|
||||
manifold::stable_sort(points.begin(), points.end(), sortX ? cmpx : cmpy);
|
||||
if (points.size() < 2) return;
|
||||
BuildTwoDTreeImpl(points.view(0, points.size() / 2), !sortX);
|
||||
BuildTwoDTreeImpl(points.view(points.size() / 2 + 1), !sortX);
|
||||
}
|
||||
|
||||
void BuildTwoDTree(VecView<PolyVert> points) {
|
||||
ZoneScoped;
|
||||
// don't even bother...
|
||||
if (points.size() <= 8) return;
|
||||
BuildTwoDTreeImpl(points, true);
|
||||
}
|
||||
} // namespace manifold
|
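BuildTwoDTreeImpl above builds an implicit, balanced two-dimensional tree by sorting the whole range on x, then each half on y, and so on; the median of every subrange becomes that subrange's split node. A recursive standalone sketch of the same idea on a plain std::vector (the vendored version works on VecView and uses manifold::stable_sort; the Pt type here is illustrative):

#include <algorithm>
#include <vector>

struct Pt { double x, y; };

void BuildTree2D(std::vector<Pt>::iterator first,
                 std::vector<Pt>::iterator last, bool sortX) {
  std::stable_sort(first, last, [sortX](const Pt& a, const Pt& b) {
    return sortX ? a.x < b.x : a.y < b.y;
  });
  const auto n = last - first;
  if (n < 2) return;
  BuildTree2D(first, first + n / 2, !sortX);     // everything left of the median
  BuildTree2D(first + n / 2 + 1, last, !sortX);  // everything right of the median
}

int main() {
  std::vector<Pt> pts = {{3, 1}, {1, 4}, {2, 2}, {5, 0}, {4, 3}};
  BuildTree2D(pts.begin(), pts.end(), true);
}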
85
thirdparty/manifold/src/tree2d.h
vendored
Normal file
|
@ -0,0 +1,85 @@
|
|||
// Copyright 2025 The Manifold Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "manifold/common.h"
|
||||
#include "manifold/optional_assert.h"
|
||||
#include "manifold/polygon.h"
|
||||
#include "manifold/vec_view.h"
|
||||
|
||||
namespace manifold {
|
||||
|
||||
void BuildTwoDTreeImpl(VecView<PolyVert> points, bool sortX);
|
||||
|
||||
void BuildTwoDTree(VecView<PolyVert> points);
|
||||
|
||||
template <typename F>
|
||||
void QueryTwoDTree(VecView<PolyVert> points, Rect r, F f) {
|
||||
if (points.size() <= 8) {
|
||||
for (const auto& p : points)
|
||||
if (r.Contains(p.pos)) f(p);
|
||||
return;
|
||||
}
|
||||
Rect current;
|
||||
current.min = vec2(-std::numeric_limits<double>::infinity());
|
||||
current.max = vec2(std::numeric_limits<double>::infinity());
|
||||
|
||||
int level = 0;
|
||||
VecView<PolyVert> currentView = points;
|
||||
std::array<Rect, 64> rectStack;
|
||||
std::array<VecView<PolyVert>, 64> viewStack;
|
||||
std::array<int, 64> levelStack;
|
||||
int stackPointer = 0;
|
||||
|
||||
while (1) {
|
||||
if (currentView.size() <= 2) {
|
||||
for (const auto& p : currentView)
|
||||
if (r.Contains(p.pos)) f(p);
|
||||
if (--stackPointer < 0) break;
|
||||
level = levelStack[stackPointer];
|
||||
currentView = viewStack[stackPointer];
|
||||
current = rectStack[stackPointer];
|
||||
continue;
|
||||
}
|
||||
|
||||
// these are conceptual left/right trees
|
||||
Rect left = current;
|
||||
Rect right = current;
|
||||
const PolyVert middle = currentView[currentView.size() / 2];
|
||||
if (level % 2 == 0)
|
||||
left.max.x = right.min.x = middle.pos.x;
|
||||
else
|
||||
left.max.y = right.min.y = middle.pos.y;
|
||||
|
||||
if (r.Contains(middle.pos)) f(middle);
|
||||
if (left.DoesOverlap(r)) {
|
||||
if (right.DoesOverlap(r)) {
|
||||
DEBUG_ASSERT(stackPointer < 64, logicErr, "Stack overflow");
|
||||
rectStack[stackPointer] = right;
|
||||
viewStack[stackPointer] = currentView.view(currentView.size() / 2 + 1);
|
||||
levelStack[stackPointer] = level + 1;
|
||||
stackPointer++;
|
||||
}
|
||||
current = left;
|
||||
currentView = currentView.view(0, currentView.size() / 2);
|
||||
level++;
|
||||
} else {
|
||||
current = right;
|
||||
currentView = currentView.view(currentView.size() / 2 + 1);
|
||||
level++;
|
||||
}
|
||||
}
|
||||
}
|
||||
} // namespace manifold
|
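QueryTwoDTree above walks the same implicit layout with an explicit 64-deep stack; a recursive standalone sketch of the traversal logic follows. The split coordinate is the median element of each subrange, and a half is descended only when the query rectangle reaches across that split. The Pt and Rect types here are illustrative stand-ins for the library's PolyVert and Rect.

#include <cstddef>
#include <functional>
#include <iostream>
#include <vector>

struct Pt { double x, y; };
struct Rect {
  double minX, minY, maxX, maxY;
  bool Contains(const Pt& p) const {
    return p.x >= minX && p.x <= maxX && p.y >= minY && p.y <= maxY;
  }
};

void Query(const std::vector<Pt>& pts, size_t first, size_t last, bool splitX,
           const Rect& r, const std::function<void(const Pt&)>& f) {
  if (first >= last) return;
  const size_t mid = first + (last - first) / 2;
  const Pt& m = pts[mid];
  if (r.Contains(m)) f(m);
  // Visit a half only if the rectangle can hold points on that side of the split.
  if (splitX ? r.minX <= m.x : r.minY <= m.y) Query(pts, first, mid, !splitX, r, f);
  if (splitX ? r.maxX >= m.x : r.maxY >= m.y) Query(pts, mid + 1, last, !splitX, r, f);
}

int main() {
  // Laid out as the alternating-sort build would leave these five points.
  const std::vector<Pt> pts = {{2, 2}, {1, 4}, {3, 1}, {5, 0}, {4, 3}};
  Query(pts, 0, pts.size(), true, Rect{1.5, 0.5, 4.5, 3.5},
        [](const Pt& p) { std::cout << p.x << "," << p.y << "\n"; });
}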
1
thirdparty/manifold/src/tri_dist.h
vendored
|
@ -15,6 +15,7 @@
|
|||
#pragma once
|
||||
|
||||
#include <array>
|
||||
#include <cstdint>
|
||||
|
||||
#include "manifold/common.h"
|
||||
|
||||
|
|
21
thirdparty/manifold/src/utils.h
vendored
|
@ -19,8 +19,8 @@
|
|||
#include <mutex>
|
||||
#include <unordered_map>
|
||||
|
||||
#include "./vec.h"
|
||||
#include "manifold/common.h"
|
||||
#include "vec.h"
|
||||
|
||||
#ifndef MANIFOLD_PAR
|
||||
#error "MANIFOLD_PAR must be defined to either 1 (parallel) or -1 (series)"
|
||||
|
@ -33,7 +33,7 @@
|
|||
#endif
|
||||
#endif
|
||||
|
||||
#include "./parallel.h"
|
||||
#include "parallel.h"
|
||||
|
||||
#if __has_include(<tracy/Tracy.hpp>)
|
||||
#include <tracy/Tracy.hpp>
|
||||
|
@ -72,7 +72,7 @@ inline int Prev3(int i) {
|
|||
template <typename T, typename T1>
|
||||
void Permute(Vec<T>& inOut, const Vec<T1>& new2Old) {
|
||||
Vec<T> tmp(std::move(inOut));
|
||||
inOut.resize(new2Old.size());
|
||||
inOut.resize_nofill(new2Old.size());
|
||||
gather(new2Old.begin(), new2Old.end(), tmp.begin(), inOut.begin());
|
||||
}
|
||||
|
||||
|
@ -106,6 +106,12 @@ class ConcurrentSharedPtr {
|
|||
ConcurrentSharedPtr(T value) : impl(std::make_shared<T>(value)) {}
|
||||
ConcurrentSharedPtr(const ConcurrentSharedPtr<T>& other)
|
||||
: impl(other.impl), mutex(other.mutex) {}
|
||||
ConcurrentSharedPtr& operator=(const ConcurrentSharedPtr<T>& other) {
|
||||
if (this == &other) return *this;
|
||||
impl = other.impl;
|
||||
mutex = other.mutex;
|
||||
return *this;
|
||||
}
|
||||
class SharedPtrGuard {
|
||||
public:
|
||||
SharedPtrGuard(std::recursive_mutex* mutex, T* content)
|
||||
|
@ -211,7 +217,7 @@ struct Negate {
|
|||
inline int CCW(vec2 p0, vec2 p1, vec2 p2, double tol) {
|
||||
vec2 v1 = p1 - p0;
|
||||
vec2 v2 = p2 - p0;
|
||||
double area = fma(v1.x, v2.y, -v1.y * v2.x);
|
||||
double area = v1.x * v2.y - v1.y * v2.x;
|
||||
double base2 = la::max(la::dot(v1, v1), la::dot(v2, v2));
|
||||
if (area * area * 4 <= base2 * tol * tol)
|
||||
return 0;
|
||||
|
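`CCW` derives the orientation of the triangle (p0, p1, p2) from the 2D cross product of its two edge vectors; the doubled signed area is compared (squared, to avoid a square root) against the longer edge length scaled by `tol`, and the change above simply drops the `fma` in favour of the plain expression. A self-contained sketch of the same test follows; the final sign branch is an assumption, since the diff only shows the degeneracy check:

```cpp
#include <algorithm>

struct P2 { double x, y; };

// +1 for counter-clockwise, -1 for clockwise, 0 when the triangle is
// degenerate relative to `tol` (area small compared to the longer edge).
int OrientSketch(P2 p0, P2 p1, P2 p2, double tol) {
  const double v1x = p1.x - p0.x, v1y = p1.y - p0.y;
  const double v2x = p2.x - p0.x, v2y = p2.y - p0.y;
  const double area = v1x * v2y - v1y * v2x;  // doubled signed area
  const double base2 =
      std::max(v1x * v1x + v1y * v1y, v2x * v2x + v2y * v2y);
  if (area * area * 4 <= base2 * tol * tol) return 0;
  return area > 0 ? 1 : -1;  // assumed sign convention
}
```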
@ -224,4 +230,11 @@ inline mat4 Mat4(mat3x4 a) {
|
|||
}
|
||||
inline mat3 Mat3(mat2x3 a) { return mat3({a[0], 0}, {a[1], 0}, {a[2], 1}); }
|
||||
|
||||
// https://stackoverflow.com/questions/664014/what-integer-hash-function-are-good-that-accepts-an-integer-hash-key
|
||||
constexpr uint64_t hash64bit(uint64_t x) {
|
||||
x = (x ^ (x >> 30)) * 0xbf58476d1ce4e5b9ull;
|
||||
x = (x ^ (x >> 27)) * 0x94d049bb133111ebull;
|
||||
x = x ^ (x >> 31);
|
||||
return x;
|
||||
}
|
||||
} // namespace manifold
|
||||
|
|
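The new `hash64bit` is the finalizer step of SplitMix64 (the constants from the linked Stack Overflow answer): two xor-shift-multiply rounds plus a final xor-shift, which scatters nearby integer keys across the full 64-bit range. A quick illustration of how such a mixer can be used, for example to hash two packed 32-bit ids; the packing scheme here is hypothetical, not taken from Manifold:

```cpp
#include <cstdint>
#include <cstdio>

// Same mixing rounds as hash64bit above.
constexpr uint64_t Mix64(uint64_t x) {
  x = (x ^ (x >> 30)) * 0xbf58476d1ce4e5b9ull;
  x = (x ^ (x >> 27)) * 0x94d049bb133111ebull;
  return x ^ (x >> 31);
}

int main() {
  const uint32_t edgeId = 7, triId = 42;  // hypothetical ids
  const uint64_t key = (uint64_t(edgeId) << 32) | triId;
  std::printf("%016llx\n", static_cast<unsigned long long>(Mix64(key)));
  return 0;
}
```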
58
thirdparty/manifold/src/vec.h
vendored
|
@ -21,8 +21,8 @@
|
|||
#endif
|
||||
#include <vector>
|
||||
|
||||
#include "./parallel.h"
|
||||
#include "manifold/vec_view.h"
|
||||
#include "parallel.h"
|
||||
|
||||
namespace manifold {
|
||||
|
||||
|
@ -45,40 +45,40 @@ class Vec : public VecView<T> {
|
|||
// Note that the vector constructed with this constructor will contain
|
||||
// uninitialized memory. Please specify `val` if you need to make sure that
|
||||
// the data is initialized.
|
||||
Vec(size_t size) {
|
||||
Vec(size_t size) : VecView<T>() {
|
||||
reserve(size);
|
||||
this->size_ = size;
|
||||
}
|
||||
|
||||
Vec(size_t size, T val) { resize(size, val); }
|
||||
Vec(size_t size, T val) : VecView<T>() { resize(size, val); }
|
||||
|
||||
Vec(const Vec<T> &vec) { *this = Vec(vec.view()); }
|
||||
Vec(const Vec<T>& vec) : VecView<T>() { *this = Vec(vec.view()); }
|
||||
|
||||
Vec(const VecView<const T> &vec) {
|
||||
Vec(const VecView<const T>& vec) : VecView<T>() {
|
||||
this->size_ = vec.size();
|
||||
this->capacity_ = this->size_;
|
||||
auto policy = autoPolicy(this->size_);
|
||||
if (this->size_ != 0) {
|
||||
this->ptr_ = reinterpret_cast<T *>(malloc(this->size_ * sizeof(T)));
|
||||
this->ptr_ = reinterpret_cast<T*>(malloc(this->size_ * sizeof(T)));
|
||||
ASSERT(this->ptr_ != nullptr, std::bad_alloc());
|
||||
TracyAllocS(this->ptr_, this->size_ * sizeof(T), 3);
|
||||
copy(policy, vec.begin(), vec.end(), this->ptr_);
|
||||
}
|
||||
}
|
||||
|
||||
Vec(const std::vector<T> &vec) {
|
||||
Vec(const std::vector<T>& vec) : VecView<T>() {
|
||||
this->size_ = vec.size();
|
||||
this->capacity_ = this->size_;
|
||||
auto policy = autoPolicy(this->size_);
|
||||
if (this->size_ != 0) {
|
||||
this->ptr_ = reinterpret_cast<T *>(malloc(this->size_ * sizeof(T)));
|
||||
this->ptr_ = reinterpret_cast<T*>(malloc(this->size_ * sizeof(T)));
|
||||
ASSERT(this->ptr_ != nullptr, std::bad_alloc());
|
||||
TracyAllocS(this->ptr_, this->size_ * sizeof(T), 3);
|
||||
copy(policy, vec.begin(), vec.end(), this->ptr_);
|
||||
}
|
||||
}
|
||||
|
||||
Vec(Vec<T> &&vec) {
|
||||
Vec(Vec<T>&& vec) : VecView<T>() {
|
||||
this->ptr_ = vec.ptr_;
|
||||
this->size_ = vec.size_;
|
||||
capacity_ = vec.capacity_;
|
||||
|
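Every constructor above now spells out `: VecView<T>()`, i.e. value-initializes the base before the body runs. Whether this changes behaviour depends on `VecView`'s definition, which is not part of this diff; if its members lack default initializers, the explicit base initializer is what guarantees the pointer and sizes start out null and zero. A generic C++ illustration of that distinction, unrelated to Manifold's actual types:

```cpp
#include <cassert>
#include <cstddef>

struct Base {
  int* ptr;      // deliberately no default member initializers
  size_t size;
};

struct DefaultInit : Base {
  DefaultInit() {}           // Base default-initialized: members indeterminate
};

struct ValueInit : Base {
  ValueInit() : Base() {}    // Base value-initialized: ptr == nullptr, size == 0
};

int main() {
  ValueInit v;
  assert(v.ptr == nullptr && v.size == 0);
  return 0;
}
```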
@ -100,7 +100,7 @@ class Vec : public VecView<T> {
|
|||
capacity_ = 0;
|
||||
}
|
||||
|
||||
Vec<T> &operator=(const Vec<T> &other) {
|
||||
Vec<T>& operator=(const Vec<T>& other) {
|
||||
if (&other == this) return *this;
|
||||
if (this->ptr_ != nullptr) {
|
||||
TracyFreeS(this->ptr_, 3);
|
||||
|
@ -109,7 +109,7 @@ class Vec : public VecView<T> {
|
|||
this->size_ = other.size_;
|
||||
capacity_ = other.size_;
|
||||
if (this->size_ != 0) {
|
||||
this->ptr_ = reinterpret_cast<T *>(malloc(this->size_ * sizeof(T)));
|
||||
this->ptr_ = reinterpret_cast<T*>(malloc(this->size_ * sizeof(T)));
|
||||
ASSERT(this->ptr_ != nullptr, std::bad_alloc());
|
||||
TracyAllocS(this->ptr_, this->size_ * sizeof(T), 3);
|
||||
manifold::copy(other.begin(), other.end(), this->ptr_);
|
||||
|
@ -117,7 +117,7 @@ class Vec : public VecView<T> {
|
|||
return *this;
|
||||
}
|
||||
|
||||
Vec<T> &operator=(Vec<T> &&other) {
|
||||
Vec<T>& operator=(Vec<T>&& other) {
|
||||
if (&other == this) return *this;
|
||||
if (this->ptr_ != nullptr) {
|
||||
TracyFreeS(this->ptr_, 3);
|
||||
|
@ -134,38 +134,37 @@ class Vec : public VecView<T> {
|
|||
|
||||
operator VecView<T>() const { return {this->ptr_, this->size_}; }
|
||||
|
||||
void swap(Vec<T> &other) {
|
||||
void swap(Vec<T>& other) {
|
||||
std::swap(this->ptr_, other.ptr_);
|
||||
std::swap(this->size_, other.size_);
|
||||
std::swap(capacity_, other.capacity_);
|
||||
}
|
||||
|
||||
inline void push_back(const T &val, bool seq = false) {
|
||||
inline void push_back(const T& val) {
|
||||
if (this->size_ >= capacity_) {
|
||||
// avoid dangling pointer in case val is a reference of our array
|
||||
T val_copy = val;
|
||||
reserve(capacity_ == 0 ? 128 : capacity_ * 2, seq);
|
||||
reserve(capacity_ == 0 ? 128 : capacity_ * 2);
|
||||
this->ptr_[this->size_++] = val_copy;
|
||||
return;
|
||||
}
|
||||
this->ptr_[this->size_++] = val;
|
||||
}
|
||||
|
||||
inline void extend(size_t n, bool seq = false) {
|
||||
inline void extend(size_t n) {
|
||||
if (this->size_ + n >= capacity_)
|
||||
reserve(capacity_ == 0 ? 128 : std::max(capacity_ * 2, this->size_ + n),
|
||||
seq);
|
||||
reserve(capacity_ == 0 ? 128 : std::max(capacity_ * 2, this->size_ + n));
|
||||
this->size_ += n;
|
||||
}
|
||||
|
||||
void reserve(size_t n, bool seq = false) {
|
||||
void reserve(size_t n) {
|
||||
if (n > capacity_) {
|
||||
T *newBuffer = reinterpret_cast<T *>(malloc(n * sizeof(T)));
|
||||
T* newBuffer = reinterpret_cast<T*>(malloc(n * sizeof(T)));
|
||||
ASSERT(newBuffer != nullptr, std::bad_alloc());
|
||||
TracyAllocS(newBuffer, n * sizeof(T), 3);
|
||||
if (this->size_ > 0)
|
||||
manifold::copy(seq ? ExecutionPolicy::Seq : autoPolicy(this->size_),
|
||||
this->ptr_, this->ptr_ + this->size_, newBuffer);
|
||||
manifold::copy(autoPolicy(this->size_), this->ptr_,
|
||||
this->ptr_ + this->size_, newBuffer);
|
||||
if (this->ptr_ != nullptr) {
|
||||
TracyFreeS(this->ptr_, 3);
|
||||
free(this->ptr_);
|
||||
|
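Note the `val_copy` step kept in `push_back` above: when the insert triggers a reallocation, `val` may be a reference into the vector's own storage, so it has to be copied before `reserve` frees the old buffer (only the `seq` parameter goes away in this hunk). The hazard is generic to any growable array; a toy illustration, not Manifold code:

```cpp
#include <cstddef>
#include <cstdlib>
#include <cstring>

// Minimal growable int array showing the self-aliasing push_back hazard.
struct IntBuf {
  int* data = nullptr;
  size_t size = 0, cap = 0;

  void push_back(const int& val) {
    if (size == cap) {
      const int copy = val;  // `val` may point into `data`; copy it first
      const size_t newCap = cap ? cap * 2 : 8;
      int* fresh = static_cast<int*>(std::malloc(newCap * sizeof(int)));
      if (size) std::memcpy(fresh, data, size * sizeof(int));
      std::free(data);       // after this, `val` could be dangling
      data = fresh;
      cap = newCap;
      fresh[size++] = copy;  // safe: uses the copy, not the stale reference
      return;
    }
    data[size++] = val;
  }
};

// e.g. buf.push_back(buf.data[0]) is only safe because of the copy above.
```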
@ -176,7 +175,7 @@ class Vec : public VecView<T> {
|
|||
}
|
||||
|
||||
void resize(size_t newSize, T val = T()) {
|
||||
bool shrink = this->size_ > 2 * newSize;
|
||||
bool shrink = this->size_ > 2 * newSize && this->size_ > 16;
|
||||
reserve(newSize);
|
||||
if (this->size_ < newSize) {
|
||||
fill(autoPolicy(newSize - this->size_), this->ptr_ + this->size_,
|
||||
|
@ -186,7 +185,14 @@ class Vec : public VecView<T> {
|
|||
if (shrink) shrink_to_fit();
|
||||
}
|
||||
|
||||
void pop_back() { resize(this->size_ - 1); }
|
||||
void resize_nofill(size_t newSize) {
|
||||
bool shrink = this->size_ > 2 * newSize && this->size_ > 16;
|
||||
reserve(newSize);
|
||||
this->size_ = newSize;
|
||||
if (shrink) shrink_to_fit();
|
||||
}
|
||||
|
||||
void pop_back() { resize_nofill(this->size_ - 1); }
|
||||
|
||||
void clear(bool shrink = true) {
|
||||
this->size_ = 0;
|
||||
|
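`resize_nofill` is the no-initialization counterpart of `resize`: it adjusts `size_` (shrinking via `shrink_to_fit` only when the vector drops below half its size and holds more than 16 elements) and leaves any newly exposed tail uninitialized, which is why `pop_back` can switch to it. Callers are expected to write every new element before reading it. `std::vector` has no direct equivalent; the closest standard idiom, shown below, is `reserve()` plus writing each element yourself:

```cpp
#include <cstddef>
#include <vector>

int main() {
  const size_t n = 1000;
  std::vector<double> v;
  v.reserve(n);  // allocate once; size stays 0, nothing is default-filled
  for (size_t i = 0; i < n; ++i) v.push_back(0.5 * i);  // each slot written once
  return 0;
}
```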
@ -194,9 +200,9 @@ class Vec : public VecView<T> {
|
|||
}
|
||||
|
||||
void shrink_to_fit() {
|
||||
T *newBuffer = nullptr;
|
||||
T* newBuffer = nullptr;
|
||||
if (this->size_ > 0) {
|
||||
newBuffer = reinterpret_cast<T *>(malloc(this->size_ * sizeof(T)));
|
||||
newBuffer = reinterpret_cast<T*>(malloc(this->size_ * sizeof(T)));
|
||||
ASSERT(newBuffer != nullptr, std::bad_alloc());
|
||||
TracyAllocS(newBuffer, this->size_ * sizeof(T), 3);
|
||||
manifold::copy(this->ptr_, this->ptr_ + this->size_, newBuffer);
|
||||
|
|