diff --git a/CMakeLists.txt b/CMakeLists.txt index a5d88a9..4256ee6 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -9,17 +9,24 @@ set(DIR_EXT "${CMAKE_CURRENT_SOURCE_DIR}/ext") # Set options if top level if (PROJECT_IS_TOP_LEVEL) - # Options for the project + # Options to generate python bindings + option(USE_PYBIND "Generate pybindings" ON) + + # Alternative library options option(USE_MULTIPRECISION "Use high precision floating point" OFF) - option(CHECK_VALIDITY "Check validity pre and post conditions" ON) + option(USE_EMBREE "Use Embree for rendering" OFF) + option(USE_SUITESPARSE "Use suite sparse methods for matrix inversion" ON) + option(USE_COMISO "Use Comiso for field generation" ON) + + # Visualization options option(ENABLE_VISUALIZATION "Generate viewers for visualization" ON) option(RENDER_TEXTURE "Render results" ON) - option(USE_SUITESPARSE "Use suite sparse methods for matrix inversion" ON) - option(USE_PYBIND "Generate pybindings" ON) + + # Validity check options option(BUILD_CURVATURE_METRIC_TESTS "Build tests" ON) - option(USE_EMBREE "Use Embree for rendering" OFF) + option(CHECK_VALIDITY "Check validity pre and post conditions" ON) - # Set libigl and suitesparse options + # Set default libigl and suitesparse options option(LIBIGL_PREDICATES "Use Predicates" ON) set ( SUITESPARSE_ENABLE_PROJECTS "suitesparse_config;cholmod;spqr" ) option ( SUITESPARSE_USE_CUDA OFF ) @@ -40,6 +47,25 @@ if(USE_MULTIPRECISION) link_directories(${MPFR_LIBRARIES_DIR}) endif() +# Optionally get frame field libraries +if (USE_COMISO) + add_compile_definitions(USE_COMISO) + option(LIBIGL_COPYLEFT_COMISO "Use COMISO" ON) + set(COMISO_LIBS + igl_copyleft::comiso + ) +endif() + +# Set libigl options for rendering if enabled +if (RENDER_TEXTURE) + option(LIBIGL_OPENGL "Use OpenGL" ON) + option(LIBIGL_GLFW "Use GLFW" ON) + option(LIBIGL_PNG "Use PNG" ON) +endif() + +if (USE_EMBREE) + option(LIBIGL_EMBREE "Use EMBREE" ON) +endif() + # Set compile definitions 
add_compile_definitions(_USE_MATH_DEFINES) if(USE_MULTIPRECISION) @@ -49,49 +75,56 @@ if(CHECK_VALIDITY) add_compile_definitions(CHECK_VALIDITY) endif() if (RENDER_TEXTURE) - option(LIBIGL_OPENGL "Use OpenGL" ON) - option(LIBIGL_GLFW "Use GLFW" ON) - option(LIBIGL_PNG "Use PNG" ON) add_compile_definitions(RENDER_TEXTURE) endif() if (USE_EMBREE) add_compile_definitions(USE_EMBREE) - option(LIBIGL_EMBREE "Use EMBREE" ON) endif() if(USE_PYBIND) add_compile_definitions(PYBIND) endif() - -# Set suitesparse compile definitions # WARNING: This compile definition publicly links suitesparse into the # conformal ideal delaunay library if(USE_SUITESPARSE) add_compile_definitions(USE_SUITESPARSE) + set(SUITESPARSE_LIBS + SuiteSparse::SuiteSparseConfig + SuiteSparse::SPQR + SuiteSparse::CHOLMOD + ) endif() # Get external libraries -include_directories("${CMAKE_CURRENT_SOURCE_DIR}/src/util") include(conformal_ideal_delaunay) include(libigl) include(spectra) include(cli11) +include(json) +include(geometry-central) -# Optionally create visualization library +# Optionally create rendering library if(RENDER_TEXTURE) + # Build core rendering library + # TODO: Move somewhere reasonable add_library(rendering - src/util/visualization.cc + src/optimization/util/visualization.cc ) + target_include_directories(rendering PUBLIC include/optimization/optimization/util) target_link_libraries(rendering PUBLIC igl::core igl::glfw igl::png plot ) -if(USE_EMBREE) - target_link_libraries(rendering PUBLIC - igl::embree - ) -endif() + + # Link in embree if enabled + if(USE_EMBREE) + target_link_libraries(rendering PUBLIC + igl::embree + ) + endif() + + # Change rendering libraries from null to singleton set(RENDER_LIBRARIES rendering ) @@ -106,50 +139,13 @@ if(ENABLE_VISUALIZATION) ) endif() -# Install executables to bin directory -set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin) - -# Make main cpp library -add_subdirectory(src) - -# Build executables -add_subdirectory(src/app) - -# Build 
pybind optimization functions +# Optionally build pybind if(USE_PYBIND) include(pybind11) - add_library(optimization_py MODULE - src/penner_optimization_pybind.cpp - ) - - # Link libraries - target_link_libraries(optimization_py PUBLIC - PennerOptimizationLib - pybind11::module - ${RENDER_LIBRARIES} - ) - - # Set pybinding settings - set_target_properties(optimization_py PROPERTIES LIBRARY_OUTPUT_DIRECTORY - ${PROJECT_SOURCE_DIR}/py - ) - set_target_properties(optimization_py PROPERTIES PREFIX - "${PYTHON_MODULE_PREFIX}" - ) endif() -# Optionally build tests (only valid for double precision) -if((BUILD_CURVATURE_METRIC_TESTS) AND (NOT USE_MULTIPRECISION)) - include(Catch2) +# Install executables to bin directory +set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin) - # Build testing executable - add_executable(CurvatureMetricTests - src/tests/tests.cpp - ) - target_link_libraries(CurvatureMetricTests PRIVATE - PennerOptimizationLib - Catch2::Catch2WithMain - ) - set(TEST_DATA_ROOT "${PROJECT_SOURCE_DIR}/src/tests/regression/") - target_compile_definitions(CurvatureMetricTests PUBLIC TEST_DATA_DIR=\"${TEST_DATA_ROOT}\") -endif() +# Make main cpp library +add_subdirectory(src) diff --git a/cmake/geometry-central.cmake b/cmake/geometry-central.cmake new file mode 100755 index 0000000..d1d5f67 --- /dev/null +++ b/cmake/geometry-central.cmake @@ -0,0 +1,11 @@ +if(TARGET geometry-central) + return() +endif() + +include(FetchContent) +FetchContent_Declare( + geometry-central + SYSTEM + GIT_REPOSITORY https://github.com/nmwsharp/geometry-central.git +) +FetchContent_MakeAvailable(geometry-central) diff --git a/cmake/json.cmake b/cmake/json.cmake new file mode 100644 index 0000000..c93ca3b --- /dev/null +++ b/cmake/json.cmake @@ -0,0 +1,8 @@ +include(FetchContent) +FetchContent_Declare( + json + SYSTEM + GIT_REPOSITORY https://github.com/nlohmann/json.git +) +FetchContent_MakeAvailable(json) + diff --git a/figures/fig-all.sh b/figures/fig-all.sh deleted file mode 
100644 index 7fc5fb3..0000000 --- a/figures/fig-all.sh +++ /dev/null @@ -1,11 +0,0 @@ -#! /bin/bash -SCRIPT=$(realpath "$0") -SCRIPTPATH=$(dirname "$SCRIPT") - -bash ${SCRIPTPATH}/fig-comparison.sh & -bash ${SCRIPTPATH}/fig-disk.sh & -bash ${SCRIPTPATH}/fig-examples.sh & -bash ${SCRIPTPATH}/fig-initial.sh & -bash ${SCRIPTPATH}/fig-interpolation.sh & -bash ${SCRIPTPATH}/fig-objectives.sh & -bash ${SCRIPTPATH}/fig-teaser.sh & diff --git a/figures/fig-comparison.sh b/figures/fig-comparison.sh deleted file mode 100644 index 5be2a3a..0000000 --- a/figures/fig-comparison.sh +++ /dev/null @@ -1,6 +0,0 @@ -#! /bin/bash -SCRIPT=$(realpath "$0") -SCRIPTPATH=$(dirname "$SCRIPT") - -bash ${SCRIPTPATH}/pipeline.sh comparison_fixed & -bash ${SCRIPTPATH}/pipeline.sh comparison_quadratic & diff --git a/figures/fig-disk.sh b/figures/fig-disk.sh deleted file mode 100644 index e043d0b..0000000 --- a/figures/fig-disk.sh +++ /dev/null @@ -1,6 +0,0 @@ -#! /bin/bash -SCRIPT=$(realpath "$0") -SCRIPTPATH=$(dirname "$SCRIPT") - -bash ${SCRIPTPATH}/pipeline.sh disk_quadratic & -bash ${SCRIPTPATH}/pipeline.sh disk_slim & \ No newline at end of file diff --git a/figures/fig-examples.sh b/figures/fig-examples.sh deleted file mode 100644 index 7ca61ef..0000000 --- a/figures/fig-examples.sh +++ /dev/null @@ -1,7 +0,0 @@ -#! /bin/bash -SCRIPT=$(realpath "$0") -SCRIPTPATH=$(dirname "$SCRIPT") - -bash ${SCRIPTPATH}/pipeline.sh examples_closed & -bash ${SCRIPTPATH}/pipeline.sh examples_open & -bash ${SCRIPTPATH}/pipeline.sh examples_cut & diff --git a/figures/fig-initial.sh b/figures/fig-initial.sh deleted file mode 100644 index b91d7a2..0000000 --- a/figures/fig-initial.sh +++ /dev/null @@ -1,7 +0,0 @@ -#! 
/bin/bash -SCRIPT=$(realpath "$0") -SCRIPTPATH=$(dirname "$SCRIPT") - -bash ${SCRIPTPATH}/pipeline.sh initial_0_5 & -bash ${SCRIPTPATH}/pipeline.sh initial_1_5 & -bash ${SCRIPTPATH}/pipeline.sh initial_2_5 & diff --git a/figures/fig-interpolation.sh b/figures/fig-interpolation.sh deleted file mode 100644 index b8fa409..0000000 --- a/figures/fig-interpolation.sh +++ /dev/null @@ -1,10 +0,0 @@ -#! /bin/bash -SCRIPT=$(realpath "$0") -SCRIPTPATH=$(dirname "$SCRIPT") - -bash ${SCRIPTPATH}/pipeline.sh interpolation_log_length - -# Need to run sequentially for interpolation -wait - -bash ${SCRIPTPATH}/pipeline.sh interpolation_log_scale diff --git a/figures/fig-objectives.sh b/figures/fig-objectives.sh deleted file mode 100644 index f24b31c..0000000 --- a/figures/fig-objectives.sh +++ /dev/null @@ -1,7 +0,0 @@ -#! /bin/bash -SCRIPT=$(realpath "$0") -SCRIPTPATH=$(dirname "$SCRIPT") - -bash ${SCRIPTPATH}/pipeline.sh objectives_log_length & -bash ${SCRIPTPATH}/pipeline.sh objectives_log_length_p4 & -bash ${SCRIPTPATH}/pipeline.sh objectives_log_scale & diff --git a/figures/fig-teaser.sh b/figures/fig-teaser.sh deleted file mode 100644 index 279ca38..0000000 --- a/figures/fig-teaser.sh +++ /dev/null @@ -1,5 +0,0 @@ -#! /bin/bash -SCRIPT=$(realpath "$0") -SCRIPTPATH=$(dirname "$SCRIPT") - -bash ${SCRIPTPATH}/pipeline.sh teaser & \ No newline at end of file diff --git a/figures/pipeline.sh b/figures/pipeline.sh deleted file mode 100644 index 0254abf..0000000 --- a/figures/pipeline.sh +++ /dev/null @@ -1,9 +0,0 @@ -#! 
/bin/bash -SCRIPT=$(realpath "$0") -SCRIPTPATH=$(dirname "$SCRIPT") -pipeline=$1 - -output_dir=${SCRIPTPATH}/../output/${pipeline} -mkdir -p ${output_dir} -cp ${SCRIPTPATH}/pipelines/${pipeline}.json ${output_dir}/_pipeline.json -python3 ${SCRIPTPATH}/../scripts/pipeline.py ${output_dir}/_pipeline.json diff --git a/figures/pipelines/comparison_fixed.json b/figures/pipelines/comparison_fixed.json deleted file mode 100644 index 2f1aff9..0000000 --- a/figures/pipelines/comparison_fixed.json +++ /dev/null @@ -1,64 +0,0 @@ -{ - "global_args": { - "fname": [ - "bunnyBotsch.obj" - ], - "input_dir": "data/closed-Myles", - "energy_choice": "sym_dirichlet", - "use_edge_lengths": true, - "colormap": "sym_dirichlet", - "histogram_choice": "sym_dirichlet", - "ylim": 100, - "colormap_scale": 1, - "height": 1600, - "width": 2560 - }, - "pipeline": - [ - { - "method": "optimize", - "skip": false, - "args_list": [ - {} - ] - }, - { - "method": "overlay", - "skip": false, - "args_list": [ - { - "suffix": "conf" - }, - { - "suffix": "opt" - } - ] - }, - { - "method": "colormap_histogram", - "skip": false, - "args_list": [ - { - "color": "blue", - "bin_min": 0, - "bin_max": 3, - "label": "sym. 
dirichlet", - "suffix": "opt_overlay" - } - ] - }, - { - "method": "render_uv", - "skip": false, - "args_list": [ - { - "suffix": "conf_overlay" - }, - { - "suffix": "opt_overlay" - } - ] - } - ] -} - diff --git a/figures/pipelines/comparison_quadratic.json b/figures/pipelines/comparison_quadratic.json deleted file mode 100644 index 4f3afb7..0000000 --- a/figures/pipelines/comparison_quadratic.json +++ /dev/null @@ -1,82 +0,0 @@ -{ - "global_args": { - "fname": [ - "bunnyBotsch.obj" - ], - "input_dir": "data/closed-Myles", - "energy_choice": "quadratic_sym_dirichlet", - "colormap": "sym_dirichlet", - "direction_choice": "projected_newton", - "ylim": 100, - "colormap_scale": 1, - "height": 1600, - "width": 2560 - }, - "pipeline": - [ - { - "method": "optimize", - "skip": false, - "args_list": [ - {} - ] - }, - { - "method": "overlay", - "skip": false, - "args_list": [ - { - "suffix": "conf" - }, - { - "suffix": "opt" - } - ] - }, - { - "method": "refine", - "skip": false, - "args_list": [ - { - "suffix": "conf" - }, - { - "suffix": "opt" - } - ] - }, - { - "method": "colormap_histogram", - "skip": false, - "args_list": [ - { - "suffix": "conf_refined", - "color": "red", - "bin_min": 0, - "bin_max": 3, - "label": "sym. dirichlet" - }, - { - "suffix": "opt_refined", - "bin_min": 0, - "bin_max": 3, - "color": "blue", - "label": "sym. 
dirichlet" - } - ] - }, - { - "method": "render_uv", - "skip": false, - "args_list": [ - { - "suffix": "conf_refined" - }, - { - "suffix": "opt_refined" - } - ] - } - ] -} - diff --git a/figures/pipelines/disk_quadratic.json b/figures/pipelines/disk_quadratic.json deleted file mode 100644 index e20cc39..0000000 --- a/figures/pipelines/disk_quadratic.json +++ /dev/null @@ -1,98 +0,0 @@ -{ - "global_args": { - "fname": [ - "nicolo_da_uzzano.obj" - ], - "input_dir": "data/open-Myles", - "energy_choice": "quadratic_sym_dirichlet", - "colormap": "sym_dirichlet", - "direction_choice": "projected_newton", - "map_to_disk": true, - "free_bd_angles": true, - "ylim": 100, - "no_cones": true, - "colormap_scale": 4, - "height": 1600, - "width": 2560 - }, - "pipeline": - [ - { - "method": "optimize", - "skip": false, - "args_list": [ - { - } - ] - }, - { - "method": "overlay", - "skip": false, - "args_list": [ - { - "suffix": "conf" - }, - { - "suffix": "opt" - } - ] - }, - { - "method": "refine", - "skip": false, - "args_list": [ - { - "suffix": "conf" - }, - { - "suffix": "opt" - } - ] - }, - { - "method": "colormap_histogram", - "skip": false, - "args_list": [ - { - "suffix": "conf_refined", - "color": "red", - "bin_min": 0, - "bin_max": 10, - "label": "sym. dirichlet" - }, - { - "suffix": "opt_refined", - "color": "blue", - "bin_min": 0, - "bin_max": 10, - "label": "sym. 
dirichlet" - } - ] - }, - { - "method": "render_uv", - "skip": false, - "args_list": [ - { - "suffix": "conf_refined" - }, - { - "suffix": "opt_refined" - } - ] - }, - { - "method": "render_layout", - "skip": false, - "args_list": [ - { - "suffix": "conf_refined" - }, - { - "suffix": "opt_refined" - } - ] - } - ] -} - diff --git a/figures/pipelines/disk_slim.json b/figures/pipelines/disk_slim.json deleted file mode 100644 index cb508fa..0000000 --- a/figures/pipelines/disk_slim.json +++ /dev/null @@ -1,58 +0,0 @@ -{ - "global_args": { - "fname": [ - "nicolo_da_uzzano.obj" - ], - "input_dir": "data/open-Myles", - "colormap": "sym_dirichlet", - "free_bd_angles": true, - "no_cones": true, - "colormap_scale": 4, - "ylim": 100, - "height": 1600, - "width": 2560 - }, - "pipeline": - [ - { - "method": "slim", - "skip": false, - "args_list": [ - { - } - ] - }, - { - "method": "colormap_histogram", - "skip": false, - "args_list": [ - { - "suffix": "slim", - "color": "blue", - "bin_min": 0, - "bin_max": 10, - "label": "sym. 
dirichlet" - } - ] - }, - { - "method": "render_uv", - "skip": false, - "args_list": [ - { - "suffix": "slim" - } - ] - }, - { - "method": "render_layout", - "skip": false, - "args_list": [ - { - "suffix": "slim" - } - ] - } - ] -} - diff --git a/figures/pipelines/examples_closed.json b/figures/pipelines/examples_closed.json deleted file mode 100644 index 1a3b4fc..0000000 --- a/figures/pipelines/examples_closed.json +++ /dev/null @@ -1,75 +0,0 @@ -{ - "global_args": - { - "fname": [ - "carter100K.obj", - "dancing_children100K.obj", - "robocat_deci.obj" - ], - "input_dir": "data/closed-Myles", - "energy_choice": "quadratic_sym_dirichlet", - "colormap": "scale_factors", - "histogram_choice": "compare_scale_factors", - "direction_choice": "projected_newton", - "height": 1600, - "width": 2560 - }, - "pipeline": - [ - { - "method": "optimize", - "skip": false, - "args_list": [ - {} - ] - }, - { - "method": "overlay", - "skip": false, - "args_list": [ - { - "suffix": "conf" - }, - { - "suffix": "opt" - } - ] - }, - { - "method": "refine", - "skip": false, - "args_list": [ - { - "suffix": "conf" - }, - { - "suffix": "opt" - } - ] - }, - { - "method": "histogram", - "skip": false, - "args_list": [ - { - "suffix": "opt", - "color": "red", - "second_color": "blue" - } - ] - }, - { - "method": "render_uv", - "skip": false, - "args_list": [ - { - "suffix": "conf_refined" - }, - { - "suffix": "opt_refined" - } - ] - } - ] -} - diff --git a/figures/pipelines/examples_cut.json b/figures/pipelines/examples_cut.json deleted file mode 100644 index 7001640..0000000 --- a/figures/pipelines/examples_cut.json +++ /dev/null @@ -1,75 +0,0 @@ -{ - "global_args": - { - "fname": [ - "bozbezbozzel100K.obj", - "chair100K.obj", - "pegaso.obj" - ], - "input_dir": "data/cut-Myles", - "energy_choice": "quadratic_sym_dirichlet", - "colormap": "scale_factors", - "histogram_choice": "compare_scale_factors", - "direction_choice": "projected_newton", - "height": 1600, - "width": 2560 - }, - "pipeline": 
- [ - { - "method": "optimize", - "skip": false, - "args_list": [ - {} - ] - }, - { - "method": "overlay", - "skip": false, - "args_list": [ - { - "suffix": "conf" - }, - { - "suffix": "opt" - } - ] - }, - { - "method": "refine", - "skip": false, - "args_list": [ - { - "suffix": "conf" - }, - { - "suffix": "opt" - } - ] - }, - { - "method": "histogram", - "skip": false, - "args_list": [ - { - "suffix": "opt", - "color": "red", - "second_color": "blue" - } - ] - }, - { - "method": "render_uv", - "skip": false, - "args_list": [ - { - "suffix": "conf_refined" - }, - { - "suffix": "opt_refined" - } - ] - } - ] -} - diff --git a/figures/pipelines/examples_open.json b/figures/pipelines/examples_open.json deleted file mode 100644 index c658ceb..0000000 --- a/figures/pipelines/examples_open.json +++ /dev/null @@ -1,75 +0,0 @@ -{ - "global_args": - { - "fname": [ - "chinese_lion100K.obj", - "fish.obj", - "nicolo_da_uzzano.obj" - ], - "input_dir": "data/open-Myles", - "energy_choice": "quadratic_sym_dirichlet", - "colormap": "scale_factors", - "histogram_choice": "compare_scale_factors", - "direction_choice": "projected_newton", - "height": 1600, - "width": 2560 - }, - "pipeline": - [ - { - "method": "optimize", - "skip": false, - "args_list": [ - {} - ] - }, - { - "method": "overlay", - "skip": false, - "args_list": [ - { - "suffix": "conf" - }, - { - "suffix": "opt" - } - ] - }, - { - "method": "refine", - "skip": false, - "args_list": [ - { - "suffix": "conf" - }, - { - "suffix": "opt" - } - ] - }, - { - "method": "histogram", - "skip": false, - "args_list": [ - { - "suffix": "opt", - "color": "red", - "second_color": "blue" - } - ] - }, - { - "method": "render_uv", - "skip": false, - "args_list": [ - { - "suffix": "conf_refined" - }, - { - "suffix": "opt_refined" - } - ] - } - ] -} - diff --git a/figures/pipelines/initial_0_5.json b/figures/pipelines/initial_0_5.json deleted file mode 100644 index 97c82c4..0000000 --- a/figures/pipelines/initial_0_5.json +++ /dev/null @@ 
-1,80 +0,0 @@ -{ - "global_args": { - "fname": [ - "bumpy_torus.obj" - ], - "input_dir": "data/closed-Myles", - "energy_choice": "quadratic_sym_dirichlet", - "colormap": "scale_factors", - "histogram_choice": "scale_factors", - "direction_choice": "projected_newton", - "initial_pert_sd": 0.5, - "height": 1600, - "width": 2560 - }, - "pipeline": - [ - { - "method": "optimize", - "skip": false, - "args_list": [ - {} - ] - }, - { - "method": "overlay", - "skip": false, - "args_list": [ - { - "suffix": "conf" - }, - { - "suffix": "opt" - } - ] - }, - { - "method": "refine", - "skip": false, - "args_list": [ - { - "suffix": "conf" - }, - { - "suffix": "opt" - } - ] - }, - { - "method": "histogram", - "skip": false, - "args_list": [ - { - "suffix": "conf", - "color": "red", - "bin_min": -5, - "bin_max": 5 - }, - { - "suffix": "opt", - "color": "blue", - "bin_min": -1.0, - "bin_max": 1.0 - } - ] - }, - { - "method": "render_uv", - "skip": false, - "args_list": [ - { - "suffix": "conf_refined" - }, - { - "suffix": "opt_refined" - } - ] - } - ] -} - diff --git a/figures/pipelines/initial_1_5.json b/figures/pipelines/initial_1_5.json deleted file mode 100644 index ba7d0b1..0000000 --- a/figures/pipelines/initial_1_5.json +++ /dev/null @@ -1,80 +0,0 @@ -{ - "global_args": { - "fname": [ - "bumpy_torus.obj" - ], - "input_dir": "data/closed-Myles", - "energy_choice": "quadratic_sym_dirichlet", - "colormap": "scale_factors", - "histogram_choice": "scale_factors", - "direction_choice": "projected_newton", - "initial_pert_sd": 1.5, - "height": 1600, - "width": 2560 - }, - "pipeline": - [ - { - "method": "optimize", - "skip": false, - "args_list": [ - {} - ] - }, - { - "method": "overlay", - "skip": false, - "args_list": [ - { - "suffix": "conf" - }, - { - "suffix": "opt" - } - ] - }, - { - "method": "refine", - "skip": false, - "args_list": [ - { - "suffix": "conf" - }, - { - "suffix": "opt" - } - ] - }, - { - "method": "histogram", - "skip": false, - "args_list": [ - { - 
"suffix": "conf", - "color": "red", - "bin_min": -5, - "bin_max": 5 - }, - { - "suffix": "opt", - "color": "blue", - "bin_min": -1.0, - "bin_max": 1.0 - } - ] - }, - { - "method": "render_uv", - "skip": false, - "args_list": [ - { - "suffix": "conf_refined" - }, - { - "suffix": "opt_refined" - } - ] - } - ] -} - diff --git a/figures/pipelines/initial_2_5.json b/figures/pipelines/initial_2_5.json deleted file mode 100644 index b0788d0..0000000 --- a/figures/pipelines/initial_2_5.json +++ /dev/null @@ -1,80 +0,0 @@ -{ - "global_args": { - "fname": [ - "bumpy_torus.obj" - ], - "input_dir": "data/closed-Myles", - "energy_choice": "quadratic_sym_dirichlet", - "colormap": "scale_factors", - "histogram_choice": "scale_factors", - "direction_choice": "projected_newton", - "initial_pert_sd": 2.5, - "height": 1600, - "width": 2560 - }, - "pipeline": - [ - { - "method": "optimize", - "skip": false, - "args_list": [ - {} - ] - }, - { - "method": "overlay", - "skip": false, - "args_list": [ - { - "suffix": "conf" - }, - { - "suffix": "opt" - } - ] - }, - { - "method": "refine", - "skip": false, - "args_list": [ - { - "suffix": "conf" - }, - { - "suffix": "opt" - } - ] - }, - { - "method": "histogram", - "skip": false, - "args_list": [ - { - "suffix": "conf", - "color": "red", - "bin_min": -5, - "bin_max": 5 - }, - { - "suffix": "opt", - "color": "blue", - "bin_min": -1.0, - "bin_max": 1.0 - } - ] - }, - { - "method": "render_uv", - "skip": false, - "args_list": [ - { - "suffix": "conf_refined" - }, - { - "suffix": "opt_refined" - } - ] - } - ] -} - diff --git a/figures/pipelines/interpolation_log_length.json b/figures/pipelines/interpolation_log_length.json deleted file mode 100644 index ffcdab0..0000000 --- a/figures/pipelines/interpolation_log_length.json +++ /dev/null @@ -1,189 +0,0 @@ -{ - "global_args": { - "fname": [ - "fertility_tri.obj" - ], - "input_dir": "data/cut-Myles", - "energy_choice": "p_norm", - "colormap": "scale_factors", - "height": 1600, - "width": 2560, - 
"ylim": 100 - }, - "pipeline": - [ - { - "method": "optimize", - "skip": false, - "args_list": [ - {} - ] - }, - { - "method": "interpolate", - "skip": false, - "args_list": [ - { - "num_steps": 4 - } - ] - }, - { - "method": "overlay", - "skip": false, - "args_list": [ - { - "suffix": "0" - }, - { - "suffix": "1" - }, - { - "suffix": "2" - }, - { - "suffix": "3" - } - ] - }, - { - "method": "refine", - "skip": false, - "args_list": [ - { - "suffix": "0" - }, - { - "suffix": "1" - }, - { - "suffix": "2" - }, - { - "suffix": "3" - } - ] - }, - { - "method": "histogram", - "skip": false, - "args_list": [ - { - "suffix": "0", - "histogram_choice": "scale_factors", - "color": "red", - "bin_min": -5, - "bin_max": 5 - }, - { - "suffix": "1", - "histogram_choice": "scale_factors", - "color": "red", - "bin_min": -5, - "bin_max": 5 - }, - { - "suffix": "2", - "histogram_choice": "scale_factors", - "color": "red", - "bin_min": -5, - "bin_max": 5 - }, - { - "suffix": "3", - "histogram_choice": "scale_factors", - "color": "red", - "bin_min": -5, - "bin_max": 5 - } - ] - }, - { - "method": "histogram", - "skip": false, - "args_list": [ - { - "suffix": "0", - "histogram_choice": "scale_residuals", - "color": "red", - "bin_min": -2, - "bin_max": 2 - }, - { - "suffix": "1", - "histogram_choice": "scale_residuals", - "color": "red", - "bin_min": -2, - "bin_max": 2 - }, - { - "suffix": "2", - "histogram_choice": "scale_residuals", - "color": "red", - "bin_min": -2, - "bin_max": 2 - }, - { - "suffix": "3", - "histogram_choice": "scale_residuals", - "color": "red", - "bin_min": -2, - "bin_max": 2 - } - ] - }, - { - "method": "histogram", - "skip": false, - "args_list": [ - { - "suffix": "0", - "histogram_choice": "stretch_factors", - "color": "red", - "bin_min": 0, - "bin_max": 20 - }, - { - "suffix": "1", - "histogram_choice": "stretch_factors", - "color": "red", - "bin_min": 0, - "bin_max": 20 - }, - { - "suffix": "2", - "histogram_choice": "stretch_factors", - "color": "red", - 
"bin_min": 0, - "bin_max": 20 - }, - { - "suffix": "3", - "histogram_choice": "stretch_factors", - "color": "red", - "bin_min": 0, - "bin_max": 20 - } - ] - }, - { - "method": "render_uv", - "skip": false, - "args_list": [ - { - "suffix": "0_refined" - }, - { - "suffix": "1_refined" - }, - { - "suffix": "2_refined" - }, - { - "suffix": "3_refined" - } - ] - } - ] -} - diff --git a/figures/pipelines/interpolation_log_scale.json b/figures/pipelines/interpolation_log_scale.json deleted file mode 100644 index 8bcbe05..0000000 --- a/figures/pipelines/interpolation_log_scale.json +++ /dev/null @@ -1,193 +0,0 @@ -{ - "global_args": { - "fname": [ - "fertility_tri.obj" - ], - "input_dir": "data/cut-Myles", - "energy_choice": "scale_distortion", - "colormap": "scale_factors", - "histogram_choice": "scale_factors", - "use_lengths_from_file": true, - "height": 1600, - "width": 2560, - "ylim": 100 - }, - "pipeline": - [ - { - "method": "optimize", - "skip": false, - "args_list": [ - { - "lambdas_dir": "output/interpolation_log_length" - } - ] - }, - { - "method": "interpolate", - "skip": false, - "args_list": [ - { - "num_steps": 4 - } - ] - }, - { - "method": "overlay", - "skip": false, - "args_list": [ - { - "suffix": "0" - }, - { - "suffix": "1" - }, - { - "suffix": "2" - }, - { - "suffix": "3" - } - ] - }, - { - "method": "refine", - "skip": false, - "args_list": [ - { - "suffix": "0" - }, - { - "suffix": "1" - }, - { - "suffix": "2" - }, - { - "suffix": "3" - } - ] - }, - { - "method": "histogram", - "skip": false, - "args_list": [ - { - "suffix": "0", - "histogram_choice": "scale_factors", - "color": "blue", - "bin_min": -0.5, - "bin_max": 0.5 - }, - { - "suffix": "1", - "histogram_choice": "scale_factors", - "color": "blue", - "bin_min": -0.5, - "bin_max": 0.5 - }, - { - "suffix": "2", - "histogram_choice": "scale_factors", - "color": "blue", - "bin_min": -0.5, - "bin_max": 0.5 - }, - { - "suffix": "3", - "histogram_choice": "scale_factors", - "color": "blue", - 
"bin_min": -0.5, - "bin_max": 0.5 - } - ] - }, - { - "method": "histogram", - "skip": false, - "args_list": [ - { - "suffix": "0", - "histogram_choice": "scale_residuals", - "color": "blue", - "bin_min": -2, - "bin_max": 2 - }, - { - "suffix": "1", - "histogram_choice": "scale_residuals", - "color": "blue", - "bin_min": -2, - "bin_max": 2 - }, - { - "suffix": "2", - "histogram_choice": "scale_residuals", - "color": "blue", - "bin_min": -2, - "bin_max": 2 - }, - { - "suffix": "3", - "histogram_choice": "scale_residuals", - "color": "blue", - "bin_min": -2, - "bin_max": 2 - } - ] - }, - { - "method": "histogram", - "skip": false, - "args_list": [ - { - "suffix": "0", - "histogram_choice": "stretch_factors", - "color": "blue", - "bin_min": 1.0, - "bin_max": 1.2 - }, - { - "suffix": "1", - "histogram_choice": "stretch_factors", - "color": "blue", - "bin_min": 1.0, - "bin_max": 1.2 - }, - { - "suffix": "2", - "histogram_choice": "stretch_factors", - "color": "blue", - "bin_min": 1.0, - "bin_max": 1.2 - }, - { - "suffix": "3", - "histogram_choice": "stretch_factors", - "color": "blue", - "bin_min": 1.0, - "bin_max": 1.2 - } - ] - }, - { - "method": "render_uv", - "skip": false, - "args_list": [ - { - "suffix": "0_refined" - }, - { - "suffix": "1_refined" - }, - { - "suffix": "2_refined" - }, - { - "suffix": "3_refined" - } - ] - } - ] -} - diff --git a/figures/pipelines/objectives_log_length.json b/figures/pipelines/objectives_log_length.json deleted file mode 100644 index 857bc1b..0000000 --- a/figures/pipelines/objectives_log_length.json +++ /dev/null @@ -1,317 +0,0 @@ -{ - "global_args": { - "energy_choice": "p_norm", - "histogram_choice": "scale_factors", - "height": 1600, - "width": 2560, - "ylim": 100 - }, - "pipeline": - [ - { - "method": "optimize", - "skip": false, - "args_list": [ - { - "fname": [ - "kitten100K.obj" - ], - "input_dir": "data/closed-Myles" - }, - { - "fname": [ - "julius.obj" - ], - "input_dir": "data/open-Myles" - } - ] - }, - { - "method": 
"overlay", - "skip": false, - "args_list": [ - { - "fname": [ - "kitten100K.obj" - ], - "input_dir": "data/closed-Myles", - "suffix": "conf" - }, - { - "fname": [ - "kitten100K.obj" - ], - "input_dir": "data/closed-Myles", - "suffix": "opt" - }, - { - "fname": [ - "julius.obj" - ], - "input_dir": "data/open-Myles", - "suffix": "conf" - }, - { - "fname": [ - "julius.obj" - ], - "input_dir": "data/open-Myles", - "suffix": "opt" - } - ] - }, - { - "method": "refine", - "skip": false, - "args_list": [ - { - "fname": [ - "kitten100K.obj" - ], - "input_dir": "data/closed-Myles", - "suffix": "conf" - }, - { - "fname": [ - "kitten100K.obj" - ], - "input_dir": "data/closed-Myles", - "suffix": "opt" - }, - { - "fname": [ - "julius.obj" - ], - "input_dir": "data/open-Myles", - "suffix": "conf" - }, - { - "fname": [ - "julius.obj" - ], - "input_dir": "data/open-Myles", - "suffix": "opt" - } - ] - }, - { - "method": "histogram", - "skip": false, - "args_list": [ - { - "fname": [ - "kitten100K.obj" - ], - "input_dir": "data/closed-Myles", - "suffix": "conf", - "histogram_choice": "scale_factors", - "color": "red", - "bin_min": -2.5, - "bin_max": 2.5 - }, - { - "fname": [ - "julius.obj" - ], - "input_dir": "data/open-Myles", - "suffix": "conf", - "histogram_choice": "scale_factors", - "color": "red", - "bin_min": -2.5, - "bin_max": 2.5 - }, - { - "fname": [ - "kitten100K.obj" - ], - "input_dir": "data/closed-Myles", - "suffix": "opt", - "histogram_choice": "scale_factors", - "color": "blue", - "bin_min": -0.5, - "bin_max": 0.5 - }, - { - "fname": [ - "julius.obj" - ], - "input_dir": "data/open-Myles", - "suffix": "opt", - "histogram_choice": "scale_factors", - "color": "blue", - "bin_min": -0.5, - "bin_max": 0.5 - } - ] - }, - { - "method": "histogram", - "skip": false, - "args_list": [ - { - "fname": [ - "kitten100K.obj" - ], - "input_dir": "data/closed-Myles", - "suffix": "conf", - "histogram_choice": "scale_residuals", - "color": "red", - "bin_min": -2.0, - "bin_max": 2.0 - }, 
- { - "fname": [ - "julius.obj" - ], - "input_dir": "data/open-Myles", - "suffix": "conf", - "histogram_choice": "scale_residuals", - "color": "red", - "bin_min": -2.0, - "bin_max": 2.0 - }, - { - "fname": [ - "kitten100K.obj" - ], - "input_dir": "data/closed-Myles", - "suffix": "opt", - "histogram_choice": "scale_residuals", - "color": "blue", - "bin_min": -2.0, - "bin_max": 2.0 - }, - { - "fname": [ - "julius.obj" - ], - "input_dir": "data/open-Myles", - "suffix": "opt", - "histogram_choice": "scale_residuals", - "color": "blue", - "bin_min": -2.0, - "bin_max": 2.0 - } - ] - }, - { - "method": "histogram", - "skip": false, - "args_list": [ - { - "fname": [ - "kitten100K.obj" - ], - "input_dir": "data/closed-Myles", - "suffix": "conf", - "histogram_choice": "stretch_factors", - "color": "red", - "bin_min": 1.0, - "bin_max": 4.0 - }, - { - "fname": [ - "julius.obj" - ], - "input_dir": "data/open-Myles", - "suffix": "conf", - "histogram_choice": "stretch_factors", - "color": "red", - "bin_min": 1.0, - "bin_max": 4.0 - }, - { - "fname": [ - "kitten100K.obj" - ], - "input_dir": "data/closed-Myles", - "suffix": "opt", - "histogram_choice": "stretch_factors", - "color": "blue", - "histogram_width": 5, - "bin_min": 1.0, - "bin_max": 1.1 - }, - { - "fname": [ - "julius.obj" - ], - "input_dir": "data/open-Myles", - "suffix": "opt", - "histogram_choice": "stretch_factors", - "color": "blue", - "histogram_width": 5, - "bin_min": 1.0, - "bin_max": 1.1 - } - ] - }, - { - "method": "histogram", - "skip": false, - "args_list": [ - { - "fname": [ - "kitten100K.obj" - ], - "input_dir": "data/closed-Myles", - "output_dir": "output/objectives/log_length/strectch_tail", - "suffix": "opt", - "histogram_choice": "stretch_factors", - "color": "blue", - "histogram_width": 5, - "bin_min": 1.1, - "bin_max": 1.5 - }, - { - "fname": [ - "julius.obj" - ], - "input_dir": "data/open-Myles", - "output_dir": "output/objectives/log_length/strectch_tail", - "suffix": "opt", - "histogram_choice": 
"stretch_factors", - "color": "blue", - "histogram_width": 5, - "bin_min": 1.1, - "bin_max": 1.5 - } - ] - }, - { - "method": "render_uv", - "skip": false, - "args_list": [ - { - "fname": [ - "kitten100K.obj" - ], - "input_dir": "data/closed-Myles", - "suffix": "opt_refined" - }, - { - "fname": [ - "julius.obj" - ], - "input_dir": "data/open-Myles", - "suffix": "opt_refined" - }, - { - "fname": [ - "kitten100K.obj" - ], - "input_dir": "data/closed-Myles", - "suffix": "conf_refined" - }, - { - "fname": [ - "julius.obj" - ], - "input_dir": "data/open-Myles", - "suffix": "conf_refined" - } - ] - } - ] -} - diff --git a/figures/pipelines/objectives_log_length_p4.json b/figures/pipelines/objectives_log_length_p4.json deleted file mode 100644 index f64c187..0000000 --- a/figures/pipelines/objectives_log_length_p4.json +++ /dev/null @@ -1,211 +0,0 @@ -{ - "global_args": { - "energy_choice": "p_norm", - "colormap": "scale_factors", - "power": 4, - "histogram_choice": "scale_factors", - "height": 1600, - "width": 2560, - "ylim": 100 - }, - "pipeline": - [ - { - "method": "optimize", - "skip": false, - "args_list": [ - { - "fname": [ - "kitten100K.obj" - ], - "input_dir": "data/closed-Myles" - }, - { - "fname": [ - "julius.obj" - ], - "input_dir": "data/open-Myles" - } - ] - }, - { - "method": "overlay", - "skip": false, - "args_list": [ - { - "fname": [ - "kitten100K.obj" - ], - "input_dir": "data/closed-Myles", - "suffix": "opt" - }, - { - "fname": [ - "julius.obj" - ], - "input_dir": "data/open-Myles", - "suffix": "opt" - } - ] - }, - { - "method": "refine", - "skip": false, - "args_list": [ - { - "fname": [ - "kitten100K.obj" - ], - "input_dir": "data/closed-Myles", - "suffix": "opt" - }, - { - "fname": [ - "julius.obj" - ], - "input_dir": "data/open-Myles", - "suffix": "opt" - } - ] - }, - { - "method": "histogram", - "skip": false, - "args_list": [ - { - "fname": [ - "kitten100K.obj" - ], - "input_dir": "data/closed-Myles", - "suffix": "opt", - "histogram_choice": 
"scale_factors", - "color": "blue", - "bin_min": -0.5, - "bin_max": 0.5 - }, - { - "fname": [ - "julius.obj" - ], - "input_dir": "data/open-Myles", - "suffix": "opt", - "histogram_choice": "scale_factors", - "color": "blue", - "bin_min": -0.5, - "bin_max": 0.5 - } - ] - }, - { - "method": "histogram", - "skip": false, - "args_list": [ - { - "fname": [ - "kitten100K.obj" - ], - "input_dir": "data/closed-Myles", - "suffix": "opt", - "histogram_choice": "scale_residuals", - "color": "blue", - "bin_min": -2.0, - "bin_max": 2.0 - }, - { - "fname": [ - "julius.obj" - ], - "input_dir": "data/open-Myles", - "suffix": "opt", - "histogram_choice": "scale_residuals", - "color": "blue", - "bin_min": -2.0, - "bin_max": 2.0 - } - ] - }, - { - "method": "histogram", - "skip": false, - "args_list": [ - { - "fname": [ - "kitten100K.obj" - ], - "input_dir": "data/closed-Myles", - "suffix": "opt", - "histogram_choice": "stretch_factors", - "color": "blue", - "histogram_width": 5, - "bin_min": 1.0, - "bin_max": 1.1 - }, - { - "fname": [ - "julius.obj" - ], - "input_dir": "data/open-Myles", - "suffix": "opt", - "histogram_choice": "stretch_factors", - "color": "blue", - "histogram_width": 5, - "bin_min": 1.0, - "bin_max": 1.1 - } - ] - }, - { - "method": "histogram", - "skip": false, - "args_list": [ - { - "fname": [ - "kitten100K.obj" - ], - "input_dir": "data/closed-Myles", - "output_dir": "output/objectives/log_length_p_4/strectch_tail", - "suffix": "opt", - "histogram_choice": "stretch_factors", - "color": "blue", - "histogram_width": 5, - "bin_min": 1.1, - "bin_max": 1.5 - }, - { - "fname": [ - "julius.obj" - ], - "input_dir": "data/open-Myles", - "output_dir": "output/objectives/log_length_p_4/strectch_tail", - "suffix": "opt", - "histogram_choice": "stretch_factors", - "color": "blue", - "histogram_width": 5, - "bin_min": 1.1, - "bin_max": 1.5 - } - ] - }, - { - "method": "render_uv", - "skip": false, - "args_list": [ - { - "fname": [ - "kitten100K.obj" - ], - "input_dir": 
"data/closed-Myles", - "suffix": "opt_refined" - }, - { - "fname": [ - "julius.obj" - ], - "input_dir": "data/open-Myles", - "suffix": "opt_refined" - } - ] - } - ] -} - diff --git a/figures/pipelines/objectives_log_scale.json b/figures/pipelines/objectives_log_scale.json deleted file mode 100644 index ce7ef1f..0000000 --- a/figures/pipelines/objectives_log_scale.json +++ /dev/null @@ -1,176 +0,0 @@ -{ - "global_args": { - "energy_choice": "scale_distortion", - "colormap": "scale_factors", - "histogram_choice": "scale_factors", - "height": 1600, - "width": 2560, - "ylim": 100 - }, - "pipeline": - [ - { - "method": "optimize", - "skip": false, - "args_list": [ - { - "fname": [ - "kitten100K.obj" - ], - "input_dir": "data/closed-Myles" - }, - { - "fname": [ - "julius.obj" - ], - "input_dir": "data/open-Myles" - } - ] - }, - { - "method": "overlay", - "skip": false, - "args_list": [ - { - "fname": [ - "kitten100K.obj" - ], - "input_dir": "data/closed-Myles", - "suffix": "opt" - }, - { - "fname": [ - "julius.obj" - ], - "input_dir": "data/open-Myles", - "suffix": "opt" - } - ] - }, - { - "method": "refine", - "skip": false, - "args_list": [ - { - "fname": [ - "kitten100K.obj" - ], - "input_dir": "data/closed-Myles", - "suffix": "opt" - }, - { - "fname": [ - "julius.obj" - ], - "input_dir": "data/open-Myles", - "suffix": "opt" - } - ] - }, - { - "method": "histogram", - "skip": false, - "args_list": [ - { - "fname": [ - "kitten100K.obj" - ], - "input_dir": "data/closed-Myles", - "suffix": "opt", - "histogram_choice": "scale_factors", - "color": "blue", - "bin_min": -0.5, - "bin_max": 0.5 - }, - { - "fname": [ - "julius.obj" - ], - "input_dir": "data/open-Myles", - "suffix": "opt", - "histogram_choice": "scale_factors", - "color": "blue", - "bin_min": -0.5, - "bin_max": 0.5 - } - ] - }, - { - "method": "histogram", - "skip": false, - "args_list": [ - { - "fname": [ - "kitten100K.obj" - ], - "input_dir": "data/closed-Myles", - "suffix": "opt", - "histogram_choice": 
"scale_residuals", - "color": "blue", - "bin_min": -2.0, - "bin_max": 2.0 - }, - { - "fname": [ - "julius.obj" - ], - "input_dir": "data/open-Myles", - "suffix": "opt", - "histogram_choice": "scale_residuals", - "color": "blue", - "bin_min": -2.0, - "bin_max": 2.0 - } - ] - }, - { - "method": "histogram", - "skip": false, - "args_list": [ - { - "fname": [ - "kitten100K.obj" - ], - "input_dir": "data/closed-Myles", - "suffix": "opt", - "histogram_choice": "stretch_factors", - "color": "blue", - "bin_min": 1.0, - "bin_max": 1.1 - }, - { - "fname": [ - "julius.obj" - ], - "input_dir": "data/open-Myles", - "suffix": "opt", - "histogram_choice": "stretch_factors", - "color": "blue", - "bin_min": 1.0, - "bin_max": 1.1 - } - ] - }, - { - "method": "render_uv", - "skip": false, - "args_list": [ - { - "fname": [ - "kitten100K.obj" - ], - "input_dir": "data/closed-Myles", - "suffix": "opt_refined" - }, - { - "fname": [ - "julius.obj" - ], - "input_dir": "data/open-Myles", - "suffix": "opt_refined" - } - ] - } - ] -} - diff --git a/figures/pipelines/teaser.json b/figures/pipelines/teaser.json deleted file mode 100644 index 9f830ba..0000000 --- a/figures/pipelines/teaser.json +++ /dev/null @@ -1,116 +0,0 @@ -{ - "global_args": { - "fname": [ - "raptor50K.obj" - ], - "input_dir": "data/closed-Myles", - "energy_choice": "quadratic_sym_dirichlet", - "colormap": "scale_factors", - "histogram_choice": "scale_factors", - "direction_choice": "projected_newton", - "height": 1600, - "width": 2560 - }, - "pipeline": - [ - { - "method": "optimize", - "skip": false, - "args_list": [ - {} - ] - }, - { - "method": "interpolate", - "skip": false, - "args_list": [ - { - "num_steps": 3 - } - ] - }, - { - "method": "overlay", - "skip": false, - "args_list": [ - { - "suffix": "0" - }, - { - "suffix": "1" - }, - { - "suffix": "2" - } - ] - }, - { - "method": "refine", - "skip": false, - "args_list": [ - { - "suffix": "0" - }, - { - "suffix": "1" - }, - { - "suffix": "2" - } - ] - }, - { - 
"method": "histogram", - "skip": false, - "args_list": [ - { - "suffix": "0", - "histogram_choice": "compare_scale_factors", - "color": "red", - "second_color": "blue", - "bin_min": -5, - "bin_max": 5 - }, - { - "suffix": "1", - "histogram_choice": "compare_scale_factors", - "color": "red", - "second_color": "blue", - "comparison_label": "interpolated", - "bin_min": -5, - "bin_max": 5 - }, - { - "suffix": "2", - "histogram_choice": "compare_scale_factors", - "color": "red", - "second_color": "blue", - "bin_min": -5, - "bin_max": 5 - } - ] - }, - { - "method": "render_uv", - "skip": false, - "args_list": [ - { - "suffix": "0_refined", - "colormap_scale": 2, - "uv_scale": 0.5 - }, - { - "suffix": "1_refined", - "colormap_scale": 2, - "uv_scale": 0.5 - }, - { - "suffix": "2_refined", - "colormap_scale": 2, - "uv_scale": 0.5 - } - ] - } - ] -} - diff --git a/figures/pipelines/test.json b/figures/pipelines/test.json deleted file mode 100644 index 4bc9fa1..0000000 --- a/figures/pipelines/test.json +++ /dev/null @@ -1,92 +0,0 @@ -{ - "global_args": { - "fname": [ - "eight.obj" - ], - "input_dir": "data/cut-Myles", - "energy_choice": "p_norm", - "colormap": "scale_factors", - "opt_num_iter": 50 - }, - "pipeline": - [ - { - "method": "optimize", - "skip": false, - "args_list": [ - {} - ] - }, - { - "method": "overlay", - "skip": false, - "args_list": [ - { - "suffix": "conf" - }, - { - "suffix": "opt" - } - ] - }, - { - "method": "refine", - "skip": false, - "args_list": [ - { - "suffix": "conf" - }, - { - "suffix": "opt" - } - ] - }, - { - "method": "energy_table", - "skip": false, - "args_list": [ - { - "suffix": "conf" - }, - { - "suffix": "opt" - } - ] - }, - { - "method": "error_table", - "skip": false, - "args_list": [ - { - "suffix": "conf_refined" - }, - { - "suffix": "opt_refined" - } - ] - }, - { - "method": "histogram", - "skip": false, - "args_list": [ - { - "suffix": "opt", - "histogram_choice": "compare_scale_factors" - } - ] - }, - { - "method": "render_uv", 
- "skip": false, - "args_list": [ - { - "suffix": "conf_refined" - }, - { - "suffix": "opt_refined" - } - ] - } - ] -} - diff --git a/figures/test.sh b/figures/test.sh deleted file mode 100644 index 6bb9d2f..0000000 --- a/figures/test.sh +++ /dev/null @@ -1,5 +0,0 @@ -#! /bin/bash -SCRIPT=$(realpath "$0") -SCRIPTPATH=$(dirname "$SCRIPT") - -bash ${SCRIPTPATH}/pipeline.sh test \ No newline at end of file diff --git a/include/holonomy/holonomy/core/boundary_basis.h b/include/holonomy/holonomy/core/boundary_basis.h new file mode 100644 index 0000000..ed2d86f --- /dev/null +++ b/include/holonomy/holonomy/core/boundary_basis.h @@ -0,0 +1,60 @@ +#pragma once + +#include "holonomy/core/common.h" +#include "util/spanning_tree.h" + +namespace Penner { +namespace Holonomy { + +/** + * @brief Class to generate boundary loops and boundary path basis loops + */ +class BoundaryBasisGenerator +{ +public: + /** + * @brief Construct a new Boundary Basis Generator object on a mesh + * + * @param m: mesh + */ + BoundaryBasisGenerator(const Mesh& m); + + /** + * @brief Get number of boundaries corresponding to basis loops. + * + * This value is one less than the total number of boundary loops, and each + * basis boundary corresponds to two loops + * + * @return number of boundary basis loops + */ + int n_basis_boundaries() const { return (m_basis_boundary_handles.size()); } + + /** + * @brief Construct a basis loop corresponding to the basis boundary with given index + * + * @param index: index of the basis boundary + * @return sequence of faces defining the dual loop + */ + std::vector construct_boundary_basis_loop(int index) const; + + /** + * @brief Construct a basis loop corresponding to the path from the basis boundary + * with the given index to a designated base boundary. 
+ * + * @param index: index of the basis boundary + * @return sequence of faces defining the dual loop + */ + std::vector construct_boundary_path_basis_loop(int index) const; + +private: + Mesh m_mesh; + std::vector m_he2e; + std::vector m_e2he; + + int m_root_boundary_handle; + std::vector m_basis_boundary_handles; + DualTree m_dual_tree; +}; + +} // namespace Holonomy +} // namespace Penner diff --git a/include/holonomy/holonomy/core/common.h b/include/holonomy/holonomy/core/common.h new file mode 100644 index 0000000..2aee27f --- /dev/null +++ b/include/holonomy/holonomy/core/common.h @@ -0,0 +1,74 @@ +#pragma once + +#include +#include +#include + +#include +#include +#include + +#include "optimization/interface.h" +#include "optimization/core/common.h" +#include "optimization/core/cone_metric.h" +#include "util/vector.h" + +namespace Penner { +namespace Holonomy { + +// Typedefs +using Optimization::DifferentiableConeMetric; +typedef Eigen::Matrix RowVectors2i; + + +/** + * @brief Compute the square of a numeric value. + * + * @tparam type of object (must support multiplication) + * @param a: value to square + * @return squared value + */ +template +Type square(const Type& a) +{ + return a * a; +} + +/** + * @brief Compute the real modulus of x mod y + * + * @param x: positive number to mod + * @param y: positive modulus + * @return x (mod y) + */ +inline +Scalar pos_fmod(Scalar x, Scalar y) { return (0 == y) ? x : x - y * floor(x / y); } + +/** + * @brief Compute the Euler characteristic of the mesh + * + * @param m: mesh + * @return Euler characteristic + */ +int compute_euler_characteristic(const Mesh& m); + +/** + * @brief Compute the genus of the mesh + * + * @param m: mesh + * @return genus + */ +int compute_genus(const Mesh& m); + +/** + * @brief Compute the map from vertex-vertex edges to primal mesh halfedges + * + * WARNING: 1-indexing is used for halfedges instead of the usual 0 indexing. 
+ * + * @param m mesh + * @return vertex-vertex to halfedge map + */ +Eigen::SparseMatrix compute_vv_to_halfedge_matrix(const Mesh& m); + +} // namespace Holonomy +} // namespace Penner \ No newline at end of file diff --git a/include/holonomy/holonomy/core/dual_lengths.h b/include/holonomy/holonomy/core/dual_lengths.h new file mode 100644 index 0000000..6944aa5 --- /dev/null +++ b/include/holonomy/holonomy/core/dual_lengths.h @@ -0,0 +1,33 @@ +#pragma once + +#include "holonomy/core/common.h" + +#include "util/spanning_tree.h" + +namespace Penner { +namespace Holonomy { + +/** + * @brief Compute dual edge lengths using the DEC formulation with dual vertices at circumcenters. + * + * @param m: mesh with primal edge lengths + * @return per-halfedge dual edge lengths + */ +std::vector compute_dual_edge_lengths(const Mesh& m); + +/** + * @brief Compute the edge weights for a primal tree given by the length of the root cycle + * generated by adding an edge to a dual spanning tree (or 0 if the edge is in the spanning tree) + * + * @param m: underlying mesh + * @param weights: weights on the dual mesh for loop lengths + * @param dual_tree: spanning dual tree + * @return per-halfedge dual loop length weights + */ +std::vector compute_dual_loop_length_weights( + const Mesh& m, + const std::vector& weights, + const DualTree& dual_tree); + +} // namespace Holonomy +} // namespace Penner diff --git a/include/holonomy/holonomy/core/dual_loop.h b/include/holonomy/holonomy/core/dual_loop.h new file mode 100644 index 0000000..75f5405 --- /dev/null +++ b/include/holonomy/holonomy/core/dual_loop.h @@ -0,0 +1,351 @@ +#pragma once + +#include "holonomy/core/common.h" +#include "holonomy/core/dual_segment.h" + +namespace Penner { +namespace Holonomy { + +typedef std::array DualSegment; + +class DenseHalfedgeMap { +public: + DenseHalfedgeMap(int num_halfedges); + void clear(); + void add_segment(int halfedge_index, int segment_index); + void erase_entry(int halfedge_index); + const 
std::vector& get_halfedge_segments(int halfedge_index); +private: + std::vector m_h_num_segments; + std::vector m_h_first_segment; + std::vector m_h_bucket; + std::vector> m_h2segments; + + // Pre-allocated temporary data structures + std::vector m_empty_list; + std::vector m_temp_list; +}; + +class SparseHalfedgeMap { +public: + SparseHalfedgeMap(); + void clear(); + void add_segment(int halfedge_index, int segment_index); + void erase_entry(int halfedge_index); + const std::vector& get_halfedge_segments(int halfedge_index); +private: + std::unordered_map> m_h2segments; + std::vector m_empty_list; +}; + +typedef SparseHalfedgeMap HalfedgeMap; + +/** + * @brief Interface for dual loop tracking on a mesh + * + */ +class DualLoop +{ +public: + virtual void update_under_ccw_flip(const Mesh& m, int halfedge_index) = 0; + virtual std::vector generate_face_sequence(const Mesh& m) const = 0; + virtual std::unique_ptr clone() const = 0; + virtual ~DualLoop() = default; + + /** + * @brief Iterator to iterate over the segments of the dual loop + * + */ + class DualSegmentIterator + { + public: + DualSegmentIterator(const DualLoop& parent, bool is_start = true) + : m_parent(parent) + , m_is_start(is_start) + { + m_start_segment_index = m_parent.get_start_segment_index(); + m_current_segment_index = m_start_segment_index; + } + + DualSegmentIterator& operator++() + { + m_is_start = false; + m_current_segment_index = m_parent.get_next(m_current_segment_index); + return *this; + } + + DualSegmentIterator operator++(int) + { + DualSegmentIterator temp = *this; + ++*this; + return temp; + } + + bool is_equal(const DualSegmentIterator& rhs) const + { + return ( + (m_is_start == rhs.m_is_start) && + (m_current_segment_index == rhs.m_current_segment_index)); + } + + friend bool operator==(const DualSegmentIterator& lhs, const DualSegmentIterator& rhs) + { + return (lhs.is_equal(rhs)); + } + + friend bool operator!=(const DualSegmentIterator& lhs, const DualSegmentIterator& rhs) + { + 
return (!(lhs == rhs)); + } + + bool is_end() + { + return ((!m_is_start) && (m_current_segment_index == m_start_segment_index)); + } + + DualSegment operator*() { return m_parent.get_dual_segment(m_current_segment_index); } + + private: + const DualLoop& m_parent; + bool m_is_start; + int m_start_segment_index; + int m_current_segment_index; + }; + + /** + * @brief Construct an iterator to the beginning of the dual loop + * + * @return loop start iterator + */ + DualSegmentIterator begin() const { return DualSegmentIterator(*this, true); } + + /** + * @brief Construct an iterator to one past the end of the dual loop + * + * @return loop end iterator + */ + DualSegmentIterator end() const { return DualSegmentIterator(*this, false); } + +protected: + virtual int get_next(int segment_index) const = 0; + virtual int get_start_segment_index() const = 0; + virtual DualSegment get_dual_segment(int segment_index) const = 0; +}; + +/** + * @brief Representation of a dual loop on a mesh. This data structure supports iteration over the + * dual loop, local updates for flips in the underlying mesh, and conversions to and from sequences + * of faces on the mesh constituting a closed dual loop. + * + */ +class DualLoopConnectivity : public DualLoop +{ +public: + /** + * @brief Construct a trivial dual loop connectivity + */ + DualLoopConnectivity(); + + /** + * @brief Construct a new dual loop connectivity from a sequence of dual segments + * + * @param dual_loop_segments: sequence of continuous dual segments on the mesh + */ + DualLoopConnectivity(const std::vector& dual_loop_segments); + + /** + * @brief Construct a new dual loop connectivity from a sequence of faces on a mesh. 
+ * + * @param[in] m: mesh + * @param[in] dual_loop_faces: sequence of faces in the mesh (must be adjacent) + */ + DualLoopConnectivity(const Mesh& m, const std::vector& dual_loop_faces); + + std::unique_ptr clone() const override + { + return std::make_unique(*this); + } + + /** + * @brief Update dual loop connectivity after a flip in the underlying mesh. + * + * @param m: underlying mesh (before the flip) + * @param halfedge_index: halfedge to flip + */ + void update_under_ccw_flip(const Mesh& m, int halfedge_index) override; + + /** + * @brief Generate the sequence of faces the dual loop traverses. + * + * @param m: underlying mesh + * @return sequence of faces in the dual loop + */ + std::vector generate_face_sequence(const Mesh& m) const override; + + /** + * @brief Clear all internal data. + * + */ + void clear(); + + /** + * @brief Enable dual loop validity checks (with a large runtime cost); + * + */ + void enable_validity_checks() { m_check_validity = true; } + +protected: + // Connectivity getters + int count_segment_indices() const { return m_next.size(); } + + int get_next(int segment_index) const override + { + assert(is_valid_segment_index(segment_index)); + return m_next[segment_index]; + } + + int get_prev(int segment_index) const + { + assert(is_valid_segment_index(segment_index)); + return m_prev[segment_index]; + } + + int get_start(int segment_index) const + { + assert(is_valid_segment_index(segment_index)); + return m_start[segment_index]; + } + + int get_end(int segment_index) const + { + assert(is_valid_segment_index(segment_index)); + return m_end[segment_index]; + } + + bool is_deleted(int segment_index) const { return m_is_deleted[segment_index]; } + + // Index based segment management + int get_start_segment_index() const override; + DualSegment get_dual_segment(int segment_index) const override; + + // Validity tests + bool is_valid_segment_index(int segment_index) const; + bool is_valid_connectivity() const; + bool 
is_valid_dual_loop(const Mesh& m) const; + +private: + // Segment connectivity + std::vector m_next; + std::vector m_prev; + + // Dual segment information + std::vector m_start; + std::vector m_end; + + // Track garbage collection + std::vector m_is_deleted; + std::deque m_free_indices; + + // Maps from mesh halfedges to segments starting at them + HalfedgeMap m_halfedge_map; + + bool m_check_validity = false; + + // Atomic loop change operations + void split_segment(int segment_index, int halfedge_index, int opposite_halfedge); + void flip_segments( + int first_segment_index, + int second_segment_index, + int halfedge_index, + int opposite_halfedge); + void combine_segments(int first_segment_index, int second_segment_index); + + // Index memory management + int create_segment_index(); + void delete_segment_index(int segment_index); + + // TODO Add garbage collector for resizing +}; + +/** + * @brief Minimal representation for a dual loop as a list of dual segments + * + */ +class DualLoopList : public DualLoop +{ +public: + /** + * @brief Construct a new dual loop list from a sequence of dual segments + * + * @param m: underlying mesh + * @param dual_loop_segments: sequence of continuous dual segments on the mesh + */ + DualLoopList(const std::vector& dual_loop_segments) + : m_dual_path(dual_loop_segments) + {} + + /** + * @brief Construct a new dual loop list from a sequence of faces on a mesh. 
+ * + * @param[in] m: mesh + * @param[in] dual_loop_faces: sequence of faces in the mesh (must be adjacent) + */ + DualLoopList(const Mesh& m, const std::vector& dual_loop_faces) + : m_dual_path(build_dual_path_from_face_sequence(m, dual_loop_faces)) + {} + + std::unique_ptr clone() const override + { + return std::make_unique(*this); + } + + void update_under_ccw_flip(const Mesh& m, int halfedge_index) override + { + update_dual_loop_under_ccw_flip(m, halfedge_index, m_dual_path); + } + + std::vector generate_face_sequence(const Mesh& m) const override + { + return build_face_sequence_from_dual_path(m, m_dual_path); + } + +protected: + int get_next(int segment_index) const override + { + return (segment_index + 1) % count_segment_indices(); + } + + int get_start_segment_index() const override { return 0; } + + DualSegment get_dual_segment(int segment_index) const override + { + return m_dual_path[segment_index]; + } + +private: + std::vector m_dual_path; + + int count_segment_indices() const { return m_dual_path.size(); } +}; + +class DualLoopManager{ +public: + DualLoopManager(int num_edges); + void clear(); + void add_loop(int edge_index, int loop_index); + void register_loop_edges(int loop_index, const Mesh& m, const DualLoop& dual_loop); + void erase_entry(int edge_index); + const std::vector& get_edge_loops(int edge_index); +private: + std::vector m_e_num_loops; + std::vector m_e_first_loop; + std::vector m_e_bucket; + std::vector> m_e2loops; + + // Pre-allocated temporary data structures + std::vector m_empty_list; + std::vector m_temp_list; +}; + +} // namespace Holonomy +} // namespace Penner \ No newline at end of file diff --git a/include/holonomy/holonomy/core/dual_segment.h b/include/holonomy/holonomy/core/dual_segment.h new file mode 100644 index 0000000..29e6621 --- /dev/null +++ b/include/holonomy/holonomy/core/dual_segment.h @@ -0,0 +1,141 @@ +#pragma once + +#include "holonomy/core/common.h" + +namespace Penner { +namespace Holonomy { + +typedef 
std::array DualSegment; + +/** + * @brief Check if a dual segment is valid. + * + * The only condition is that both halfedges of the segment belong to the same face. + * + * @param m: mesh + * @param dual_segment: pair of halfedge indices specifying a dual segment in a face + * @return true if the dual segment is valid + * @return false otherwise + */ +bool is_valid_dual_segment(const Mesh& m, const DualSegment& dual_segment); + +/** + * @brief Determine if a vector of dual segments specifies a valid dual path. + * + * The conditions are: + * - each dual segment is valid + * - sequential dual segments are adjacent in the mesh + * + * @param m: mesh + * @param dual_path: vector of dual segments specifying a dual path + * @return true if the dual path is valid + * @return false otherwise + */ +bool is_valid_dual_path(const Mesh& m, const std::vector& dual_path); + +/** + * @brief Determine if a vector of dual segments specifies a valid dual loop. + * + * The conditions are that the loop is a nontrivial valid dual path and the last segment + * is adjacent to the first. + * + * @param m: mesh + * @param dual_loop: vector of dual segments specifying a dual loop + * @return true if the dual loop is valid + * @return false otherwise + */ +bool is_valid_dual_loop(const Mesh& m, const std::vector& dual_loop); + +/** + * @brief Get the face index containing a dual segment. + * + * @param m: mesh + * @param dual_segment: pair of halfedge indices specifying a dual segment in a face + * @return face containing the segment + */ +int compute_dual_segment_face(const Mesh& m, const DualSegment& dual_segment); + +/** + * @brief Reverse the orientation of a dual segment. + * + * @param dual_segment: pair of halfedge indices specifying a dual segment in a face + * @return reversed dual segment + */ +DualSegment reverse_dual_segment(const DualSegment& dual_segment); + +/** + * @brief Reverse the orientation of a dual path. 
+ * + * @param dual_path: vector of dual segments specifying a dual path + * @return reversed dual path + */ +std::vector reverse_dual_path(const std::vector& dual_path); + +/** + * @brief Construct a sequence of faces on a mesh from a dual loop path. + * + * @param m: mesh + * @param dual_path: dual path composed of dual segments + * @return sequence of faces in the mesh (must be adjacent) + */ +std::vector build_face_sequence_from_dual_path( + const Mesh& m, + const std::vector& dual_path); + +/** + * @brief Construct a sequence of dual segments from a dual loop face sequence + * + * NOTE: The face sequence must constitute a closed dual loop and not just a dual path. + * + * @param m: underlying mesh + * @param dual_loop_faces: closed dual loop on the mesh + * @return vector of dual segments specifying a dual path + */ +std::vector build_dual_path_from_face_sequence( + const Mesh& m, + const std::vector& dual_loop_faces); + +/** + * @brief Update the dual loop for a mesh to be flipped at a given halfedge. + * + * NOTE: Dual path sequence must be a closed loop + * + * @param m: mesh before flip + * @param halfedge_index: halfedge to be flipped + * @param dual_loop: dual loop to be modified + */ +void update_dual_loop_under_ccw_flip( + const Mesh& m, + int halfedge_index, + std::vector& dual_loop); + +/** + * @brief Update the dual loop for a mesh to be flipped according to a sequence. + * + * NOTE: Dual path sequence must be a closed loop + * + * @param m: mesh before flips + * @param flip_seq: sequence of flips to perform + * @param dual_loop: dual loop to be modified + */ +void update_dual_loop_under_ccw_flip_sequence( + const Mesh& m, + const std::vector& flip_seq, + std::vector& dual_loop); + +/** + * @brief View a dual loop on a mesh. 
+ * + * @param V: vertices of the mesh + * @param F: faces of the mesh + * @param m: mesh + * @param dual_loop + */ +void view_dual_path( + const Eigen::MatrixXd& V, + const Eigen::MatrixXi& F, + const Mesh& m, + const std::vector& dual_path); + +} // namespace Holonomy +} // namespace Penner \ No newline at end of file diff --git a/include/holonomy/holonomy/core/field.h b/include/holonomy/holonomy/core/field.h new file mode 100644 index 0000000..e61eb71 --- /dev/null +++ b/include/holonomy/holonomy/core/field.h @@ -0,0 +1,23 @@ + +#pragma once + +#include "holonomy/core/common.h" + +namespace Penner { +namespace Holonomy { + +/** + * @brief Generate a cross field for a mesh + * + * @param V: mesh vertices + * @param F: mesh faces + * @return |F|x3 frame field of per-face field direction vectors + * @return per-vertex cone angles corresponding to the frame field + */ +std::tuple> generate_cross_field( + const Eigen::MatrixXd& V, + const Eigen::MatrixXi& F); + + +} // namespace Holonomy +} // namespace Penner \ No newline at end of file diff --git a/include/holonomy/holonomy/core/forms.h b/include/holonomy/holonomy/core/forms.h new file mode 100644 index 0000000..1c56594 --- /dev/null +++ b/include/holonomy/holonomy/core/forms.h @@ -0,0 +1,133 @@ +#pragma once + +#include "holonomy/core/common.h" +#include "holonomy/core/dual_loop.h" + +namespace Penner { +namespace Holonomy { + +/** + * @brief Determine if a one form is valid. 
+ * + * @param m: mesh + * @param one_form: per-halfedge one-form + * @return true iff the one form is valid + */ +bool is_valid_one_form(const Mesh& m, const VectorX& one_form); + +/** + * @brief Determine if a one form is closed + * + * @param m: mesh + * @param one_form: per-halfedge one-form + * @return true iff the one form is closed + */ +bool is_closed_one_form(const Mesh& m, const VectorX& one_form); + +/** + * @brief Given a list of dual loops, compute a matrix with columns given by the corresponding + * closed one forms for the dual loops. + * + * @param m: mesh + * @param dual_loops: loops defining the one forms + * @return matrix of dual loop one forms + */ +MatrixX build_dual_loop_basis_one_form_matrix( + const Mesh& m, + const std::vector>& dual_loops); + +/** + * @brief Given a mesh and a homology basis, compute a matrix for the space of closed one forms + * with the first |V| (or |V| - 1) columns corresponding to vertex hat function derivatives and the + * last 2g corresponding to the homology basis loops. + * + * @param m: mesh + * @param homology_basis_loops: homology basis loops for the mesh + * @param eliminate_vertex: remove the last vertex of the mesh so that the matrix is full rank + * @return matrix with basis forms as columns + */ +MatrixX build_closed_one_form_matrix( + const Mesh& m, + const std::vector>& homology_basis_loops, + bool eliminate_vertex = false); + +/** + * @brief Compute the matrix that integrates a closed one-form over a cut of the mesh to a disk. 
+ * + * @param m: mesh + * @param cut_h: per-halfedge list of halfedges to cut (or an empty vector for no pregiven cuts) + * @param is_cut_h: per-halfedge list of halfedges that are cut + * @param start_h: (optional) halfedge to start the integration at + * @return matrix representing the linear integration operation + */ +MatrixX build_one_form_integral_matrix( + const Mesh& m, + const std::vector& cut_h, + std::vector& is_cut_h, + int start_h=0); + +/** + * @brief Given a closed one form, compute its integral over a cut of the mesh to a disk. + * + * The integrated form is represented as a per-halfedge attribute, where the value corresponds + * to the integrated value at the tip of the halfedge. The cut is implicitly defined by the + * discontinuities in the resulting per-corner values. + * + * @param m: mesh + * @param one_form: per-halfedge closed one-form + * @param cut_h: per-halfedge list of halfedges to cut (or an empty vector for no pregiven cuts) + * @param is_cut_h: per-halfedge list of halfedges that are cut + * @param start_h: (optional) halfedge to start the integration at + * @return: per-halfedge integrated one-form + */ +VectorX integrate_one_form( + const Mesh& m, + const VectorX& one_form, + const std::vector& cut_h, + std::vector& is_cut_h, + int start_h = 0); + +/** + * @brief Generate matrix to scale the halfedges of a mesh with metric by scale factors associated + * to the tips of halfedges of an integrated one form. + * + * @param m: mesh + */ +MatrixX build_integrated_one_form_scaling_matrix(const Mesh& m); + +/** + * @brief Scale the halfedges of a mesh with metric by scale factors associated to the tips of + * halfedges of an integrated one form. + * + * Since we are using log coordinates, the scaling corresponds to addition of the values at + * corners adjacent to the halfedge. Note that this operation may not preserve discrete metrics + * as values for paired halfedges can be different. 
+ * + * @param m: mesh + * @param metric_coords: initial per-halfedge metric coordinates + * @param integrated_one_form: per-halfedge integrated one-form + * @return scaled metric: metric after scaling + */ +VectorX scale_halfedges_by_integrated_one_form( + const Mesh& m, + const VectorX& metric_coords, + const VectorX& integrated_one_form); + +/** + * @brief Scale the edges of a mesh with metric by scale factors associated to vertices. + * + * Since we are using log coordinates, the scaling corresponds to addition of the values at + * corners adjacent to the halfedge. Note that this operation will preserve discrete metrics. + * + * @param m: mesh + * @param metric_coords: initial per-halfedge metric coordinates + * @param zero_form: per-vertex zero-form + * @return scaled metric: metric after scaling + */ +VectorX scale_edges_by_zero_form( + const Mesh& m, + const VectorX& metric_coords, + const VectorX& zero_form); + +} // namespace Holonomy +} // namespace Penner \ No newline at end of file diff --git a/include/holonomy/holonomy/core/homology_basis.h b/include/holonomy/holonomy/core/homology_basis.h new file mode 100644 index 0000000..d61e157 --- /dev/null +++ b/include/holonomy/holonomy/core/homology_basis.h @@ -0,0 +1,138 @@ + +#pragma once + +#include "holonomy/core/common.h" +#include "holonomy/core/dual_loop.h" +#include "util/spanning_tree.h" + +namespace Penner { +namespace Holonomy { + +/** + * @brief Construct a clockwise dual path around a vertex in the mesh. + * + * @param m: mesh + * @param vertex_index: index of the vertex to build a dual loop around + * @return dual loop composed of dual segments + */ +std::vector build_clockwise_vertex_dual_segment_sequence( + const Mesh& m, + int vertex_index); + +/** + * @brief Construct a counterclockwise dual path around a vertex in the mesh. 
+ * + * @param m: mesh + * @param vertex_index: index of the vertex to build a dual loop around + * @return dual loop composed of dual segments + */ +std::vector build_counterclockwise_vertex_dual_segment_sequence( + const Mesh& m, + int vertex_index); + +/** + * @brief Class to generate a homotopy basis via a tree-cotree decomposition. + */ +class HomotopyBasisGenerator +{ +public: + // Weighting scheme for the tree-cotree construction + enum Weighting { + minimal_homotopy, // Use a shortest path dual tree with maximal dual loop length + // primal cotree (default choice with good theoretical properties) + maximal_homotopy, // Use a longest path dual tree with minimal dual loop length + // primal cotree + dual_min_primal_max, // Use dual edge lengths with a minimal dual tree and maximal primal cotree + primal_min_dual_max // Use primal edge lengths with a minimal primal tree and maximal dual cotree + }; + + /** + * @brief Construct a new Homotopy Basis Generator object on a mesh + * + * @param m: mesh + * @param root (optional) root vertex (or dual-vertex) for the tree construction + * @param weighting (optional) weighting for the tree-cotree construction + */ + HomotopyBasisGenerator( + const Mesh& m, + int root = 0, + Weighting weighting = Weighting::minimal_homotopy); + + /** + * @brief Get number of homology basis loops (twice the genus) + * + * @return number of homology basis loops + */ + int n_homology_basis_loops() const { return m_homotopy_basis_edge_handles.size(); } + + /** + * @brief Construct a dual loop corresponding to the homotopy basis loop with given index + * + * NOTE: The dual loop is rooted and thus generally is not simple + * + * @param index: index of the homotopy basis loop + * @return sequence of faces defining the dual loop + * @return sequence of edges between the dual loop faces + */ + std::tuple, std::vector> construct_homotopy_basis_edge_loop( + int index) const; + + /** + * @brief Construct a dual loop corresponding to the homotopy 
basis loop with given index with + * the path to the root contracted to make the loop simple + * + * @return sequence of faces defining the dual loop + * @return sequence of edges between the dual loop faces + */ + std::tuple, std::vector> construct_homology_basis_edge_loop( + int index) const; + + /** + * @brief Construct a dual loop corresponding to the homotopy basis loop with given index with + * the path to the root contracted to make the loop simple + * + * @return sequence of faces defining the dual loop + */ + std::vector construct_homology_basis_loop(int index) const; + + /** + * @brief Get the edge handle for the homotopy basis loop with the given index that forms a + * homotopy basis cycle when added to the dual tree + * + * @param index: index of the homotopy basis loop + * @return edge index in the mesh of the handle edge + */ + int homotopy_basis_handle(int index) const { return m_homotopy_basis_edge_handles[index]; } + + /** + * @brief Get the primal tree object used for the tree-cotree construction + * + * @return primal tree reference + */ + const PrimalTree& get_primal_tree() const { return m_primal_tree; } + + /** + * @brief Get the dual tree object used for the tree-cotree construction + * + * @return dual tree reference + */ + const DualTree& get_dual_tree() const { return m_dual_tree; } + + +private: + Mesh m_mesh; + std::vector m_he2e; + std::vector m_e2he; + + PrimalTree m_primal_tree; + DualTree m_dual_tree; + std::vector m_homotopy_basis_edge_handles; + + std::tuple, std::vector> trace_dual_vertex_to_root(int face_index) const; +}; + +typedef HomotopyBasisGenerator HomologyBasisGenerator; + + +} // namespace Holonomy +} // namespace Penner \ No newline at end of file diff --git a/include/holonomy/holonomy/core/intrinsic_field.h b/include/holonomy/holonomy/core/intrinsic_field.h new file mode 100644 index 0000000..12ca381 --- /dev/null +++ b/include/holonomy/holonomy/core/intrinsic_field.h @@ -0,0 +1,62 @@ +#pragma once + +#include 
"holonomy/core/common.h" + +namespace Penner { +namespace Holonomy { + +class IntrinsicNRosyField +{ +public: + IntrinsicNRosyField() {}; + VectorX run(const Mesh& m); + VectorX run_with_viewer( + const Mesh& m, + const std::vector& vtx_reindex, + const Eigen::MatrixXd& V); + + Scalar min_angle = 0.; + +private: + // Local frames + VectorX theta; // per-face angle from local frame to face vector + VectorX kappa; // per-halfedge angle between reference frames + std::vector face_reference_halfedge; // index of reference halfedges + std::vector is_face_fixed; + bool constrain_bd; + + // Period jumps + std::vector he2e; + std::vector e2he; + Eigen::VectorXi period_jump; + VectorX period_value; + std::vector is_period_jump_fixed; + + // Metric information + VectorX he2angle; + VectorX he2cot; + + // MI system + std::vector face_var_id; + std::vector halfedge_var_id; + MatrixX A; + VectorX b; + Eigen::Matrix C; + + Scalar compute_angle_to_reference(const Mesh& m, const VectorX& he2angle, int h) const; + Scalar compute_angle_between_frames(const Mesh& m, const VectorX& he2angle, int h) const; + + void initialize_local_frames(const Mesh& m); + void initialize_period_jump(const Mesh& m); + void initialize_mixed_integer_system(const Mesh& m); + + void initialize_double_local_frames(const Mesh& m); + void initialize_double_period_jump(const Mesh& m); + void initialize_double_mixed_integer_system(const Mesh& m); + + void solve(const Mesh& m); + VectorX compute_rotation_form(const Mesh& m); +}; + +} // namespace Holonomy +} // namespace Penner diff --git a/include/holonomy/holonomy/core/quality.h b/include/holonomy/holonomy/core/quality.h new file mode 100644 index 0000000..aac459b --- /dev/null +++ b/include/holonomy/holonomy/core/quality.h @@ -0,0 +1,33 @@ +#include "holonomy/core/common.h" + +namespace Penner { +namespace Holonomy { + +/** + * @brief Compute the triangle quality as the ratio of outradius to inradius. 
+ * + * @param lij: first edge length + * @param ljk: second edge length + * @param lki: third edge length + * @return triangle quality measure + */ +Scalar compute_triangle_quality(Scalar lij, Scalar ljk, Scalar lki); + + /** + * @brief Compute the mesh per-face triangle qualities + * + * @param cone_metric: mesh with metric + * @return: per-face triangle quality measures + */ +VectorX compute_mesh_quality(const DifferentiableConeMetric& cone_metric); + +/** + * @brief Compute the minimum corner angle of the mesh + * + * @param cone_metric: mesh with metric + * @return: minimum corner angle + */ +Scalar compute_min_angle(const DifferentiableConeMetric& cone_metric); + +} // namespace Holonomy +} // namespace Penner \ No newline at end of file diff --git a/include/holonomy/holonomy/core/viewer.h b/include/holonomy/holonomy/core/viewer.h new file mode 100644 index 0000000..2d6d218 --- /dev/null +++ b/include/holonomy/holonomy/core/viewer.h @@ -0,0 +1,146 @@ + +#pragma once + +#include "holonomy/core/common.h" +#include "holonomy/holonomy/marked_penner_cone_metric.h" + +#ifdef ENABLE_VISUALIZATION +#include "polyscope/surface_mesh.h" +#endif + +namespace Penner { +namespace Holonomy { + +#ifdef ENABLE_VISUALIZATION +extern glm::vec3 BEIGE; +extern glm::vec3 BLACK_BROWN; +extern glm::vec3 TAN; +extern glm::vec3 MUSTARD; +extern glm::vec3 FOREST_GREEN; +#endif + +// TODO Refactor and add some more convenient viewers + +std::tuple generate_cone_vertices( + const Eigen::MatrixXd& V, + const std::vector& vtx_reindex, + const Mesh& m); + +std::tuple generate_closed_cone_vertices( + const Eigen::MatrixXd& V, + const std::vector& Th_hat); + +std::tuple generate_mesh_faces( + const Mesh& m, + const std::vector& vtx_reindex); + +std::tuple generate_doubled_mesh( + const Eigen::MatrixXd& V, + const Mesh& m, + const std::vector& vtx_reindex); + +VectorX generate_FV_halfedge_data(const Eigen::MatrixXi& F_halfedge, const VectorX& halfedge_data); + +Eigen::MatrixXd 
generate_subset_vertices( + const Eigen::MatrixXd& V, + const std::vector& vertex_indices); + +void view_frame_field( + const Eigen::MatrixXd& V, + const Eigen::MatrixXi& F, + const Eigen::MatrixXd& frame_field, + const std::vector& Th_hat, + std::string mesh_handle=""); + +void view_rotation_form( + const Mesh& m, + const std::vector& vtx_reindex, + const Eigen::MatrixXd& V, + const VectorX& rotation_form, + const std::vector& Th_hat, + std::string mesh_handle="", + bool show=true); + +void view_mesh_quality( + const Eigen::MatrixXd& V, + const Eigen::MatrixXi& F, + std::string mesh_handle="", + bool show=true); + +void view_mesh_topology( + const Eigen::MatrixXd& V, + const Eigen::MatrixXi& F, + std::string mesh_handle="", + bool show=true); + +void view_parameterization( + const Eigen::MatrixXd& V, + const Eigen::MatrixXi& F, + const Eigen::MatrixXd& uv, + const Eigen::MatrixXi& FT, + std::string mesh_handle="", + bool show=true); + +void view_layout( + const Eigen::MatrixXd& uv, + const Eigen::MatrixXi& FT, + std::string mesh_handle="", + bool show=true); + +void view_dual_graph( + const Eigen::MatrixXd& V, + const Mesh& m, + const std::vector& vtx_reindex, + const std::vector is_edge); + +void view_primal_graph( + const Eigen::MatrixXd& V, + const Mesh& m, + const std::vector& vtx_reindex, + const std::vector is_edge); + +void view_triangulation( + const Eigen::MatrixXd& V, + const Eigen::MatrixXi& F, + const std::vector& fn_to_f, + std::string mesh_handle="", + bool show=true); + +void view_constraint_error( + const MarkedPennerConeMetric& marked_metric, + const std::vector& vtx_reindex, + const Eigen::MatrixXd& V, + std::string mesh_handle="", + bool show=true); + +void view_quad_mesh( + const Eigen::MatrixXd& V, + const Eigen::MatrixXi& F, + std::string mesh_handle="", + bool show=true); + +void view_vertex_function( + const Mesh& m, + const std::vector& vtx_reindex, + const Eigen::MatrixXd& V, + const std::vector& vertex_function, + std::string 
mesh_handle="", + bool show=true); +void view_vertex_function( + const Mesh& m, + const std::vector& vtx_reindex, + const Eigen::MatrixXd& V, + const VectorX& vertex_function, + std::string mesh_handle="", + bool show=true); + +void view_independent_vertex_function( + const Mesh& m, + const std::vector& vtx_reindex, + const Eigen::MatrixXd& V, + const VectorX& vertex_function, + std::string mesh_handle="", + bool show=true); + +} // namespace Holonomy +} // namespace Penner \ No newline at end of file diff --git a/include/holonomy/holonomy/holonomy/cones.h b/include/holonomy/holonomy/holonomy/cones.h new file mode 100644 index 0000000..5ca4d53 --- /dev/null +++ b/include/holonomy/holonomy/holonomy/cones.h @@ -0,0 +1,80 @@ +#pragma once + +#include "holonomy/core/common.h" +#include "holonomy/holonomy/marked_penner_cone_metric.h" + +namespace Penner { +namespace Holonomy { + +/** + * @brief Compute the cones from a rotation form on an intrinsic mesh. + * + * @param m: mesh with metric + * @param rotation_form: per-halfedge rotation form + * @return per-vertex cones corresponding to the rotation form + */ +std::vector generate_cones_from_rotation_form( + const Mesh& m, + const VectorX& rotation_form); + +/** + * @brief Compute the cones from a rotation form on an extrinsic mesh with reindexed vertices. 
+ * + * @param m: mesh with metric + * @param vtx_reindex: map from halfedge to VF vertex indices + * @param rotation_form: per-halfedge rotation form + * @param has_boundary: (optional) if true, treat mesh as a doubled mesh with boundary + * @return per-vertex cones corresponding to the rotation form + */ +std::vector generate_cones_from_rotation_form( + const Mesh& m, + const std::vector& vtx_reindex, + const VectorX& rotation_form, + bool has_boundary=false); + +/** + * @brief Determine if a mesh cone prescription corresponds to a trivial torus + * + * @param m: mesh with cone constraints + * @return true if the cones correspond to a trivial torus + * @return false otherwise + */ +bool is_trivial_torus(const Mesh& m); + +/** + * @brief Check if the cones are valid for seamless holonomy constraints + * + * Checks both for invalid cones that cannot be satisfied independently (i.e., a negative + * or zero cone) and for cones that cannot be satisfied with seamless holonomy constraints + * (i.e., a torus with a pair of cones). + * + * WARNING: This does not check for trivial torus constraints; these must be accounted for by removing + * holonomy constraints, as the trivial torus only supports trivial topology. + * + * @param m: mesh with per-vertex cone angles + * @return true if the cones are valid for seamless holonomy constraints + * @return false otherwise + */ +bool validate_cones(const Mesh& m); + +/** + * @brief Given target cone angles, fix any problems that prevent them from being valid + * for seamless holonomy constraints.
+ * + * @param m: mesh with cone constraints + * @param min_cone_index: replace cones smaller index + */ +void fix_cones(Mesh& m, int min_cone_index=1); + +void add_random_cone_pair(Mesh& m, bool only_interior=false); + +std::tuple get_constraint_outliers( + MarkedPennerConeMetric& marked_metric, + bool use_interior_vertices=true, + bool use_flat_vertices=true); +std::tuple add_optimal_cone_pair(MarkedPennerConeMetric& marked_metric); + +void make_interior_free(Mesh& m); + +} // namespace Holonomy +} // namespace Penner \ No newline at end of file diff --git a/include/holonomy/holonomy/holonomy/constraint.h b/include/holonomy/holonomy/holonomy/constraint.h new file mode 100644 index 0000000..930d1b9 --- /dev/null +++ b/include/holonomy/holonomy/holonomy/constraint.h @@ -0,0 +1,104 @@ + +#pragma once + +#include "holonomy/core/common.h" +#include "holonomy/holonomy/marked_penner_cone_metric.h" + +namespace Penner { +namespace Holonomy { +/** + * @brief Compute vertex holonomy angles for a mesh with given angles + * + * @param[in] m: mesh topology + * @param[in] alpha: per-halfedge angles for the mesh + * @return vector of vertex holonomy angles + */ +VectorX Theta(const Mesh& m, const VectorX& alpha); + +/** + * @brief Compute dual loop holonomy angles for a mesh with given angles + * + * @param[in] m: mesh topology + * @param[in] homology_basis_loops: dual loops for holonomy angle computation + * @param[in] alpha: per-halfedge angles for the mesh + * @return vector of dual loop holonomy angles + */ +VectorX +Kappa(const Mesh& m, const std::vector>& homology_basis_loops, const VectorX& alpha); + +/** + * @brief Compute vertex cone holonomy constraints + * + * @param[in] marked_metric: marked mesh with metric + * @param[in] angles: per-corner angles of the metric + * @return vector of vertex constraint errors + */ +VectorX compute_vertex_constraint( + const MarkedPennerConeMetric& marked_metric, + const VectorX& angles); + +/** + * @brief Compute vertex and dual loop 
holonomy constraints + * + * @param[in] marked_metric: marked mesh with metric + * @param[in] angles: per-corner angles of the metric + * @param[in] only_free_vertices: (optional) only add constraints for free vertices if true + * @return vector of holonomy constraint errors + */ +VectorX compute_metric_constraint( + const MarkedPennerConeMetric& marked_metric, + const VectorX& angles, + bool only_free_vertices=true); + +/** + * @brief Compute the jacobian of vertex and dual loop holonomy constraints with respect + * to the metric coordinates + * + * @param[in] marked_metric: marked mesh with metric + * @param[in] cotangents: per-corner cotangent angles of the metric + * @param[in] only_free_vertices: (optional) only add constraints for free vertices if true + * @return holonomy constraint error jacobian matrix + */ +MatrixX compute_metric_constraint_jacobian( + const MarkedPennerConeMetric& marked_metric, + const VectorX& cotangents, + bool only_free_vertices=true); + +MatrixX compute_holonomy_matrix( + const Mesh& m, + const std::vector& v_map, + const std::vector>& dual_loops, + int num_vertex_forms); + +/** + * @brief Compute the holonomy constraints and the jacobian with respect to metric coordinates. 
+ * + * @param[in] marked_metric: marked mesh with metric + * @param[out] constraint: vector of one form constraint errors + * @param[out] J_constraint: one form constraint error jacobian matrix + * @param[in] need_jacobian: (optional) only build jacobian if true + * @param[in] only_free_vertices: (optional) only add constraints for free vertices if true + */ +void compute_metric_constraint_with_jacobian( + const MarkedPennerConeMetric& similarity_metric, + VectorX& constraint, + MatrixX& J_constraint, + bool need_jacobian=true, + bool only_free_vertices=true); + + +void add_vertex_constraints( + const MarkedPennerConeMetric& marked_metric, + const std::vector v_map, + const VectorX& angles, + VectorX& constraint, + int offset = 0); + +void add_basis_loop_constraints( + const MarkedPennerConeMetric& marked_metric, + const VectorX& angles, + VectorX& constraint, + int offset = 0); + +} // namespace Holonomy +} // namespace Penner \ No newline at end of file diff --git a/include/holonomy/holonomy/holonomy/holonomy.h b/include/holonomy/holonomy/holonomy/holonomy.h new file mode 100644 index 0000000..d7ce7e5 --- /dev/null +++ b/include/holonomy/holonomy/holonomy/holonomy.h @@ -0,0 +1,49 @@ +#pragma once + +#include "holonomy/core/common.h" +#include "holonomy/holonomy/marked_penner_cone_metric.h" + +namespace Penner { +namespace Holonomy { + +/** + * @brief Compute the holonomy angle with respect to a metric along a dual segment. + * + * @param m: Delaunay mesh + * @param he2angle: map from halfedges to opposing angle in a given metric + * @param dual_segment: dual segment in a face + * @return holonomy of the dual segment + */ +Scalar compute_dual_segment_holonomy( + const Mesh& m, + const VectorX& he2angle, + const DualSegment& dual_segment); + +/** + * @brief Compute the holonomy angle with respect to a metric along a dual loop. 
+ * + * @param m: Delaunay mesh + * @param he2angle: map from halfedges to opposing angle in a given metric + * @param dual_loop: vector of dual segments specifying a dual loop + * @return holonomy of the dual loop + */ +Scalar compute_dual_loop_holonomy( + const Mesh& m, + const VectorX& he2angle, + const DualLoop& dual_loop); + +/** + * @brief Compute the rotation angle with respect to a rotation form along a dual loop. + * + * @param m: mesh + * @param rotation_form: map from halfedges to rotation across that edge + * @param dual_loop: vector of dual segments specifying a dual loop + * @return rotation along dual loop + */ +Scalar compute_dual_loop_rotation( + const Mesh& m, + const VectorX& rotation_form, + const DualLoop& dual_loop); + +} // namespace Holonomy +} // namespace Penner \ No newline at end of file diff --git a/include/holonomy/holonomy/holonomy/marked_penner_cone_metric.h b/include/holonomy/holonomy/holonomy/marked_penner_cone_metric.h new file mode 100644 index 0000000..73604b4 --- /dev/null +++ b/include/holonomy/holonomy/holonomy/marked_penner_cone_metric.h @@ -0,0 +1,169 @@ +#pragma once + +#include "holonomy/core/common.h" +#include "holonomy/core/dual_loop.h" +#include "holonomy/core/homology_basis.h" + +#include "optimization/core/cone_metric.h" + +namespace Penner { +namespace Holonomy { + +// TODO Refactor this and cone metric for more minimal inheritance + +/** + * @brief Check if a mesh is valid + * + * @param m: mesh to check + * @return true if the mesh is valid + * @return false otherwise + */ +bool is_valid_mesh(const Mesh& m); + +/** + * @brief Class to represent a mesh with a Penner metric and homology basis markings + */ +class MarkedPennerConeMetric : public Optimization::PennerConeMetric +{ +public: + // Additional constraints for homology loops + std::vector kappa_hat; + + // TODO move + VectorX original_coords; + + /** + * @brief Construct a new Marked Penner Cone Metric object with given metric coordinates + * and dual loop 
markings with holonomy constraints. + * + * @param m: mesh connectivity + * @param metric_coords: initial metric coordinates + * @param homology_basis_loops: homology basis loops for the surface + * @param kappa: holonomy constraints on the basis loops + */ + MarkedPennerConeMetric( + const Mesh& m, + const VectorX& metric_coords, + const std::vector>& homology_basis_loops, + const std::vector& kappa); + + MarkedPennerConeMetric(const MarkedPennerConeMetric& marked_metric); + void operator=(const MarkedPennerConeMetric& m); + + /** + * @brief Reset the connectivity and dual loops to that of another mesh of the same size. + * + * @param m: mesh with the same element counts as the current mesh + */ + void reset_marked_metric(const MarkedPennerConeMetric& m); + + /** + * @brief Change the metric of the given mesh given new coordinates on the original + * connectivity. + * + * The new metric is assumed to be defined on the same initial connectivity as the current + * metric but with potentially new metric coordinates. 
+ * + * TODO: Move to base class + * + * @param m: mesh used to initialize the current mesh + * @param metric_coords: new metric coordinates + * @param need_jacobian: (optional) track change of metric jacobian if true + * @param do_repeat_flips: (optional) repeat flips to restore current connectivity if true + */ + void change_metric( + const MarkedPennerConeMetric& m, + const VectorX& metric_coords, + bool need_jacobian = true, + bool do_repeat_flips = false); + + /** + * @brief Get number of homology basis loops + * + * @return homology basis loop count + */ + int n_homology_basis_loops() const { return m_homology_basis_loops.size(); } + + /** + * @brief Get the homology basis loops + * + * @return const reference to the homology basis loops + */ + const std::vector>& get_homology_basis_loops() const + { + return m_homology_basis_loops; + } + + /////////////////////////// + // Virtual Method Overrides + /////////////////////////// + + /** + * @brief Clone the differentiable cone metric + * + * @return pointer to a copy of the cone metric + */ + virtual std::unique_ptr clone_cone_metric() const override + { + return std::make_unique(MarkedPennerConeMetric(*this)); + } + + /** + * @brief Clone the differentiable cone metric with new metric coordinates + * + * TODO: Make this non-virtual and implement with change metric + * + * @return pointer to a copy of the cone metric with new metric coordinates + */ + virtual std::unique_ptr set_metric_coordinates( + const VectorX& reduced_metric_coords) const override; + + // TODO: Remove scale conformally from cone metric interface + + // TODO Use full holonomy constraints + virtual bool constraint( + VectorX& constraint, + MatrixX& J_constraint, + bool need_jacobian, + bool only_free_vertices) const override; + + // TODO Use Newton + virtual std::unique_ptr project_to_constraint( + SolveStats& solve_stats, + std::shared_ptr proj_params = + nullptr) const override; + + // Flip method + virtual bool flip_ccw(int _h, bool 
Ptolemy = true) override; + + virtual VectorX constraint(const VectorX& angles); + + virtual MatrixX constraint_jacobian(const VectorX& cotangents); + + Scalar max_constraint_error() const; + + virtual std::unique_ptr clone_marked_metric() const + { + return std::make_unique(MarkedPennerConeMetric(*this)); + } + + virtual void write_status_log(std::ostream& stream, bool write_header=false); + +protected: + std::vector> m_homology_basis_loops; + DualLoopManager m_dual_loop_manager; + void reset_connectivity(const MarkedPennerConeMetric& m); + void reset_markings(const MarkedPennerConeMetric& m); +}; + + +void view_homology_basis( + const MarkedPennerConeMetric& marked_metric, + const std::vector& vtx_reindex, + const Eigen::MatrixXd& V, + int num_homology_basis_loops=-1, + std::string mesh_handle="", + bool show=true); + +} // namespace Holonomy +} // namespace Penner \ No newline at end of file diff --git a/include/holonomy/holonomy/holonomy/newton.h b/include/holonomy/holonomy/holonomy/newton.h new file mode 100644 index 0000000..9589f92 --- /dev/null +++ b/include/holonomy/holonomy/holonomy/newton.h @@ -0,0 +1,227 @@ +#pragma once + +#include "holonomy/core/common.h" +#include "util/io.h" +#include "holonomy/holonomy/marked_penner_cone_metric.h" +#include + +#include + +namespace Penner { +namespace Holonomy { + +/** + * @brief Parameters for Newton holonomy optimization + * + */ +struct NewtonParameters +{ + std::string output_dir = ""; // directory name for generating all stats + bool error_log = false; // when true: write out per-newton iterations stats + int checkpoint_frequency = 0; // when nonzero: write out checkpoint state every n iterations + bool reset_lambda = true; // when true: start with lambda = lambda0 for each newton iteration; + // if false, start with lambda from the previous + bool do_reduction = + false; // when true: reduce step, if the components of descent direction vary too much + Scalar lambda0 = 1.0; // starting lambda value for the line
search, normally 1 + Scalar error_eps = 0; // max angle error tolerance, terminate if below + int max_itr = 500; // upper bound for newton iterations + double max_time = 1e10; // upper bound for runtime (in seconds) before termination + Scalar min_lambda = 1e-16; // terminate if lambda drops below this threshold + int log_level = -1; // controlling detail of console logging + Scalar bound_norm_thres = 1e-8; // line step threshold to stop bounding the error norm + +#ifdef USE_SUITESPARSE + std::string solver = "cholmod"; // solver to use for pseudoinverse computation +#else + std::string solver = "ldlt"; // solver to use for pseudoinverse computation +#endif +}; + +/** + * @brief Per iteration data log for Newton optimization. + * + * This data is used for writing per iteration data to file, but can also be used for extracting + * final optimization status. + * + */ +struct NewtonLog +{ + int num_iter = 0; // iterations of Newton descent performed + Scalar max_error = 0.0; // maximum holonomy error (sup norm) + Scalar step_size = 0.0; // step size taken along the Newton descent direction + int num_flips = 0; // number of flips to make delaunay from initial connectivity + + Scalar l2_energy = 0.0; // l2 deviation from original metric coordinates + Scalar rmse = 0.0; // root-mean-square-error of metric coordinates + Scalar rrmse = 0.0; // relative-root-mean-square-error of metric coordinates + Scalar rmsre = 0.0; // root-mean-square-relative-error of metric coordinates + + double time = 0.0; // time since start of Newton optimization + double solve_time = + 0.0; // iteration time spent solving the linear system for the descent direction + double constraint_time = 0.0; // iteration time spent constructing the constraint and jacobian + double direction_time = + 0.0; // iteration time spent finding the descent direction (includes solve time) + double line_search_time = 0.0; // time spent in the line search along the Newton direction + + Scalar min_corner_angle = 0.0; // 
minimum angle at a corner + Scalar max_corner_angle = 0.0; // maximum angle at a corner + + Scalar direction_angle_change = 0.0; // angle between current and previous iteration descent direction + + Scalar direction_norm = 0.0; // norm of the Newton descent direction + Scalar direction_residual = 0.0; // residual ||Ax - b|| of the linear solve + + Scalar error_norm_sq; // TODO + Scalar proj_grad; // TODO +}; + +/** + * @brief Optimize holonomy constraints at vertices and along dual loop markings on the marked metric. + * + * This optimization minimizes deviation of the computed holonomy from constraints, and is expected + * to produce solutions that satisfy the constraints up to near numerical precision. Constraints at + * vertices (satisfying Gauss-Bonnet) and along a full system of loops on the surface are sufficient + * to completely constrain the holonomy of any loop on the surface. + * + * @param initial_marked_metric: mesh with metric, dual loop markings, and holonomy constraint values + * @param alg_params: parameters for the optimization + * @return mesh with metric optimized to satisfy holonomy constraints + */ +MarkedPennerConeMetric optimize_metric_angles( + const MarkedPennerConeMetric& initial_marked_metric, + const NewtonParameters& alg_params); + +/** + * @brief Optimize holonomy constraints at vertices and along dual loop markings on the marked metric using + * a subspace of the metric coordinate space.
+ * + * @param initial_marked_metric: mesh with metric, dual loop markings, and holonomy constraint values + * @param metric_basis_matrix: matrix with basis vectors for the metric coordinate space as columns + * @param alg_params: parameters for the optimization + * @return mesh with metric optimized to satisfy holonomy constraints + */ +MarkedPennerConeMetric optimize_subspace_metric_angles( + const MarkedPennerConeMetric& initial_marked_metric, + const MatrixX& metric_basis_matrix, + const NewtonParameters& alg_params); + +/** + * @brief Optimize holonomy constraints at vertices and along dual loop markings on the marked metric using + * a subspace of the metric coordinate space with exposed log for analysis of the final optimization state. + * + * @param initial_marked_metric: mesh with metric, dual loop markings, and holonomy constraint values + * @param metric_basis_matrix: matrix with basis vectors for the metric coordinate space as columns + * @param alg_params: parameters for the optimization + * @param log: Newton iteration log + * @return mesh with metric optimized to satisfy holonomy constraints + */ +MarkedPennerConeMetric optimize_subspace_metric_angles_log( + const MarkedPennerConeMetric& initial_marked_metric, + const MatrixX& metric_basis_matrix, + const NewtonParameters& alg_params, + NewtonLog& log); + +/** + * @brief Add the state of the optimized metric to the viewer.
+ * + * @param marked_metric: mesh with initial metric + * @param marked_metric: mesh with metric after optimization + * @param vtx_reindex: map from halfedge to VF vertex indices + * @param V: input mesh vertices + * @param mesh_handle: (optional) handle for mesh in viewer + * @param show: (optional) show viewer if true + */ +void view_optimization_state( + const MarkedPennerConeMetric& init_marked_metric, + const MarkedPennerConeMetric& marked_metric, + const std::vector& vtx_reindex, + const Eigen::MatrixXd& V, + std::string mesh_handle="", + bool show=true); + +class OptimizeHolonomyNewton +{ +public: + MarkedPennerConeMetric run( + const MarkedPennerConeMetric& initial_marked_metric, + const MatrixX& metric_basis_matrix, + const NewtonParameters& input_alg_params); + + OptimizeHolonomyNewton() {} + + NewtonLog get_log() { return log; } + +protected: + // Metric data + VectorX reduced_metric_init; + VectorX reduced_metric_coords; + VectorX alpha; + VectorX cot_alpha; + + // Constraint and descent direction data + VectorX constraint; + MatrixX J; + VectorX descent_direction; + + // Previous descent direction data (for logging) + VectorX prev_descent_direction; + + // Algorithm data + Scalar lambda; + NewtonParameters alg_params; + + // Logging data + std::string checkpoint_dir; + std::ofstream log_file; + std::ofstream timing_file; + std::ofstream energy_file; + std::ofstream stability_file; + std::ofstream metric_status_file; + igl::Timer timer; + NewtonLog log; + std::unique_ptr l2_energy; + + void initialize_logging(); + void initialize_metric_status_log(MarkedPennerConeMetric& marked_metric); + + void initialize_data_log(); + void write_data_log_entry(); + + void initialize_timing_log(); + void write_timing_log_entry(); + + void initialize_energy_log(); + void write_energy_log_entry(); + + void initialize_stability_log(); + void write_stability_log_entry(); + + void initialize_logs(); + void write_log_entries(); + void close_logs(); + + void 
initialize_checkpoints(); + void checkpoint_direction(); + void checkpoint_metric(const MarkedPennerConeMetric& marked_metric); + + void update_log_error(const MarkedPennerConeMetric& marked_metric); + + void solve_linear_system(const MatrixX& metric_basis_matrix); + + void update_lambda(); + void update_holonomy_constraint(MarkedPennerConeMetric& marked_metric); + void update_descent_direction( + MarkedPennerConeMetric& marked_metric, + const MatrixX& metric_basis_matrix); + + void perform_line_search( + const MarkedPennerConeMetric& initial_marked_metric, + MarkedPennerConeMetric& marked_metric); + + bool is_converged(); +}; + +} // namespace Holonomy +} // namespace Penner \ No newline at end of file diff --git a/include/holonomy/holonomy/holonomy/rotation_form.h b/include/holonomy/holonomy/holonomy/rotation_form.h new file mode 100644 index 0000000..39759ea --- /dev/null +++ b/include/holonomy/holonomy/holonomy/rotation_form.h @@ -0,0 +1,43 @@ +#pragma once + +#include "holonomy/core/common.h" + +namespace Penner { +namespace Holonomy { + +/** + * @brief Parameters for cross field generation + * + */ +struct FieldParameters +{ + Scalar min_angle = 0.; // minimum allowed cone angle in the cross field +}; + +/** + * @brief Given a mesh with a per-face frame field, compute the per-halfedge rotation form. + * + * @param m: halfedge mesh + * @param vtx_reindex: map from halfedge to VF vertex indices + * @param V: mesh vertices + * @param F: mesh faces + * @param frame_field: |F|x3 frame field of per-face field direction vectors + * @return per-halfedge rotation form + */ +VectorX generate_rotation_form_from_cross_field( + const Mesh& m, + const std::vector& vtx_reindex, + const Eigen::MatrixXd& V, + const Eigen::MatrixXi& F, + const Eigen::MatrixXd& frame_field); + +VectorX generate_intrinsic_rotation_form(const Mesh& m, const FieldParameters& field_params); + +VectorX generate_intrinsic_rotation_form( + const Mesh& m, + const std::vector& vtx_reindex, + const Eigen::MatrixXd& V, + const FieldParameters& 
field_params); + +} // namespace Holonomy +} // namespace Penner \ No newline at end of file diff --git a/include/holonomy/holonomy/interface.h b/include/holonomy/holonomy/interface.h new file mode 100644 index 0000000..9963e84 --- /dev/null +++ b/include/holonomy/holonomy/interface.h @@ -0,0 +1,209 @@ +#pragma once + +#include "holonomy/core/common.h" +#include "holonomy/core/field.h" +#include "holonomy/holonomy/rotation_form.h" +#include "holonomy/holonomy/marked_penner_cone_metric.h" +#include "holonomy/similarity/similarity_penner_cone_metric.h" + +namespace Penner { +namespace Holonomy { + +/** + * @brief Parameters for marked metric construction + * + */ +struct MarkedMetricParameters +{ + typedef HomotopyBasisGenerator::Weighting Weighting; + + bool use_initial_zero = false; // use initial zero Penner coordinates + bool use_log_length = false; // use initial log length coordinates instead of Penner + bool remove_loop_constraints = false; // don't set dual loop holonomy constraints if true + int max_loop_constraints = -1; // set maximum number of loop constraints if positive + int max_boundary_constraints = -1; // set maximum number of boundary constraints if positive + Weighting weighting = Weighting::minimal_homotopy; // weighting for tree-cotree + bool remove_symmetry = false; // remove symmetry structure from doubled mesh + bool free_interior = false; // remove interior cone constraints +}; + +/** + * @brief Generate a mesh with metric from a VF mesh and cones. 
+ * + * @param V: mesh vertices + * @param F: mesh faces + * @param uv: mesh metric vertices + * @param F_uv: mesh metric faces + * @param Th_hat: per-vertex cone angles + * @param free_cones: (optional) list of cones to leave free + * @return mesh with metric + * @return vertex reindexing from the halfedge to VF vertices + */ +std::tuple, std::vector> +generate_mesh( + const Eigen::MatrixXd& V, + const Eigen::MatrixXi& F, + const Eigen::MatrixXd& uv, + const Eigen::MatrixXi& F_uv, + const std::vector& Th_hat, + std::vector free_cones={}); + +/** + * @brief Generate a marked metric from a VF mesh, cones, and rotation form. + * + * @param V: mesh vertices + * @param F: mesh faces + * @param uv: mesh metric vertices + * @param F_uv: mesh metric faces + * @param Th_hat: per-vertex cone angles + * @param rotation_form: per-halfedge rotation values + * @param free_cones: list of cones to leave free + * @param marked_mesh_params: (optional) parameters for the marked mesh construction + * @return marked cone metric + * @return vertex reindexing from the halfedge to VF vertices + */ +std::tuple> generate_marked_metric( + const Eigen::MatrixXd& V, + const Eigen::MatrixXi& F, + const Eigen::MatrixXd& uv, + const Eigen::MatrixXi& F_uv, + const std::vector& Th_hat, + const VectorX& rotation_form, + std::vector free_cones, + MarkedMetricParameters marked_mesh_params = MarkedMetricParameters()); + +/** + * @brief Generate a marked metric from a VF mesh using the embedding metric and holonomy + * constraints inferred from a fit cross-field. 
+ * + * @param V: mesh vertices + * @param F: mesh faces + * @param use_intrinsic: (optional) if true, use intrinsic instead of extrinsic frame field + * @param marked_mesh_params: (optional) parameters for the marked mesh construction + * @return marked cone metric + * @return vertex reindexing from the halfedge to VF vertices + * @return per-halfedge rotation form + * @return per-vertex cone angles + */ +std::tuple, VectorX, std::vector> +infer_marked_metric( + const Eigen::MatrixXd& V, + const Eigen::MatrixXi& F, + bool use_intrinsic=false, + MarkedMetricParameters marked_mesh_params = MarkedMetricParameters()); + +/** + * @brief Generate an intrinsic refined marked metric from a VF mesh using the embedding metric and + * holonomy constraints inferred from a fit cross-field on the refined mesh + * + * @param V: mesh vertices + * @param F: mesh faces + * @param min_angle: (optional) minimum angle allowed for the intrinsic refinement (may not converge + * above 30) + * @param marked_mesh_params: (optional) parameters for the marked mesh construction + * @return refined marked cone metric + * @return per-halfedge rotation form + * @return per-vertex cone angles + */ +std::tuple> generate_refined_marked_metric( + const Eigen::MatrixXd& V, + const Eigen::MatrixXi& F, + double min_angle = 25., + MarkedMetricParameters marked_mesh_params = MarkedMetricParameters()); + +/** + * @brief Generate a marked metric from a halfedge mesh and rotation form. + * + * @param m: mesh with metric and cones + * @param rotation_form: per-halfedge rotation values + * @param marked_mesh_params: (optional) parameters for the marked mesh construction + * @return marked cone metric + */ +MarkedPennerConeMetric generate_marked_metric_from_mesh( + const Mesh& m, + const VectorX& rotation_form, + MarkedMetricParameters marked_mesh_params = MarkedMetricParameters()); + +/** + * @brief Generate a similarity metric from a VF mesh, cones, and rotation form. 
+ * + * @param V: mesh vertices + * @param F: mesh faces + * @param uv: mesh metric vertices + * @param F_uv: mesh metric faces + * @param Th_hat: per-vertex cone angles + * @param rotation_form: per-halfedge rotation values + * @param free_cones: list of cones to leave free + * @param marked_mesh_params: (optional) parameters for the marked mesh construction + * @return similarity metric + * @return vertex reindexing from the halfedge to VF vertices + */ +std::tuple> generate_similarity_metric( + const Eigen::MatrixXd& V, + const Eigen::MatrixXi& F, + const Eigen::MatrixXd& uv, + const Eigen::MatrixXi& F_uv, + const std::vector& Th_hat, + const VectorX& rotation_form, + std::vector free_cones, + MarkedMetricParameters marked_mesh_params = MarkedMetricParameters()); + +/** + * @brief Generate a similarity metric from a halfedge mesh and rotation form. + * + * @param m: mesh with metric and cones + * @param rotation_form: per-halfedge rotation values + * @param marked_mesh_params: (optional) parameters for the marked mesh construction + * @return similarity metric + */ +SimilarityPennerConeMetric generate_similarity_metric_from_mesh( + const Mesh& m, + const VectorX& rotation_form, + MarkedMetricParameters marked_mesh_params = MarkedMetricParameters()); + +/** + * @brief Regularize the metric to have bounded triangle quality. + * + * @param marked_metric: metric to regularize + * @param max_triangle_quality: (optional) maximum allowed triangle quality measure + */ +void regularize_metric(MarkedPennerConeMetric& marked_metric, double max_triangle_quality = 50); + +/** + * @brief Regularize the metric to have bounded triangle quality by using gradient descent. + * + * WARNING: Works poorly and distorts metric substantially. 
+ * + * @param marked_metric: metric to regularize + * @param max_triangle_quality: (optional) maximum allowed triangle quality measure + */ +void optimize_triangle_quality(MarkedPennerConeMetric& marked_metric, double max_triangle_quality = 50); + +VectorX generate_penner_coordinates(const Mesh& m); + +void generate_basis_loops( + const Mesh& m, + std::vector>& basis_loops, + MarkedMetricParameters marked_metric_params); + +std::tuple +parameterize_components( + const MarkedPennerConeMetric& embedding_metric, + const MarkedPennerConeMetric& original_metric, + const MarkedPennerConeMetric& marked_metric, + const Eigen::MatrixXd& V_cut, + const std::vector& vtx_reindex +); + +std::vector extend_vtx_reindex( + const Mesh& m, + const std::vector& vtx_reindex +); + +std::tuple> generate_intrinsic_rotation_form( + const Eigen::MatrixXd& V, + const Eigen::MatrixXi& F, + const FieldParameters& field_params); + +} // namespace Holonomy +} // namespace Penner \ No newline at end of file diff --git a/include/holonomy/holonomy/similarity/conformal.h b/include/holonomy/holonomy/similarity/conformal.h new file mode 100644 index 0000000..2227a63 --- /dev/null +++ b/include/holonomy/holonomy/similarity/conformal.h @@ -0,0 +1,25 @@ +#pragma once + +#include "holonomy/core/common.h" +#include "holonomy/similarity/similarity_penner_cone_metric.h" + +namespace Penner { +namespace Holonomy { + +/** + * @brief Compute a conformally equivalent similarity metric satisfying holonomy constraints. + * + * In order to satisfy dual loop constraints, this method computes a scaling one form rather than + * a scaling zero form that must be integrated first, potentially resulting in jumps. 
+ * + * @param similarity_metric: similarity metric structure with holonomy constraints + * @param alg_params: global parameters for the algorithm + * @param ls_params: parameters for the line search + */ +void compute_conformal_similarity_metric( + SimilarityPennerConeMetric& similarity_metric, + const AlgorithmParameters& alg_params, + const LineSearchParameters& ls_params); + +} // namespace Holonomy +} // namespace Penner \ No newline at end of file diff --git a/include/holonomy/holonomy/similarity/constraint.h b/include/holonomy/holonomy/similarity/constraint.h new file mode 100644 index 0000000..a7e4173 --- /dev/null +++ b/include/holonomy/holonomy/similarity/constraint.h @@ -0,0 +1,52 @@ + +#pragma once + +#include "holonomy/core/common.h" +#include "holonomy/similarity/similarity_penner_cone_metric.h" + +namespace Penner { +namespace Holonomy { + +/** + * @brief Compute vector of one form constraints. + * + * The constraints are vertex holonomy constraints, dual loop holonomy constraints, and + * closed form constraints. + * + * @param[in] similarity_metric: mesh with similarity metric + * @param[in] angles: per-corner angles of the metric + * @return vector of one form constraint errors + */ +VectorX compute_similarity_constraint( + const SimilarityPennerConeMetric& similarity_metric, + const VectorX& angles); + +/** + * @brief Compute jacobian of the similarity one form constraints with respect to one form + * edge values. + * + * @param[in] similarity_metric: mesh with similarity metric + * @param[in] cotangents: per-corner cotangent angles of the metric + * @return one form constraint error jacobian matrix + */ +MatrixX compute_similarity_constraint_jacobian( + const SimilarityPennerConeMetric& similarity_metric, + const VectorX& cotangents); + +/** + * @brief Compute the similarity one form constraints and the jacobian with respect to one form + * edge values. 
+ * + * @param[in] similarity_metric: mesh with similarity metric + * @param[out] constraint: vector of one form constraint errors + * @param[out] J_constraint: one form constraint error jacobian matrix + * @param[in] need_jacobian: (optional) only build jacobian if true + */ +void compute_similarity_constraint_with_jacobian( + const SimilarityPennerConeMetric& similarity_metric, + VectorX& constraint, + MatrixX& J_constraint, + bool need_jacobian=true); + +} // namespace Holonomy +} // namespace Penner \ No newline at end of file diff --git a/include/holonomy/holonomy/similarity/energy.h b/include/holonomy/holonomy/similarity/energy.h new file mode 100644 index 0000000..c1c29f7 --- /dev/null +++ b/include/holonomy/holonomy/similarity/energy.h @@ -0,0 +1,145 @@ +#pragma once + +#include "holonomy/core/common.h" +#include "holonomy/similarity/similarity_penner_cone_metric.h" + +#include "optimization/metric_optimization/energy_functor.h" + +namespace Penner { +namespace Holonomy { + +/** + * @brief Energy for the difference between halfedge lengths. For a metric, this energy + * is always zero, but it may be nontrivial for similarity structures. + * + */ +class JumpEnergy : public Optimization::EnergyFunctor +{ +public: + /** + * @brief Construct a new Jump Energy object for a given connectivity. + * + * @param m: mesh connectivity + */ + JumpEnergy(const Mesh& m); + +private: + std::vector m_opp; + + virtual Scalar energy(const VectorX& metric_coords) const override; + virtual VectorX gradient(const VectorX& metric_coords) const override; + virtual MatrixX hessian(const VectorX& metric_coords) const override; + virtual MatrixX hessian_inverse(const VectorX& metric_coords) const override; +}; + +/** + * @brief Squared two-norm energy for a given subset of coordinates. This can be used to + * represent a jump energy for a similarity structure by using the scaling form coordinates. 
+ * + */ +class CoordinateEnergy : public Optimization::EnergyFunctor +{ +public: + /** + * @brief Construct a new Coordinate Energy object for a given target metric. + * + * @param target_cone_metric: target metric + * @param coordinate_indices: coordinate indices to use in the energy + */ + CoordinateEnergy( + const DifferentiableConeMetric& target_cone_metric, + std::vector coordinate_indices); + +private: + VectorX m_metric_target; + std::vector m_coordinate_indices; + + virtual Scalar energy(const VectorX& metric_coords) const override; + virtual VectorX gradient(const VectorX& metric_coords) const override; + virtual MatrixX hessian(const VectorX& metric_coords) const override; + virtual MatrixX hessian_inverse(const VectorX& metric_coords) const override; +}; + +/** + * @brief Squared two-norm energy for the Penner coordinates for a similarity structure after scaling + * by the integrated scaling one-form. This energy depends on the choice of cut for the integration + * of the scaling one-form. + * + */ +class IntegratedEnergy : public Optimization::EnergyFunctor +{ +public: + /** + * @brief Construct a new Integrated Energy object. + * + * @param target_similarity_metric: target similarity metric + */ + IntegratedEnergy(const SimilarityPennerConeMetric& target_similarity_metric); + +private: + SimilarityPennerConeMetric m_target_similarity_metric; + MatrixX m_scaling_matrix; + MatrixX m_expansion_matrix; + VectorX m_metric_target; + + // Precomputed matrix products + MatrixX Axx, Axy, Ayx, Ayy; + VectorX bx, by; + + virtual Scalar energy(const VectorX& metric_coords) const override; + virtual VectorX gradient(const VectorX& metric_coords) const override; + virtual MatrixX hessian(const VectorX& metric_coords) const override; + virtual MatrixX hessian_inverse(const VectorX& metric_coords) const override; +}; + +/** + * @brief Sum of per-face ratios of outradius to inradius. This is a rational energy. 
+ * + * TODO: Replace with inverse of current energy to ensure well defined. + * + */ +class TriangleQualityEnergy : public Optimization::EnergyFunctor +{ +public: + /** + * @brief Construct a new Triangle Quality Energy object + * + * @param target_marked_metric: mesh connectivity + */ + TriangleQualityEnergy(const MarkedPennerConeMetric& target_marked_metric); + +private: + MarkedPennerConeMetric m_target_marked_metric; + + virtual Scalar energy(const VectorX& metric_coords) const override; + virtual VectorX gradient(const VectorX& metric_coords) const override; + virtual MatrixX hessian(const VectorX& metric_coords) const override; + virtual MatrixX hessian_inverse(const VectorX& metric_coords) const override; +}; + +/** + * @brief Logarithmic triangle quality measure taking the per-face sum of squared differences + * of log edge lengths (lij + ljk - 2lki). + * + */ +class LogTriangleQualityEnergy : public Optimization::EnergyFunctor +{ +public: + /** + * @brief Construct a new Log Triangle Quality Energy object + * + * @param target_marked_metric: mesh connectivity + */ + LogTriangleQualityEnergy(const MarkedPennerConeMetric& target_marked_metric); + +private: + MarkedPennerConeMetric m_target_marked_metric; + + virtual Scalar energy(const VectorX& metric_coords) const override; + virtual VectorX gradient(const VectorX& metric_coords) const override; + virtual MatrixX hessian(const VectorX& metric_coords) const override; + virtual MatrixX hessian_inverse(const VectorX& metric_coords) const override; +}; + +} // namespace Holonomy +} // namespace Penner \ No newline at end of file diff --git a/include/holonomy/holonomy/similarity/layout.h b/include/holonomy/holonomy/similarity/layout.h new file mode 100644 index 0000000..db8e5f6 --- /dev/null +++ b/include/holonomy/holonomy/similarity/layout.h @@ -0,0 +1,54 @@ + +#pragma once + +#include "holonomy/core/common.h" +#include "holonomy/similarity/similarity_penner_cone_metric.h" +#include 
"optimization/parameterization/interpolation.h" + +#include "conformal_ideal_delaunay/OverlayMesh.hh" + +namespace Penner { +namespace Holonomy { + +/** + * @brief Generate a parameterization for a VF mesh with a similarity metric structure. + * + * The integrated scaled metric is used, so the parameterization may have different edge + * lengths across the parameterization cut. + * + * @param V: mesh vertices + * @param F: mesh faces + * @param Th_hat: per-vertex cone angles + * @param initial_similarity_metric: similarity metric structure for the mesh with the given cone angles + * @param cut_h: (optional) cut to disk for the mesh for the parameterization + * @return overlay mesh for the metric flipped to a Delaunay connectivity + * @return overlay mesh vertices + * @return overlay mesh faces + * @return overlay mesh uv vertices + * @return overlay mesh uv faces + * @return cut for the mesh + * @return cut for the overlay mesh + * @return map from overlay face indices to faces in the original mesh + * @return map from overlay vertices to endpoint vertices in the original mesh + */ +std:: + tuple< + OverlayMesh, // m_o + Eigen::MatrixXd, // V_o + Eigen::MatrixXi, // F_o + Eigen::MatrixXd, // uv_o + Eigen::MatrixXi, // FT_o + std::vector, // is_cut_h + std::vector, // is_cut_o + std::vector, // Fn_to_F + std::vector> // endpoints_o + > + generate_VF_mesh_from_similarity_metric( + const Eigen::MatrixXd& V, + const Eigen::MatrixXi& F, + const std::vector& Th_hat, + const SimilarityPennerConeMetric& initial_similarity_metric, + std::vector cut_h); + +} // namespace Holonomy +} // namespace Penner \ No newline at end of file diff --git a/include/holonomy/holonomy/similarity/similarity_penner_cone_metric.h b/include/holonomy/holonomy/similarity/similarity_penner_cone_metric.h new file mode 100644 index 0000000..641a262 --- /dev/null +++ b/include/holonomy/holonomy/similarity/similarity_penner_cone_metric.h @@ -0,0 +1,107 @@ +#pragma once + +#include 
"holonomy/core/common.h" +#include "holonomy/core/forms.h" +#include "holonomy/holonomy/marked_penner_cone_metric.h" + +namespace Penner { +namespace Holonomy { + +// TODO Refactor this and mesh class + +/** + * @brief Class to represent a mesh with a Penner similarity structure + */ +class SimilarityPennerConeMetric : public MarkedPennerConeMetric +{ +public: + SimilarityPennerConeMetric( + const Mesh& m, + const VectorX& metric_coords, + const std::vector>& homology_basis_loops, + const std::vector& kappa, + const VectorX& harmonic_form_coords); + + SimilarityPennerConeMetric( + const Mesh& m, + const VectorX& reduced_metric_coords, + const std::vector>& homology_basis_loops, + const std::vector& kappa); + + // Metric access methods + VectorX get_reduced_metric_coordinates() const override; + void get_corner_angles(VectorX& he2angle, VectorX& he2cot) const override; + + // Flip method + bool flip_ccw(int _h, bool Ptolemy = true) override; + + // Metric change methods + std::unique_ptr clone_cone_metric() const override + { + return std::make_unique(SimilarityPennerConeMetric(*this)); + } + std::unique_ptr set_metric_coordinates( + const VectorX& reduced_metric_coords) const override; + std::unique_ptr scale_conformally(const VectorX& u) const override; + + // Constraint methods + bool constraint( + VectorX& constraint, + MatrixX& J_constraint, + bool need_jacobian, + bool only_free_vertices) const override; + std::unique_ptr project_to_constraint( + SolveStats& solve_stats, + std::shared_ptr proj_params = + nullptr) const override; + + // Discrete metric methods + void make_discrete_metric() override; + + // One form getters and setters + void set_one_form(const VectorX& one_form) + { + assert(is_closed_one_form(*this, one_form)); + m_one_form = one_form; + } + void set_one_form_direction(const VectorX& one_form_direction) + { + assert(is_closed_one_form(*this, one_form_direction)); + m_one_form_direction = one_form_direction; + } + const VectorX& get_one_form() 
const { return m_one_form; } + const VectorX& get_one_form_direction() const { return m_one_form_direction; } + + std::tuple> get_integrated_metric_coordinates( + std::vector cut_h = {}) const; + + VectorX reduce_one_form(const VectorX& one_form) const; + + SimilarityPennerConeMetric scale_by_one_form() const; + + void make_delaunay(std::vector& flip_seq); + + void separate_coordinates( + const VectorX& reduced_metric_coords, + VectorX& metric_coords, + VectorX& harmonic_form_coords) const; + +private: + VectorX m_harmonic_form_coords; + VectorX m_one_form; + VectorX m_one_form_direction; +}; + +void similarity_corner_angles( + const SimilarityPennerConeMetric& similarity_metric, + VectorX& he2angle, + VectorX& he2cot); + +void MakeSimilarityDelaunay( + SimilarityPennerConeMetric& m, + DelaunayStats& delaunay_stats, + SolveStats& solve_stats, + bool Ptolemy = true); + +} // namespace Holonomy +} // namespace Penner \ No newline at end of file diff --git a/src/core/area.hh b/include/optimization/optimization/core/area.h similarity index 88% rename from src/core/area.hh rename to include/optimization/optimization/core/area.h index cd273c3..73dd657 100644 --- a/src/core/area.hh +++ b/include/optimization/optimization/core/area.h @@ -30,10 +30,11 @@ *********************************************************************************/ #pragma once -#include "common.hh" -#include "cone_metric.hh" +#include "optimization/core/common.h" +#include "optimization/core/cone_metric.h" -namespace CurvatureMetric { +namespace Penner { +namespace Optimization { /// Compute the squared area of the triangle with edge lengths li, lj, lk using /// the numerically stable version of Heron's formula. 
@@ -66,28 +67,29 @@ Scalar squared_area_length_derivative(Scalar variable_length, Scalar lj, Scalar /// Compute the squared area of the triangle containing each halfedge for the cone metric /// -/// @param[in] cone_metric: mesh with differentiable metric +/// @param[in] m: mesh with metric /// @return map from halfedges to the square of the area of the face containing it -VectorX squared_areas(const DifferentiableConeMetric& cone_metric); +VectorX squared_areas(const Mesh& m); /// Compute the area of the triangle containing each halfedge for the cone metric /// -/// @param[in] cone_metric: mesh with differentiable metric +/// @param[in] m: mesh with metric /// @param[out] he2area: map from halfedges to the area of the face containing it -VectorX areas(const DifferentiableConeMetric& cone_metric); +VectorX areas(const Mesh& m); /// Compute the derivatives of the squared area of the triangle containing each /// halfedge for the mesh with respect to the halfedge length coordinates. /// -/// @param[in] cone_metric: mesh with differentiable metric +/// @param[in] m: mesh with metric /// @return map from halfedges to the derivative of the square of the area of the face containing it -VectorX squared_area_length_derivatives(const DifferentiableConeMetric& cone_metric); +VectorX squared_area_length_derivatives(const Mesh& m); /// Compute the derivatives of the squared area of the triangle containing each /// halfedge for the mesh with respect to the halfedge log length coordinates. 
/// -/// @param[in] cone_metric: mesh with differentiable metric +/// @param[in] m: mesh with metric /// @return map from halfedges to the derivative of the square of the area of the face containing it -VectorX squared_area_log_length_derivatives(const DifferentiableConeMetric& cone_metric); +VectorX squared_area_log_length_derivatives(const Mesh& m); -} // namespace CurvatureMetric +} // namespace Optimization +} // namespace Penner \ No newline at end of file diff --git a/src/core/common.hh b/include/optimization/optimization/core/common.h similarity index 71% rename from src/core/common.hh rename to include/optimization/optimization/core/common.h index 0864d41..095502e 100644 --- a/src/core/common.hh +++ b/include/optimization/optimization/core/common.h @@ -30,57 +30,17 @@ *********************************************************************************/ #pragma once -#include "globals.hh" +#include "util/common.h" +#include "optimization/core/globals.h" -#include -#include -#include #include #include #include #include -#include -#include -#include -#include -#include -#include "spdlog/sinks/basic_file_sink.h" -#include "spdlog/sinks/ostream_sink.h" -#include "spdlog/spdlog.h" - -namespace CurvatureMetric { - -/// Swap two doubles. -/// -/// @param[in, out] a: first double to swap -/// @param[in, out] b: second double to swap -inline void swap(double& a, double& b) -{ - std::swap(a, b); -} - -/// Get the max of two doubles. -/// -/// @param[in] a: first double to max -/// @param[in] b: second double to max -/// @return max of a and b -inline double max(const double& a, const double& b) -{ - return std::max(a, b); -} - -/// Check if two values are equal, up to a tolerance. 
-/// -/// @param[in] a: first value to compare -/// @param[in] b: second value to compare -/// @param[in] eps: tolerance for equality -/// @return true iff |a - b| < eps -inline bool float_equal(Scalar a, Scalar b, Scalar eps = 1e-10) -{ - return (abs(a - b) < eps); -} +namespace Penner { +namespace Optimization { // Check if two vectors are component-wise equal, up to a tolerance. // @@ -108,14 +68,6 @@ Scalar sup_norm(const VectorX& v); /// @return sup norm of the matrix Scalar matrix_sup_norm(const MatrixX& matrix); -/// Create a vector with values 0,1,...,n-1 -/// -/// @param[in] n: size of the output vector -/// @param[out] vec: output arangement vector -inline void arange(size_t n, std::vector& vec) -{ - vec.resize(n); - std::iota(vec.begin(), vec.end(), 0); -} -} // namespace CurvatureMetric +} // namespace Optimization +} // namespace Penner \ No newline at end of file diff --git a/src/core/cone_metric.hh b/include/optimization/optimization/core/cone_metric.h similarity index 98% rename from src/core/cone_metric.hh rename to include/optimization/optimization/core/cone_metric.h index 0706c79..33a4a4c 100644 --- a/src/core/cone_metric.hh +++ b/include/optimization/optimization/core/cone_metric.h @@ -32,11 +32,12 @@ #include -#include "common.hh" -#include "embedding.hh" -#include "flip_matrix_generator.hh" +#include "util/embedding.h" +#include "optimization/core/common.h" +#include "optimization/core/flip_matrix_generator.h" -namespace CurvatureMetric { +namespace Penner { +namespace Optimization { /// Representation of a differentiable intrinsic metric on a mesh with angle constraints at cones. 
/// @@ -354,4 +355,5 @@ class DiscreteMetric : public DifferentiableConeMetric void expand_metric_coordinates(const VectorX& metric_coords); }; -} // namespace CurvatureMetric +} // namespace Optimization +} // namespace Penner \ No newline at end of file diff --git a/src/core/constraint.hh b/include/optimization/optimization/core/constraint.h similarity index 97% rename from src/core/constraint.hh rename to include/optimization/optimization/core/constraint.h index 9f2816e..0f17694 100644 --- a/src/core/constraint.hh +++ b/include/optimization/optimization/core/constraint.h @@ -30,10 +30,11 @@ *********************************************************************************/ #pragma once -#include "common.hh" -#include "cone_metric.hh" +#include "optimization/core/common.h" +#include "optimization/core/cone_metric.h" -namespace CurvatureMetric { +namespace Penner { +namespace Optimization { /// Check the triangle inequality for every triangle in the mesh with respect to the /// halfedge metric coordinates @@ -106,4 +107,5 @@ Scalar compute_max_constraint(const DifferentiableConeMetric& cone_metric); /// TODO Optionally add halfedge coordinate Jacobians -} // namespace CurvatureMetric +} // namespace Optimization +} // namespace Penner \ No newline at end of file diff --git a/src/core/flip_matrix_generator.hh b/include/optimization/optimization/core/flip_matrix_generator.h similarity index 97% rename from src/core/flip_matrix_generator.hh rename to include/optimization/optimization/core/flip_matrix_generator.h index 58001de..0c3f26a 100644 --- a/src/core/flip_matrix_generator.hh +++ b/include/optimization/optimization/core/flip_matrix_generator.h @@ -30,9 +30,10 @@ *********************************************************************************/ #pragma once -#include "common.hh" +#include "optimization/core/common.h" -namespace CurvatureMetric { +namespace Penner { +namespace Optimization { /// Class for incrementally building the flip change of coordinate matrix 
class FlipMatrixGenerator @@ -107,4 +108,5 @@ class FlipMapMatrixGenerator std::vector> m_list_of_lists; }; -} // namespace CurvatureMetric +} // namespace Optimization +} // namespace Penner \ No newline at end of file diff --git a/src/core/globals.hh b/include/optimization/optimization/core/globals.h similarity index 90% rename from src/core/globals.hh rename to include/optimization/optimization/core/globals.h index 11b3bee..bf9020e 100644 --- a/src/core/globals.hh +++ b/include/optimization/optimization/core/globals.h @@ -30,26 +30,10 @@ *********************************************************************************/ #pragma once -#include -#include -#include "conformal_ideal_delaunay/OverlayMesh.hh" -#include "conformal_ideal_delaunay/globals.hh" +#include "util/common.h" -namespace CurvatureMetric { -using namespace OverlayProblem; - -#ifdef MULTIPRECISION -#include -#include "mpreal.h" -typedef mpfr::mpreal Scalar; -#else -typedef double Scalar; -#endif - -typedef Eigen::Matrix VectorX; -typedef Eigen::SparseMatrix MatrixX; -typedef Eigen::Triplet T; -const Scalar INF = 1e10; +namespace Penner { +namespace Optimization { /// Energies available for optimization enum class EnergyChoice { @@ -107,4 +91,6 @@ struct OptimizationParameters Scalar max_grad_range = 10; // maximum allowed gradient range (reduce if larger) Scalar max_angle = INF; // maximum allowed cone angle error (reduce if larger) }; -} // namespace CurvatureMetric + +} // namespace Optimization +} // namespace Penner \ No newline at end of file diff --git a/src/core/projection.hh b/include/optimization/optimization/core/projection.h similarity index 97% rename from src/core/projection.hh rename to include/optimization/optimization/core/projection.h index 39b7917..8c85353 100644 --- a/src/core/projection.hh +++ b/include/optimization/optimization/core/projection.h @@ -30,10 +30,11 @@ *********************************************************************************/ #pragma once -#include 
"common.hh" -#include "cone_metric.hh" +#include "optimization/core/common.h" +#include "optimization/core/cone_metric.h" -namespace CurvatureMetric { +namespace Penner { +namespace Optimization { /// Create matrix mapping vertex scale factors to their corresponding edges. /// @@ -117,4 +118,5 @@ VectorX project_descent_direction( const DifferentiableConeMetric& cone_metric, const VectorX& descent_direction); -} // namespace CurvatureMetric +} // namespace Optimization +} // namespace Penner \ No newline at end of file diff --git a/src/core/reparametrization.hh b/include/optimization/optimization/core/reparametrization.h similarity index 96% rename from src/core/reparametrization.hh rename to include/optimization/optimization/core/reparametrization.h index dbe9489..5088201 100644 --- a/src/core/reparametrization.hh +++ b/include/optimization/optimization/core/reparametrization.h @@ -30,9 +30,10 @@ *********************************************************************************/ #pragma once -#include "common.hh" +#include "optimization/core/common.h" -namespace CurvatureMetric { +namespace Penner { +namespace Optimization { /// Reparametrize the barycentric coordinates for the equilateral triangle by /// translating a constant hyperbolic distance along each halfedge. 
If the sum @@ -61,4 +62,5 @@ void reparametrize_equilateral( #ifdef PYBIND #endif -} // namespace CurvatureMetric +} // namespace Optimization +} // namespace Penner diff --git a/src/core/shear.hh b/include/optimization/optimization/core/shear.h similarity index 96% rename from src/core/shear.hh rename to include/optimization/optimization/core/shear.h index 5891848..eb9248d 100644 --- a/src/core/shear.hh +++ b/include/optimization/optimization/core/shear.h @@ -30,10 +30,11 @@ *********************************************************************************/ #pragma once -#include "common.hh" -#include "cone_metric.hh" +#include "optimization/core/common.h" +#include "optimization/core/cone_metric.h" -namespace CurvatureMetric { +namespace Penner { +namespace Optimization { /// Compute the per halfedge logarithmic shear values for the mesh m with /// logarithmic lengths lambdas_he @@ -106,4 +107,5 @@ void compute_shear_basis_coordinates( VectorX& shear_coords, VectorX& scale_factors); -} // namespace CurvatureMetric +} // namespace Optimization +} // namespace Penner diff --git a/src/penner_optimization_interface.hh b/include/optimization/optimization/interface.h similarity index 96% rename from src/penner_optimization_interface.hh rename to include/optimization/optimization/interface.h index de87af7..854d302 100644 --- a/src/penner_optimization_interface.hh +++ b/include/optimization/optimization/interface.h @@ -29,11 +29,12 @@ * * * *********************************************************************************/ #pragma once -#include "common.hh" -#include "cone_metric.hh" -#include "energy_functor.hh" +#include "optimization/core/common.h" +#include "optimization/core/cone_metric.h" +#include "optimization/metric_optimization/energy_functor.h" -namespace CurvatureMetric { +namespace Penner { +namespace Optimization { /// Generate a mesh with initial target metric coordinates for optimization /// @@ -161,4 +162,5 @@ std:: const VectorX& reduced_log_edge_lengths, 
std::vector cut_h = {}); -} // namespace CurvatureMetric +} // namespace Optimization +} // namespace Penner \ No newline at end of file diff --git a/src/metric_optimization/convergence.hh b/include/optimization/optimization/metric_optimization/convergence.h similarity index 93% rename from src/metric_optimization/convergence.hh rename to include/optimization/optimization/metric_optimization/convergence.h index 09a657c..55112df 100644 --- a/src/metric_optimization/convergence.hh +++ b/include/optimization/optimization/metric_optimization/convergence.h @@ -30,15 +30,16 @@ *********************************************************************************/ #pragma once -#include "common.hh" -#include "cone_metric.hh" -#include "embedding.hh" -#include "energy_functor.hh" +#include "optimization/core/common.h" +#include "optimization/core/cone_metric.h" +#include "util/embedding.h" +#include "optimization/metric_optimization/energy_functor.h" /// @file Methods to analyze the convergence of a metric to a global minimum on the /// constraint surface. 
-namespace CurvatureMetric { +namespace Penner { +namespace Optimization { /// Given a metric and direction, compute the energy values for optimization /// at given (potentially negative) step sizes before and after the projection to the @@ -63,4 +64,5 @@ void compute_direction_energy_values( VectorX& projected_energies); -} // namespace CurvatureMetric +} // namespace Optimization +} // namespace Penner \ No newline at end of file diff --git a/src/metric_optimization/energies.hh b/include/optimization/optimization/metric_optimization/energies.h similarity index 98% rename from src/metric_optimization/energies.hh rename to include/optimization/optimization/metric_optimization/energies.h index a2b2c88..c4166a8 100644 --- a/src/metric_optimization/energies.hh +++ b/include/optimization/optimization/metric_optimization/energies.h @@ -30,17 +30,18 @@ *********************************************************************************/ #pragma once -#include "common.hh" -#include "cone_metric.hh" +#include "optimization/core/common.h" +#include "optimization/core/cone_metric.h" #include "conformal_ideal_delaunay/OverlayMesh.hh" -#include "embedding.hh" +#include "util/embedding.h" -/// \file energies.hh +/// \file energies.h /// /// Differentiable per face energy functions with gradients for the metric /// optimization for both halfedge and VF representations -namespace CurvatureMetric { +namespace Penner { +namespace Optimization { // ****************** // Halfedge Functions @@ -316,4 +317,5 @@ VectorX second_invariant_vf_pybind( #endif -} // namespace CurvatureMetric +} // namespace Optimization +} // namespace Penner \ No newline at end of file diff --git a/src/metric_optimization/energy_functor.hh b/include/optimization/optimization/metric_optimization/energy_functor.h similarity index 97% rename from src/metric_optimization/energy_functor.hh rename to include/optimization/optimization/metric_optimization/energy_functor.h index cf0f9c6..084c939 100644 --- 
a/src/metric_optimization/energy_functor.hh +++ b/include/optimization/optimization/metric_optimization/energy_functor.h @@ -30,13 +30,14 @@ *********************************************************************************/ #pragma once -#include "common.hh" -#include "cone_metric.hh" +#include "util/embedding.h" +#include "util/linear_algebra.h" +#include "optimization/core/common.h" +#include "optimization/core/cone_metric.h" #include "conformal_ideal_delaunay/OverlayMesh.hh" -#include "embedding.hh" -#include "linear_algebra.hh" -namespace CurvatureMetric { +namespace Penner { +namespace Optimization { /// Functor to compute a differentiable energy over a mesh with an intrinsic /// metric in terms of log edge or Penner coordinates. @@ -230,4 +231,5 @@ class RegularizedQuadraticEnergy : public EnergyFunctor // TODO: Cone energy -} // namespace CurvatureMetric +} // namespace Optimization +} // namespace Penner \ No newline at end of file diff --git a/src/metric_optimization/energy_weights.hh b/include/optimization/optimization/metric_optimization/energy_weights.h similarity index 82% rename from src/metric_optimization/energy_weights.hh rename to include/optimization/optimization/metric_optimization/energy_weights.h index ed366fb..0a87af4 100644 --- a/src/metric_optimization/energy_weights.hh +++ b/include/optimization/optimization/metric_optimization/energy_weights.h @@ -30,16 +30,17 @@ *********************************************************************************/ #pragma once -#include "common.hh" -#include "cone_metric.hh" +#include "optimization/core/common.h" +#include "optimization/core/cone_metric.h" #include "conformal_ideal_delaunay/OverlayMesh.hh" -#include "embedding.hh" +#include "util/embedding.h" -/// \file energy_functor.hh +/// \file energy_functor.h /// /// Methods to weight energies, e.g., per element energies by element area weights -namespace CurvatureMetric { +namespace Penner { +namespace Optimization { /// @brief Given a vector of 
weights and a vector of values, compute the weighted 2 norm /// as the sum of the product of the weights and squared values @@ -52,17 +53,29 @@ Scalar compute_weighted_norm(const VectorX& weights, const VectorX& values); /// @brief Compute per edge weights for a mesh with a given metric as 1/3 of the areas /// of the two adjacent faces /// -/// @param[in] m: mesh -/// @param[in] log_edge_lengths: log edge length metric for the mesh +/// @param[in] cone_metric: mesh with differentiable metric /// @param[out] edge_area_weights: weights per edge VectorX compute_edge_area_weights(const DifferentiableConeMetric& cone_metric); +/// @brief Compute per vertex area weights for a mesh +/// +/// @param[in] m: mesh +/// @return weights per vertex +VectorX compute_vertex_area_weights(const Mesh& m); + +/// @brief Compute per independent vertex area weights for a mesh +/// +/// The weights are half the sum of identified vertex weights. +/// +/// @param[in] m: mesh +/// @return weights per independent vertex +VectorX compute_independent_vertex_area_weights(const Mesh& m); + /// @brief Compute per face area weights for a mesh /// /// @param[in] m: mesh -/// @param[in] log_edge_lengths: log edge length metric for the mesh -/// @param[out] face_area_weights: weights per face -VectorX compute_face_area_weights(const DifferentiableConeMetric& cone_metric); +/// @return weights per face +VectorX compute_face_area_weights(const Mesh& m); /// Compute a vector of weights for faces adjacent to the boundary. 
/// @@ -77,4 +90,5 @@ void compute_boundary_face_weights( std::vector& face_weights); -} // namespace CurvatureMetric +} // namespace Optimization +} // namespace Penner \ No newline at end of file diff --git a/src/metric_optimization/explicit_optimization.hh b/include/optimization/optimization/metric_optimization/explicit_optimization.h similarity index 97% rename from src/metric_optimization/explicit_optimization.hh rename to include/optimization/optimization/metric_optimization/explicit_optimization.h index 64e1b4a..8cdfdbc 100644 --- a/src/metric_optimization/explicit_optimization.hh +++ b/include/optimization/optimization/metric_optimization/explicit_optimization.h @@ -31,16 +31,17 @@ #pragma once #include -#include "common.hh" -#include "embedding.hh" -#include "energy_functor.hh" +#include "optimization/core/common.h" +#include "util/embedding.h" +#include "optimization/metric_optimization/energy_functor.h" /// @file Methods to optimize a metric satisfying angle constraints using an explicit /// representation of the constraint manifold as a graph over a linear subspace of the /// space of Penner coordinates. The domain of this space has |E| - |V| + d degrees of /// freedom, where d > 0 is the number of vertices with free angles in the mesh. -namespace CurvatureMetric { +namespace Penner { +namespace Optimization { /// @brief Compute a maximal independent optimization domain for optimization from a mesh with /// a specified shear subspace basis. 
@@ -153,4 +154,5 @@ VectorX optimize_shear_basis_coordinates( std::shared_ptr proj_params, std::shared_ptr opt_params); -} // namespace CurvatureMetric +} // namespace Optimization +} // namespace Penner \ No newline at end of file diff --git a/src/metric_optimization/implicit_optimization.hh b/include/optimization/optimization/metric_optimization/implicit_optimization.h similarity index 95% rename from src/metric_optimization/implicit_optimization.hh rename to include/optimization/optimization/metric_optimization/implicit_optimization.h index 2b57daf..f688f25 100644 --- a/src/metric_optimization/implicit_optimization.hh +++ b/include/optimization/optimization/metric_optimization/implicit_optimization.h @@ -31,13 +31,14 @@ #pragma once #include -#include "common.hh" -#include "cone_metric.hh" +#include "optimization/core/common.h" +#include "optimization/core/cone_metric.h" #include "conformal_ideal_delaunay/OverlayMesh.hh" -#include "embedding.hh" -#include "energy_functor.hh" +#include "util/embedding.h" +#include "optimization/metric_optimization/energy_functor.h" -namespace CurvatureMetric { +namespace Penner { +namespace Optimization { // Log for implicit optimization iteration values struct OptimizationLog @@ -100,4 +101,5 @@ std::unique_ptr optimize_metric( std::shared_ptr proj_params = nullptr, std::shared_ptr opt_params = nullptr); -} // namespace CurvatureMetric +} // namespace Optimization +} // namespace Penner \ No newline at end of file diff --git a/src/metric_optimization/nonlinear_optimization.hh b/include/optimization/optimization/metric_optimization/nonlinear_optimization.h similarity index 97% rename from src/metric_optimization/nonlinear_optimization.hh rename to include/optimization/optimization/metric_optimization/nonlinear_optimization.h index af3c883..efc240f 100644 --- a/src/metric_optimization/nonlinear_optimization.hh +++ b/include/optimization/optimization/metric_optimization/nonlinear_optimization.h @@ -31,14 +31,15 @@ #pragma once 
#include -#include "common.hh" +#include "optimization/core/common.h" /// @file Methods to perform advanced nonlinear optimization, including conjugate gradient /// and L-BFGS-B, using the current gradient, previous gradient, and previous descent direction. /// These methods are intended to be general enough to apply to both implicit projected gradient /// descent and unconstrained gradient descent. -namespace CurvatureMetric { +namespace Penner { +namespace Optimization { /// Compute a nonlinear conjugate gradient descent direction from a given gradient and /// previous iteration data. Supported choices for the coefficient of "beta" are: @@ -88,4 +89,5 @@ void compute_lbfgs_direction( const VectorX& gradient, VectorX& descent_direction); -} // namespace CurvatureMetric \ No newline at end of file +} // namespace Optimization +} // namespace Penner \ No newline at end of file diff --git a/src/parameterization/interpolation.hh b/include/optimization/optimization/parameterization/interpolation.h similarity index 99% rename from src/parameterization/interpolation.hh rename to include/optimization/optimization/parameterization/interpolation.h index d4d190d..08299aa 100644 --- a/src/parameterization/interpolation.hh +++ b/include/optimization/optimization/parameterization/interpolation.h @@ -30,10 +30,11 @@ *********************************************************************************/ #pragma once -#include "common.hh" +#include "optimization/core/common.h" #include "conformal_ideal_delaunay/OverlayMesh.hh" -namespace CurvatureMetric { +namespace Penner { +namespace Optimization { /// A mesh structure to perform interpolation of points in a surface. 
/// @@ -296,4 +297,5 @@ void interpolate_vertex_positions( bool overlay_has_all_original_halfedges(OverlayMesh& mo); -} // namespace CurvatureMetric +} // namespace Optimization +} // namespace Penner \ No newline at end of file diff --git a/src/parameterization/layout.hh b/include/optimization/optimization/parameterization/layout.h similarity index 98% rename from src/parameterization/layout.hh rename to include/optimization/optimization/parameterization/layout.h index 78cba24..d6b42ed 100644 --- a/src/parameterization/layout.hh +++ b/include/optimization/optimization/parameterization/layout.h @@ -30,10 +30,11 @@ *********************************************************************************/ #pragma once -#include "common.hh" +#include "optimization/core/common.h" #include "conformal_ideal_delaunay/OverlayMesh.hh" -namespace CurvatureMetric { +namespace Penner { +namespace Optimization { /// Generate an overlay mesh for the mesh m with given metric coordinates integrated /// as the mesh metric. @@ -176,4 +177,5 @@ std:: #endif -} // namespace CurvatureMetric +} // namespace Optimization +} // namespace Penner \ No newline at end of file diff --git a/src/parameterization/refinement.hh b/include/optimization/optimization/parameterization/refinement.h similarity index 98% rename from src/parameterization/refinement.hh rename to include/optimization/optimization/parameterization/refinement.h index 95299a4..7e9625f 100644 --- a/src/parameterization/refinement.hh +++ b/include/optimization/optimization/parameterization/refinement.h @@ -29,14 +29,15 @@ * * * *********************************************************************************/ #include -#include "common.hh" +#include "optimization/core/common.h" -/// @file refinement.hh +/// @file refinement.h /// /// Methods to refine a triangulation with an accompanying overlay layout sufficiently /// to ensure the parametrization does not have inverted elements. 
-namespace CurvatureMetric { +namespace Penner { +namespace Optimization { /// A class to represent a mesh that supports an overlay refinement scheme. class RefinementMesh @@ -285,4 +286,5 @@ class RefinementMesh bool is_valid_refinement_mesh() const; }; -} // namespace CurvatureMetric +} // namespace Optimization +} // namespace Penner \ No newline at end of file diff --git a/src/parameterization/translation.hh b/include/optimization/optimization/parameterization/translation.h similarity index 96% rename from src/parameterization/translation.hh rename to include/optimization/optimization/parameterization/translation.h index 495792e..1b007ae 100644 --- a/src/parameterization/translation.hh +++ b/include/optimization/optimization/parameterization/translation.h @@ -30,9 +30,10 @@ *********************************************************************************/ #pragma once -#include "common.hh" +#include "optimization/core/common.h" -namespace CurvatureMetric { +namespace Penner { +namespace Optimization { /// Generate the least squares solution to the halfedge translations in the /// hyperbolic metric needed to satisfy the per halfedge shear change and face @@ -52,4 +53,5 @@ void compute_as_symmetric_as_possible_translations( VectorX& he_translations); // TODO: Add option to bypass and use zero translations or to solve in double precision -} // namespace CurvatureMetric +} // namespace Optimization +} // namespace Penner diff --git a/src/parameterization/triangulation.hh b/include/optimization/optimization/parameterization/triangulation.h similarity index 97% rename from src/parameterization/triangulation.hh rename to include/optimization/optimization/parameterization/triangulation.h index e30a75b..4324a2f 100644 --- a/src/parameterization/triangulation.hh +++ b/include/optimization/optimization/parameterization/triangulation.h @@ -28,13 +28,14 @@ * Courant Institute of Mathematical Sciences, New York University, USA * * * * 
*********************************************************************************/ -#include "common.hh" +#include "optimization/core/common.h" /// @file refinement.hh /// /// Methods to determine if polygons are self-overlapping and triangulate them -namespace CurvatureMetric { +namespace Penner { +namespace Optimization { /// Given three vertices in the plane, compute the triangle area /// @@ -89,4 +90,5 @@ void triangulate_self_overlapping_polygon( std::vector>& faces); -} // namespace CurvatureMetric +} // namespace Optimization +} // namespace Penner \ No newline at end of file diff --git a/src/util/shapes.hh b/include/optimization/optimization/util/shapes.h similarity index 96% rename from src/util/shapes.hh rename to include/optimization/optimization/util/shapes.h index 106a782..c1796d5 100644 --- a/src/util/shapes.hh +++ b/include/optimization/optimization/util/shapes.h @@ -30,7 +30,7 @@ *********************************************************************************/ #pragma once -#include "common.hh" +#include "optimization/core/common.h" // Some good simple tests are simplex embeddings that are natural (one vertex // at the origin and others at unit vectors) with metrics that are uniform @@ -38,7 +38,9 @@ // embedded case has three symmetric edges adjacent to the origin and three // symmetric edges not adjacent to the origin. 
-namespace CurvatureMetric { +namespace Penner { +namespace Optimization { + void map_to_sphere(size_t num_vertices, std::vector& Th_hat); @@ -85,4 +87,5 @@ std::tuple, // m > generate_double_triangle_mesh_pybind(); -} +} // namespace Optimization +} // namespace Penner \ No newline at end of file diff --git a/src/util/validation.hh b/include/optimization/optimization/util/validation.h similarity index 96% rename from src/util/validation.hh rename to include/optimization/optimization/util/validation.h index 40fb51e..66e9189 100644 --- a/src/util/validation.hh +++ b/include/optimization/optimization/util/validation.h @@ -32,12 +32,13 @@ #pragma once #include #include -#include "common.hh" -#include "energy_functor.hh" -#include "constraint.hh" -#include "cone_metric.hh" +#include "optimization/core/common.h" +#include "optimization/metric_optimization/energy_functor.h" +#include "optimization/core/constraint.h" +#include "optimization/core/cone_metric.h" -namespace CurvatureMetric { +namespace Penner { +namespace Optimization { inline std::vector validate_hencky_strain_face(Scalar l1, Scalar l2, Scalar l3) @@ -183,4 +184,5 @@ validate_constraint( spdlog::error("Constraint error for norm {} vector is {}", h.norm(), error); } } -} +} // namespace Optimization +} // namespace Penner diff --git a/src/util/viewers.hh b/include/optimization/optimization/util/viewers.h similarity index 96% rename from src/util/viewers.hh rename to include/optimization/optimization/util/viewers.h index 42e6db2..4eab09a 100644 --- a/src/util/viewers.hh +++ b/include/optimization/optimization/util/viewers.h @@ -28,13 +28,15 @@ * Courant Institute of Mathematical Sciences, New York University, USA * * * * *********************************************************************************/ -#include "common.hh" +#pragma once +#include "optimization/core/common.h" /// @file viewers.hh /// /// Some simple viewers to be used to analyze and debug the optimization pipeline. 
-namespace CurvatureMetric { +namespace Penner { +namespace Optimization { /// View the triangles in the mesh with inverted elements. /// @@ -74,4 +76,5 @@ void view_parameterization( const Eigen::MatrixXd& uv, const Eigen::MatrixXi& FT); -} +} // namespace Optimization +} // namespace Penner \ No newline at end of file diff --git a/src/util/visualization.hh b/include/optimization/optimization/util/visualization.h similarity index 96% rename from src/util/visualization.hh rename to include/optimization/optimization/util/visualization.h index aedb01c..c96ad42 100644 --- a/src/util/visualization.hh +++ b/include/optimization/optimization/util/visualization.h @@ -28,14 +28,14 @@ * Courant Institute of Mathematical Sciences, New York University, USA * * * * *********************************************************************************/ -#ifndef VISUALIZATION_HH -#define VISUALIZATION_HH +#pragma once + #include #include #include -namespace CurvatureMetric -{ +namespace Penner { +namespace Optimization { typedef igl::opengl::glfw::Viewer Viewer; @@ -58,5 +58,6 @@ void save_mesh_screen_capture(Viewer &viewer, int width, int height); -} -#endif + +} // namespace Optimization +} // namespace Penner diff --git a/include/util/util/boundary.h b/include/util/util/boundary.h new file mode 100644 index 0000000..0c32339 --- /dev/null +++ b/include/util/util/boundary.h @@ -0,0 +1,71 @@ +#pragma once + +#include "util/common.h" +#include "util/vector.h" +#include "util/map.h" + +namespace Penner { + +/** + * @brief Circulate halfedge ccw to next halfedge on boundary + * + * @param m: mesh + * @param halfedge_index: starting halfedge + * @return next halfedge ccw around the base vertex of the halfedge + */ + +int circulate_ccw_to_boundary(const Mesh& m, int halfedge_index); + +/** + * @brief Generate a list of boundary halfedge indices in the primal copy of the mesh. 
+ * + * @param m: mesh + * @return list of boundary halfedge indices + */ +std::vector find_primal_boundary_halfedges(const Mesh& m); + +/** + * @brief Generate a list of boundary vertex indices in a mesh. + * + * @param m: mesh + * @return list of boundary vertex indices + */ +std::vector find_boundary_vertices(const Mesh& m); + +/** + * @brief Generate a list of boundary vertex indices in the original VF mesh. + * + * @param m: mesh + * @param vtx_reindex: map from mesh to original vertex indices + * @return list of boundary vertex indices in the original mesh + */ +std::vector find_boundary_vertices( + const Mesh& m, + const std::vector& vtx_reindex); + +/** + * @brief Compute the boundary vertices of a mesh. + * + * @param m: mesh + * @return boolean mask of boundary vertices + */ +std::vector compute_boundary_vertices(const Mesh& m); + +/** + * @brief Generate a list of representative halfedges on each boundary component + * + * @param m: mesh + * @return list of primal halfedges on the boundaries + */ +std::vector find_boundary_components(const Mesh& m); + +/** + * @brief Generate a list of halfedges on the boundary from a given index + * + * @param m: mesh + * @param halfedge_index: starting halfedge of the boundary component + * @return list of primal halfedges on the boundary + */ +std::vector build_boundary_component(const Mesh& m, int halfedge_index); + +} // namespace Penner diff --git a/include/util/util/common.h b/include/util/util/common.h new file mode 100644 index 0000000..80ed73c --- /dev/null +++ b/include/util/util/common.h @@ -0,0 +1,84 @@ +#pragma once + +#include +#include +#include + +#include +#include +#include +#include + +#include "spdlog/spdlog.h" + +#include "conformal_ideal_delaunay/OverlayMesh.hh" +#include "conformal_ideal_delaunay/globals.hh" + +namespace Penner { +using namespace OverlayProblem; + +#ifdef MULTIPRECISION +#include +#include "mpreal.h" +typedef mpfr::mpreal Scalar; +#else +typedef double Scalar; +#endif + +typedef 
Eigen::Matrix VectorX; +typedef Eigen::SparseMatrix MatrixX; + +typedef Eigen::Matrix Vector2; +typedef Eigen::Matrix Vector3; +typedef Eigen::Matrix Matrix2x2; + +typedef Eigen::Triplet T; + + +using std::max; +using std::min; +using std::isnan; + +const Scalar INF = 1e10; + +/// Swap two doubles. +/// +/// @param[in, out] a: first double to swap +/// @param[in, out] b: second double to swap +inline void swap(double& a, double& b) +{ + std::swap(a, b); +} + +/// Get the max of two doubles. +/// +/// @param[in] a: first double to max +/// @param[in] b: second double to max +/// @return max of a and b +//inline double max(const double& a, const double& b) +//{ +// return std::max(a, b); +//} + +/// Check if two values are equal, up to a tolerance. +/// +/// @param[in] a: first value to compare +/// @param[in] b: second value to compare +/// @param[in] eps: tolerance for equality +/// @return true iff |a - b| < eps +inline bool float_equal(Scalar a, Scalar b, Scalar eps = 1e-10) +{ + return (abs(a - b) < eps); +} + +/// Create a vector with values 0,1,...,n-1 +/// +/// @param[in] n: size of the output vector +/// @param[out] vec: output arangement vector +inline void arange(size_t n, std::vector& vec) +{ + vec.resize(n); + std::iota(vec.begin(), vec.end(), 0); +} + +} // namespace Penner \ No newline at end of file diff --git a/src/core/embedding.hh b/include/util/util/embedding.h similarity index 99% rename from src/core/embedding.hh rename to include/util/util/embedding.h index 664aedf..dc612f6 100644 --- a/src/core/embedding.hh +++ b/include/util/util/embedding.h @@ -30,9 +30,9 @@ *********************************************************************************/ #pragma once -#include "common.hh" +#include "util/common.h" -namespace CurvatureMetric { +namespace Penner { // Methods to expand symmetric functions defined on unique representative edges // embedded in a symmetric mesh to per edge and per halfedge functions on the @@ -224,4 +224,4 @@ bool 
is_valid_halfedge(const Mesh& m); /// @param[in] m: underlying mesh bool is_valid_symmetry(const Mesh& m); -} // namespace CurvatureMetric +} // namespace Penner diff --git a/src/core/io.hh b/include/util/util/io.h similarity index 89% rename from src/core/io.hh rename to include/util/util/io.h index ffa8790..a7a4599 100644 --- a/src/core/io.hh +++ b/include/util/util/io.h @@ -30,9 +30,14 @@ *********************************************************************************/ #pragma once -#include "common.hh" +#include "util/common.h" -namespace CurvatureMetric { +#include "util/embedding.h" + +#include +#include + +namespace Penner { /// Join two filepaths. /// @@ -127,12 +132,23 @@ void create_log(const std::filesystem::path& log_dir, const std::string& log_nam /// @param[in] log_name: name of the log for global access void log_mesh_information(const Mesh& m, const std::string& log_name); -/// Write vector to file -/// -/// @param[in] vec: vector to write to file -/// @param[in] filename: file to write to -/// @param[in] precision: precision for output -void write_vector(const VectorX& vec, const std::string& filename, int precision = 17); +/** + * @brief Write a vector to file. + * + * @tparam Random access vector with size method + * @param v: vector to write + * @param output_filename: filename for writing + */ +template +void write_vector(const VectorType& v, const std::string& output_filename, int precision = 17) +{ + std::ofstream output_file(output_filename, std::ios::out | std::ios::trunc); + int n = v.size(); + for (int i = 0; i < n; ++i) { + output_file << std::setprecision(precision) << v[i] << std::endl; + } + output_file.close(); +} /// Write a matrix to file. 
/// @@ -150,4 +166,4 @@ void write_sparse_matrix( const std::string& filename, std::string format = "csv"); -} // namespace CurvatureMetric +} // namespace Penner diff --git a/src/core/linear_algebra.hh b/include/util/util/linear_algebra.h similarity index 84% rename from src/core/linear_algebra.hh rename to include/util/util/linear_algebra.h index 9516c65..0661b3c 100644 --- a/src/core/linear_algebra.hh +++ b/include/util/util/linear_algebra.h @@ -30,9 +30,9 @@ *********************************************************************************/ #pragma once -#include "common.hh" +#include "util/common.h" -namespace CurvatureMetric { +namespace Penner { /// Compute the Kronecker product of two vectors. /// @@ -80,6 +80,15 @@ VectorX solve_psd_system(const MatrixX& A, const VectorX& b); /// @return solution x to Ax = b VectorX solve_linear_system(const MatrixX& A, const VectorX& b); +/** + * @brief Solve a linear system with matrix valued right hand side + * + * @param A: matrix to invert + * @param B: right hand side matrix + * @return solution to AX = B + */ +MatrixX solve_linear_system(const MatrixX& A, const MatrixX& B); + /// Given a matrix and lists of row and column indices, compute the /// corresponding submatrix /// @@ -93,4 +102,23 @@ void compute_submatrix( const std::vector& col_indices, MatrixX& submatrix); -} // namespace CurvatureMetric +/** + * @brief Compute a rotation matrix corresponding to a given angle + * + * @param theta: rotation angle + * @return rotation angle + */ +Matrix2x2 compute_rotation(Scalar theta); + +/** + * @brief Generate equally spaced values in an interval [a, b] + * + * @param a: starting value + * @param b: ending value + * @param num_steps: number of steps to use in the linspace + * @return a vector of equally spaced values between a and b + */ +std::vector generate_linspace(Scalar a, Scalar b, int num_steps); + + +} // namespace Penner diff --git a/include/util/util/map.h b/include/util/util/map.h new file mode 100644 index 
0000000..f6bb40e --- /dev/null +++ b/include/util/util/map.h @@ -0,0 +1,300 @@ +#pragma once + +#include "util/common.h" + +#include + +namespace Penner { + +/** + * @brief Compute the max of a std vector. + * + * @param[in] v: vector + * @return max of v + */ +Scalar vector_max(const std::vector& v); + +/** + * @brief Negate every value of a vector of scalars + * + * @param[in] v: vector + * @return negation of v + */ +std::vector vector_negate(const std::vector& v); + +template +int argmax(const VectorType& v) +{ + int size = v.size(); + if (size == 0) return -1; + int max_index = 0; + for (int i = 1; i < size; ++i) + { + if (v[i] > v[max_index]) + { + max_index = i; + } + } + + return max_index; +} + +template +int argmin(const VectorType& v) +{ + int size = v.size(); + if (size == 0) return -1; + int min_index = 0; + for (int i = 1; i < size; ++i) + { + if (v[i] < v[min_index]) + { + min_index = i; + } + } + + return min_index; +} + +/** + * @brief Determine if a vector contains a NaN + * + * @param v: vector to check + * @return true if the vector contains a NaN + * @return false otherwise + */ +bool vector_contains_nan(const VectorX& v); + +/** + * @brief Compose two vectors + * + * @param[in] v: first vector + * @param[in] w: second vector + * @return composition i -> v[w[i]] + */ +template +VectorTypeOuter vector_compose(const VectorTypeOuter& v, const VectorTypeInner& w) +{ + int domain_size = w.size(); + VectorTypeOuter composition(domain_size); + for (int i = 0; i < domain_size; ++i) { + composition[i] = v[w[i]]; + } + + return composition; +} + +/** + * @brief Permute a vector by a permutation + * + * @param[in] v: vector to reindex + * @param[in] reindex: permutation + * @return composition i -> v[reindex[i]] + */ +template +VectorType vector_reindex(const VectorType& v, const IndexVectorType& reindex) +{ + int domain_size = reindex.size(); + VectorType composition(domain_size); + for (int i = 0; i < domain_size; ++i) { + composition[i] = 
v[reindex[i]]; + } + + return composition; +} + +/** + * @brief Permute a vector by the inverse of a permutation + * + * @param[in] v: vector to reindex + * @param[in] reindex: permutation to invert + * @return composition i -> v[reindex[i]] + */ +template +VectorType vector_inverse_reindex(const VectorType& v, const IndexVectorType& reindex) +{ + int domain_size = reindex.size(); + VectorType composition(domain_size); + for (int i = 0; i < domain_size; ++i) { + composition[reindex[i]] = v[i]; + } + + return composition; +} + +/** + * @brief Compute the range n of a map f: [0,...,m-1] -> [0,...,n-1] + * + * @param map: integer index map + * @return range of the map + */ +int compute_map_range(const std::vector& map); + +/** + * @brief Invert a map (using a left inverse for noninvertible maps) + * + * @param map: map to invert + * @return left inverse of the map + */ +std::vector invert_map(const std::vector& map); + +/** + * @brief Generate a random permutation + * + * @param n: size of the permutation + * @return permutation vector + */ +std::vector generate_permutation(int n); + +/** + * @brief Shuffle the image indices of a map + * + * @param map: integer index map + * @return shuffled index map + */ +std::vector shuffle_map_image(const std::vector& map); + +/** + * @brief Union a collection of n maps m_i: X_i -> Y_i into a single map m: X -> Y. + * + * We concatenate X = [X_1,...,X_n] and Y = [Y_1,...,Y_n], and we define m so that it maps + * X_i to Y_i as m_i with appropriate index offsets in both the domain and range. 
+ * + * @tparam type of the vector used for the map + * @param maps: n maps to union + * @param range_sizes: n sizes of Y_1,...,Y_n + * @return union map + */ +template +VectorType union_maps(const std::vector& maps, const std::vector& range_sizes) +{ + // Precompute the total map size + int total_domain_size = 0; + for (const auto& map : maps) { + total_domain_size += map.size(); + } + VectorType total_map(total_domain_size); + + // combine maps + int num_maps = maps.size(); + int count = 0; + int offset = 0; + for (int i = 0; i < num_maps; ++i) { + const auto& map = maps[i]; + + // Add reindexed map entries to total map + for (const auto& map_val : map) { + total_map[count] = map_val + offset; + count++; + } + + // Increase offset + offset += range_sizes[i]; + } + + return total_map; +} + +/** + * @brief Union a collection of n vectors m_i: X_i -> F into a single vector m: X -> F. + * + * We concatenate X = [X_1,...,X_n] and take F as a fixed value field, and we define m so that it + * maps X_i to F as m_i with appropriate index offsets in the domain. + * + * @tparam type of the vector used for the map + * @param vectors: n vectors to union + * @return union vector + */ +template +VectorType union_vectors(const std::vector& vectors) +{ + // Precompute the total attribute domain size + int total_domain_size = 0; + for (const auto& vector : vectors) { + total_domain_size += vector.size(); + } + VectorType total_vector(total_domain_size); + + int count = 0; + for (const auto& vector : vectors) { + int vector_size = vector.size(); + total_vector.segment(count, vector_size) = vector; + count += vector_size; + } + + return total_vector; +} + +/** + * @brief Create a map from a set of a given size to a subset, with -1 for entries not in the subset. 
+ * + * @param set_size: size of the ambient set + * @param subset_indices: indices of the subset in the set + * @return map from set indices to subset indices + */ +std::vector index_subset(size_t set_size, const std::vector& subset_indices); + +/** + * @brief Check if a map is invariant under some permutation and thus descends to a well-defined + * function on the orbits of the permutation + * + * @param map: map from {0,...,n-1} to {0,...,m-1} + * @param perm: permutation of n elements + * @return true iff the map is invariant under perm + */ +bool is_invariant_under_permutation(const std::vector& map, const std::vector& perm); + +/** + * @brief Check if two maps are one-sided inverses of each other, i.e., f(g(i)) = i + * + * The functions are allowed to have negative (denoting invalid) values; indices i where g(i) < 0 + * are skipped + * + * @param left_inverse: map f:{0,...,m-1}->Z + * @param right_inverse: map g:{0,...,n-1}->{0,...,m-1} + * @return true iff f composed with g is the identity where g is defined + */ +bool is_one_sided_inverse( + const std::vector& left_inverse, + const std::vector& right_inverse); + +/** + * @brief Check if the maps defining the edge connectivity of a polygonal mesh (next and prev) + * are valid + * + * @param next: size #he vector, next halfedge id + * @param prev: size #he vector, prev halfedge id + * @return true iff the edge maps are valid + */ +bool are_polygon_mesh_edges_valid(const std::vector& next, const std::vector& prev); + +/** + * @brief Check if the maps defining the vertex connectivity of a polygonal mesh (to and out) + * are valid + * + * @param prev: size #he vector, prev halfedge id + * @param to: size #he vector, halfedge vertex tip id + * @param out: size #v vector, arbitrary halfedge id outgoing from vertex + * @return true iff the vertex maps are valid + */ +bool are_polygon_mesh_vertices_valid( + const std::vector& opp, + const std::vector& prev, + const std::vector& to, + const std::vector& out); + +/** 
+ * @brief Check if the maps defining the face connectivity of a polygonal mesh (he2f and f2he) + * are valid + * + * @param next: size #he vector, next halfedge id + * @param he2f: size #he vector, face id adjacent to halfedge + * @param f2he: size #f vector, arbitrary halfedge id adjacent to face + * @return true iff the face maps are valid + */ +bool are_polygon_mesh_faces_valid( + const std::vector& next, + const std::vector& he2f, + const std::vector& f2he); + +} // namespace Penner diff --git a/include/util/util/spanning_tree.h b/include/util/util/spanning_tree.h new file mode 100644 index 0000000..2168da6 --- /dev/null +++ b/include/util/util/spanning_tree.h @@ -0,0 +1,341 @@ +#pragma once + +#include "util/common.h" + +#include "util/embedding.h" +#include "util/map.h" + +namespace Penner { + +/** + * @brief Base representation for a forest (primal or dual) on a mesh + * + * Supports queries to get parents of edges and vertices, determine if a vertex is + * a root, and determine if an edge in the mesh is in the forest + * + */ +class Forest +{ +public: + Forest() {} + + /** + * @brief Get the number of edges in the forest + * + * @return number of edges + */ + int n_edges() const { return m_edges.size(); } + + /** + * @brief Get the number of vertices in the forest + * + * @return number of vertices + */ + int n_vertices() const { return m_out.size(); } + + /** + * @brief Get the mesh edge corresponding to an edge in the forest + * + * @param index: index of an edge in the forest + * @return index of the edge in the mesh + */ + int edge(int index) const + { + assert(is_valid_index(index)); + return m_edges[index]; + } + + /** + * @brief Get the parent vertex of an edge in the rooted forest + * + * @param index: index of an edge in the forest + * @return index of the parent vertex + */ + int to(int index) const + { + assert(is_valid_index(index)); + return m_to[index]; + } + + /** + * @brief Get the child vertex of an edge in the rooted forest + * + * @param 
index: index of an edge in the forest + * @return index of the child vertex + */ + int from(int index) const + { + assert(is_valid_index(index)); + return m_from[index]; + } + + /** + * @brief Get the parent edge of a vertex in the rooted forest + * + * @param vertex_index: index of a vertex in the forest + * @return index of the parent edge + */ + int out(int vertex_index) const + { + assert(is_valid_vertex_index(vertex_index)); + return m_out[vertex_index]; + } + + /** + * @brief Determine if a vertex is a root of the forest + * + * @param vertex_index: index of a vertex in the forest + * @return true if the vertex is a root + * @return false otherwise + */ + bool is_root(int vertex_index) const + { + assert(is_valid_vertex_index(vertex_index)); + return (m_out[vertex_index] < 0); + } + + /** + * @brief Determine if a mesh edge is in the forest + * + * @param edge_index: index of an edge in the mesh + * @return true if the edge is in the forest + * @return false otherwise + */ + bool is_edge_in_forest(int edge_index) const + { + assert(is_valid_edge_index(edge_index)); + return m_edge_is_in_forest[edge_index]; + } + +protected: + std::vector m_edges; + std::vector m_edge_is_in_forest; + + // Edge to vertex maps + std::vector m_to; + std::vector m_from; + + // Vertex to edge maps + std::vector m_out; + + // Index validation + int num_indices() const { return m_edges.size(); } + int num_edges() const { return m_edge_is_in_forest.size(); } + int num_vertices() const { return m_out.size(); } + bool is_valid_index(int index) const { + return ((index >= 0) && (index < num_indices())); + } + bool is_valid_vertex_index(int vertex_index) const + { + return ((vertex_index >= 0) && (vertex_index < num_vertices())); + } + bool is_valid_edge_index(int edge_index) const + { + return ((edge_index >= 0) && (edge_index < num_edges())); + } + + // Forest validation + bool is_valid_forest(const Mesh& m) const; +}; + +/** + * @brief Representation for a primal minimal spanning tree 
(on vertices and edges) on a mesh + * + */ +class PrimalTree : public Forest +{ +public: + /** + * @brief Construct an empty Primal Tree object + * + */ + PrimalTree() {} + + /** + * @brief Construct a new Primal Tree object on a mesh + * + * @param m: underlying mesh + * @param weights: per-halfedge weights (assumes halfedge pairs have same weight) + * @param root: (optional) starting vertex root for the tree construction + * @param use_shortest_path: (optional) use shortest path tree instead of minimal weight + * tree + */ + PrimalTree( + const Mesh& m, + const std::vector& weights, + int root = 0, + bool use_shortest_path = false); + + /** + * @brief Determine if a mesh edge is in the tree + * + * @param edge_index: index of an edge in the mesh + * @return true if the edge is in the tree + * @return false otherwise + */ + bool is_edge_in_tree(int edge_index) const + { + return is_edge_in_forest(edge_index); + } + +protected: + void initialize_primal_tree( + const Mesh& m, + const std::vector& vertex_from_halfedge); + bool is_valid_primal_tree(const Mesh& m) const; +}; + +/** + * @brief Representation for a dual minimal spanning tree (on faces and edges) on a mesh + * + */ +class DualTree : public Forest +{ +public: + /** + * @brief Construct an empty Dual Tree object + * + */ + DualTree() {} + + /** + * @brief Construct a new Dual Tree object on a mesh + * + * @param m: underlying mesh + * @param weights: per-halfedge weights (assumes halfedge pairs have same weight) + * @param root: (optional) starting face root for the tree construction + * @param use_shortest_path: (optional) use shortest path tree instead of minimal weight + * tree + */ + DualTree( + const Mesh& m, + const std::vector& weights, + int root = 0, + bool use_shortest_path = false); + + /** + * @brief Determine if a mesh edge is in the tree + * + * @param edge_index: index of an edge in the mesh + * @return true if the edge is in the tree + * @return false otherwise + */ + bool 
is_edge_in_tree(int edge_index) const + { + return is_edge_in_forest(edge_index); + } + +protected: + void initialize_dual_tree(const Mesh& m, const std::vector& face_from_halfedge); + bool is_valid_dual_tree(const Mesh& m) const; +}; + +/** + * @brief Representation for a primal minimal spanning cotree on a mesh that is disjoint from a + * given dual tree + * + */ +class PrimalCotree : public PrimalTree +{ +public: + /** + * @brief Construct an empty Primal Cotree object + * + */ + PrimalCotree() {} + + /** + * @brief Construct a new Primal Cotree object on a mesh disjoint from the given dual tree + * + * @param m: underlying mesh + * @param weights: per-halfedge weights (assumes halfedge pairs have same weight) + * @param dual_tree: dual tree on the mesh to keep disjoint + * @param root: (optional) starting vertex root for the tree construction + * @param use_shortest_path: (optional) use shortest path tree instead of minimal weight + * tree + */ + PrimalCotree( + const Mesh& m, + const std::vector& weights, + const DualTree& dual_tree, + int root = 0, + bool use_shortest_path = false); + +private: + bool is_valid_primal_cotree(const Mesh& m, const DualTree& dual_tree) const; +}; + +/** + * @brief Representation for a dual minimal spanning cotree on a mesh that is disjoint from a + * given primal tree + * + */ +class DualCotree : public DualTree +{ +public: + /** + * @brief Construct an empty Dual Cotree object + * + */ + DualCotree() {} + + /** + * @brief Construct a new Dual Cotree object on a mesh disjoint from the given primal tree + * + * @param m: underlying mesh + * @param weights: per-halfedge weights (assumes halfedge pairs have same weight) + * @param primal_tree: primal tree on the mesh to keep disjoint + * @param root: (optional) starting face root for the tree construction + * @param use_shortest_path: (optional) use shortest path tree instead of minimal weight + * tree + */ + DualCotree( + const Mesh& m, + const std::vector& weights, + const 
PrimalTree& primal_tree, + int root = 0, + bool use_shortest_path = false); + +private: + bool is_valid_dual_cotree(const Mesh& m, const PrimalTree& primal_tree) const; +}; + +/** + * @brief Build a minimal spanning forest on a mesh with respect to given edge weights + * and cut along certain edges. + * + * @param m: underlying mesh + * @param weights: per-halfedge weights (assumes halfedge pairs have same weight) + * @param is_cut: mask for cut mesh edges + * @param v_start: starting root for the forest construction + * @param use_shortest_path: use shortest path forest instead of minimal weight forest + * @return map from vertices vj to the halfedge hij connecting them to its parent vi in the forest + */ +std::vector build_primal_forest( + const Mesh& m, + const std::vector& weights, + const std::vector& is_cut, + int v_start = 0, + bool use_shortest_path = false); + +/** + * @brief Build a minimal dual spanning forest on a mesh with respect to given edge weights + * and cut along certain dual edges. 
+ * + * @param m: underlying mesh + * @param weights: per-halfedge weights (assumes halfedge pairs have same weight) + * @param is_cut: mask for cut mesh dual edges + * @param f_start: starting root for the forest construction + * @param use_shortest_path: use shortest path forest instead of minimal weight forest + * @return map from faces fj to the halfedge (adjacent to fi) connecting it to its parent fi in the + * forest + */ +std::vector build_dual_forest( + const Mesh& m, + const std::vector& weights, + const std::vector& is_cut, + int f_start = 0, + bool use_shortest_path = false); + + +} // namespace Penner \ No newline at end of file diff --git a/src/core/vector.hh b/include/util/util/vector.h similarity index 96% rename from src/core/vector.hh rename to include/util/util/vector.h index 5e18a41..9edc233 100644 --- a/src/core/vector.hh +++ b/include/util/util/vector.h @@ -30,9 +30,22 @@ *********************************************************************************/ #pragma once -#include "common.hh" +#include "util/common.h" -namespace CurvatureMetric { +namespace Penner { + +/** + * @brief Convert an Eigen vector to a std vector + * + * @tparam Type of the vector data + * @param v: Eigen vector + * @return std vector + */ +template +std::vector convert_vector(const Eigen::Matrix& v) +{ + return std::vector(v.data(), v.data() + v.size()); +} /// Convert standard template library vector to an Eigen vector. 
/// @@ -256,4 +269,4 @@ void enumerate_boolean_array( std::vector& false_entry_list, std::vector& array_to_list_map); -} // namespace CurvatureMetric +} // namespace Penner diff --git a/src/core/vf_mesh.hh b/include/util/util/vf_mesh.h similarity index 74% rename from src/core/vf_mesh.hh rename to include/util/util/vf_mesh.h index 01d2dea..b828463 100644 --- a/src/core/vf_mesh.hh +++ b/include/util/util/vf_mesh.h @@ -30,9 +30,9 @@ *********************************************************************************/ #pragma once -#include "common.hh" +#include "util/common.h" -namespace CurvatureMetric { +namespace Penner { /// Compute the number of connected components of a mesh /// @@ -55,6 +55,18 @@ void remove_unreferenced( Eigen::MatrixXi& FN, std::vector& new_to_old_map); +/// Given a mesh with a parametrization, generate the 3D seam polylines +/// +/// @param[in] V: mesh vertices +/// @param[in] F: mesh faces +/// @param[in] FT: parametrization faces +/// @return seam vertices +/// @return seam edges +std::tuple generate_seams( + const Eigen::MatrixXd& V, + const Eigen::MatrixXi& F, + const Eigen::MatrixXi& FT); + /// Given a mesh with a parametrization, cut the mesh along the parametrization seams to /// create a vertex set corresponding to the faces of the uv domain. /// @@ -70,4 +82,31 @@ void cut_mesh_along_parametrization_seams( const Eigen::MatrixXi& FT, Eigen::MatrixXd& V_cut); -} // namespace CurvatureMetric +/** + * @brief Generate a list of boundary vertex indices in a VF mesh + * + * @param F: mesh faces + * @return list of boundary vertex indices + */ +std::vector find_boundary_vertices(const Eigen::MatrixXi& F, int num_vertices); + +/** + * @brief Compute the boundary vertices of a VF mesh. + * + * @param F: mesh faces + * @return boolean mask of boundary vertices + */ +std::vector compute_boundary_vertices(const Eigen::MatrixXi& F, int num_vertices); + +/** + * @brief Inflate a mesh by displacing vertices along the mesh surface normal. 
+ * + * @param V: input mesh vertices + * @param F: mesh faces + * @param inflation_distance: (optional) distance to displace vertices + * @return inflated mesh vertices + */ +Eigen::MatrixXd +inflate_mesh(const Eigen::MatrixXd& V, const Eigen::MatrixXi& F, double inflation_distance = 1e-8); + +} // namespace Penner diff --git a/py/render.py b/py/render.py new file mode 100644 index 0000000..146aa6c --- /dev/null +++ b/py/render.py @@ -0,0 +1,297 @@ +import igl +import numpy as np +from conformal_impl.overload_math import * +from tqdm import trange +import optimization_py as opt +from matplotlib import cm, colors +import optimize_impl.energies as energies +import os + + +def count_valence(n, opp, h0, is_cut): +# Get the number of cut-edges touching to[h0] + hi = opp[n[n[h0]]] + if is_cut[h0]: + valence = 1 + else: + valence = 0 + while hi != h0: + if is_cut[hi]: + valence += 1 + hi = opp[n[n[hi]]] + return valence + +def trim_cuts(n, opp, to, cones, is_cut_h): +# Given tree-like cutgraph, try to remove any degree-1 vertices that's not a cone + any_trimmed = True + while any_trimmed: # repeatedly trim degree-1 cuts + any_trimmed = False + for hi in range(len(opp)): + v0 = to[hi]; v1 = to[opp[hi]] + valence0 = count_valence(n, opp, opp[hi], is_cut_h) + valence1 = count_valence(n, opp, hi, is_cut_h) + if is_cut_h[hi] and ((valence0 == 1 and v0 not in cones) or (valence1 == 1 and v1 not in cones)): + is_cut_h[hi] = False + is_cut_h[opp[hi]] = False + any_trimmed = True + +def add_cut_to_sin(n, opp, to, cones, edge_labels, is_cut_h, reindex, is_mesh_doubled): + trim_cuts(n, opp, to, cones, is_cut_h) + + cut_to_sin_list = []; + cnt_cut = 0 + + for he in range(len(is_cut_h)): + if (is_cut_h[he] == True) and (not is_mesh_doubled or (is_mesh_doubled and edge_labels[he] == '\x01')): + vid_from = to[opp[he]] + vid_to = to[he] + cut_to_sin_list.append([reindex[vid_from], reindex[vid_to]]) + cnt_cut += 1 + return cut_to_sin_list + +def add_shading(color_rgb, v3d, f, 
fid_mat_input, bc_mat_input, view, proj, flat_shading=False): + #compute normals (per face) + normals = igl.per_face_normals(v3d,f,np.array([1.0,1.0,1.0])) + pv_normals = igl.per_vertex_normals(v3d, f) + pv_normals4 = np.zeros((pv_normals.shape[0], 4)) + pv_normals4[:pv_normals.shape[0],:3] = pv_normals + normals4 = np.zeros((normals.shape[0],4)) + normals4[:normals.shape[0],:3] = normals + + ao = igl.ambient_occlusion(v3d, f, v3d, pv_normals,500) + + # normal transformation matrix + norm_trans = np.linalg.inv(view).transpose() + + light_eye = np.array([0.0, 0.3, 0.0]) + (H, W, _) = color_rgb.shape + for i in trange(H): + for j in range(W): + fid = fid_mat_input[i][j] + bc = bc_mat_input[i][j] + if fid > -1: + diff = color_rgb[i,j,:3] + amb = 0.2 * diff + spec = 0.3 + 0.1 * (diff - 0.3) + ao_factor = ao[f[fid, 0]] * bc[0] + ao[f[fid, 1]] * bc[1] + ao[f[fid, 2]] * bc[2] + + pos = v3d[f[fid, 0]] * bc[0] + v3d[f[fid, 1]] * bc[1] + v3d[f[fid, 2]] * bc[2] + pos4 = np.ones(4); pos4[0:3] = pos + pos_eye = np.dot(view, pos4)[0:3] + if flat_shading: + norm4 = normals4[fid] + else: + norm4 = pv_normals4[f[fid, 0]] * bc[0] + pv_normals4[f[fid, 1]] * bc[1] + pv_normals4[f[fid, 2]] * bc[2] + norm_eye = np.dot(norm_trans, norm4)[0:3] + norm_eye = norm_eye / np.linalg.norm(norm_eye) + + # diff color + vec_to_light_eye = light_eye - pos_eye + dir_to_light_eye = vec_to_light_eye / np.linalg.norm(vec_to_light_eye) + clamped_dot_prod = max(np.dot(dir_to_light_eye, norm_eye), 0) + color_diff = clamped_dot_prod * diff + + # spec color + proj_to_norm = np.dot(-dir_to_light_eye, norm_eye) * norm_eye + refl_eye = proj_to_norm - (-dir_to_light_eye - proj_to_norm) + surf_to_view_eye = - pos_eye / np.linalg.norm(pos_eye) + clamped_dot_prod = max(0, np.dot(refl_eye, surf_to_view_eye)) + spec_factor = pow(clamped_dot_prod, 90) + color_spec = spec_factor * spec + + color_new = amb + 1.2 * color_diff + color_spec + for k in range(3): + color_new[k] = max(0, min(1, color_new[k])) + 
color_rgb[i,j,:3] = color_new * (0.5 + (1-ao_factor)*0.5) + return color_rgb + + +def color_mesh_with_grid(fid_mat, bc_mat, h, n, to, u, v, r, H, W, colormap, norm, N_bw = 15, thick = 0.1, uv_scale=0): + # Get uv units + if (uv_scale == 0): + uv_scale = max((np.max(u) - np.min(u)), (np.max(v) - np.min(v))) + u = u/uv_scale + v = v/uv_scale + u_min = float(np.nanmin(u)) + u_max = float(np.nanmax(u)) + v_min = float(np.nanmin(v)) + v_max = float(np.nanmax(v)) + u_unit = 1 / N_bw + v_unit = u_unit + u_thick = thick * u_unit + v_thick = thick * v_unit + + # Generate color grid + color_rgb_gd = np.zeros((H, W, 4)) + for i in trange(H): + for j in range(W): + if fid_mat[i][j] > -1: + # Get grid point information + fid = fid_mat[i][j] + bc = bc_mat[i][j] + e0 = h[fid] + e1 = n[e0] + e2 = n[e1] + + # Interpolate color + r0 = float(r[to[e0]]) + r1 = float(r[to[e1]]) + r2 = float(r[to[e2]]) + r_pt = float(r0 * bc[1] + r1 * bc[2] + r2 * bc[0]) + + # Interpolate uv coordinate + u_pt = float(u[e0]) * bc[1] + float(u[e1]) * bc[2] + float(u[e2]) * bc[0] + v_pt = float(v[e0]) * bc[1] + float(v[e1]) * bc[2] + float(v[e2]) * bc[0] + + # Color according to r in interior of grid cells + if u_thick < ((u_pt - u_min)%u_unit) <= (u_unit - u_thick) and v_thick < ((v_pt - v_min)%v_unit) <= (v_unit-v_thick): + color_rgb_gd[i,j,:] = np.array(colormap(norm(r_pt))[:4]) + # Shade grid lines darker + else: + color_rgb_gd[i,j,:] = 0.55 * np.array(colormap(norm(r_pt))[:4]) + color_rgb_gd[i,j,3] = 1 + + # Color additional features + elif fid_mat[i][j] == -1: + color_rgb_gd[i,j,:] = np.array([1.0,1.0,1.0,0]) + elif fid_mat[i][j] == -2: # Red sphere + color_rgb_gd[i,j,:] = np.array([255.0, 50.0, 50.0, 255.0]) / 256.0 + #color_rgb_gd[i,j,:] = np.array([223.0, 119.0, 1.0, 255.0]) / 256.0 + #color_rgb_gd[i,j,:] = np.array([0.7,0.1,0.2]) + #color_rgb_gd[i,j,:] = np.array([1.0,0.0, 0.75]) + elif fid_mat[i][j] == -3: # Blue sphere + color_rgb_gd[i,j,:] = np.array([50.0, 50.0, 255.0, 255.0]) / 256.0 + 
#color_rgb_gd[i,j,:] = np.array([0.0, 96.0, 128.0, 255.0]) / 256.0 + #color_rgb_gd[i,j,:] = np.array([0.5,0.7,0.35]) + #color_rgb_gd[i,j,:] = np.array([0.0,0.75,1.0]) + elif fid_mat[i][j] == -4: + color_rgb_gd[i,j,:] = np.array([0,0,0,1]) + elif fid_mat[i][j] == -5: + color_rgb_gd[i,j,:] = np.array([223.0, 119.0, 1.0, 255.0]) / 256.0 + #color_rgb_gd[i,j,:] = np.array([1,0.1,0.1,1]) + + return color_rgb_gd + + +def get_corner_uv(n, h, to, f, fuv, uv): + u = np.zeros(len(n)) + v = np.zeros(len(n)) + + # Get per corner uv + for i in range(len(fuv)): + hh = h[i] + if (to[hh] != f[i,1]): + print("error") + u[hh] = uv[fuv[i,1],0] + v[hh] = uv[fuv[i,1],1] + u[n[hh]] = uv[fuv[i,2],0] + v[n[hh]] = uv[fuv[i,2],1] + u[n[n[hh]]] = uv[fuv[i,0],0] + v[n[n[hh]]]= uv[fuv[i,0],1] + + return u, v + +def generate_ambient_occlusion(v, f, n_rays=500): + """ + Compute ambient occlusion values for the mesh (v,f) + + param[in] np.array v: vertex positions for the mesh + param[in] np.array f: triangulation for the mesh + param[in] int n_rays: number of rays for occulsion computation + return np.array: per vertex ambient occlusion values + """ + # Compute per vertex normals + face_normals = igl.per_face_normals(v,f,np.array([1.0,1.0,1.0])) + vertex_normals = igl.per_vertex_normals(v, f) + vertex_normals4 = np.zeros((vertex_normals.shape[0], 4)) + vertex_normals4[:vertex_normals.shape[0],:3] = vertex_normals + + # Compute ambient occlusion + ao = igl.ambient_occlusion(v, f, v, vertex_normals, n_rays) + + return 1.0 - ao + +def cprs_arr(x): + zeros = np.zeros_like(x) + ones = np.ones_like(x) + x = np.maximum(zeros,np.minimum(ones, x)) + return np.maximum(0, np.minimum(ones, 3 * x * x - 2 * x * x * x)) + + +def generate_colormap(x, + shift=0, + scale=None, + clamp=True): + """ + Generate a color map from an array of function values x. 
+ + param[in] np.array x: function to generate color values for + param[in] float shift: translate x by this value before generating color map + param[in] float scale: scale x by this value (after translation) before generating color + map. If None is used, the average value of x (after translation) is used instead + param[in] clamp: If true, clamp linear interpolation instead of using arctan + return np.array: colormap giving an RGB value for each element of x + """ + # Shift function + c = x - shift + + if not clamp: + c = c / scale + # Clamp colormap to range [0,1] + #c = np.maximum(c, 0) + #c = np.minimum(c, 1) + # Map (-infty,infty) -> (0,1) with arctan followed by a linear map + c = (np.arctan(c) / (np.pi/2)) + + if (scale > 0): + norm = colors.CenteredNorm(scale*0.5, scale*0.6) + else: + norm = colors.CenteredNorm(0, 1) + + print(np.max(c), np.average(c)) # FIXME + # Use the coolwarm color scheme + return np.array(cm.coolwarm(norm(c))[:,:3]) + + +def get_layout_colormap( + v, + f, + uv, + fuv, + colormap, + scale = 1.0, + use_sqrt_scale=False, + use_log_scale=False, + average_per_vertex=False +): + # Get energy + energy = energies.get_face_energy(v, f, uv, fuv, colormap, use_sqrt_scale=use_sqrt_scale, use_log_scale=use_log_scale) + print(np.max(energy), np.average(energy)) + + # Generate colormap + c = generate_colormap(energy, shift=0, scale=scale) + if (average_per_vertex): + c = igl.average_onto_vertices(v, f, c) + + return c + + +def render_layout( + v, + f, + uv, + c, + show_lines, + lighting_factor, + average_per_vertex +): + # Average ambient shading for face or colormap for vertex functions + ao = generate_ambient_occlusion(v, f) + if (not average_per_vertex): + ao = np.average(ao[f], axis=1) + + # Generate mesh viewer with shading for layout + viewer = opt.generate_mesh_viewer(uv, f, show_lines) + opt.add_shading_to_mesh(viewer, c, ao, lighting_factor) + + return viewer diff --git a/scripts/holonomy_histogram.py b/scripts/holonomy_histogram.py new 
# --- patch metadata: new file scripts/holonomy_histogram.py (mode 100644, index 0000000..0e45fd9) ---
# Script to create histograms for mesh optimization results.
#
# By default, runs all meshes specified by the `fname` argument in parallel.
# Functions to run the parallelized script and the method without parallelization
# are also exposed for use in other modules.

import os, sys
script_dir = os.path.dirname(__file__)
module_dir = os.path.join(script_dir, '..', 'py')
sys.path.append(module_dir)
import igl
import numpy as np
import seaborn as sns
import holonomy_py as holonomy
import optimize_impl.energies as energies
import optimize_impl.analysis as analysis
from conformal_impl.halfedge import *
import optimization_scripts.script_util as script_util

def add_similarity_histogram_arguments(parser):
    """Register the histogram command line arguments on parser."""
    parser.add_argument(
        "-o", "--output_dir",
        help="directory for output images"
    )
    parser.add_argument(
        "--suffix",
        help="suffix for output files",
        default=""
    )
    parser.add_argument(
        "--histogram_choice",
        help="histogram value to plot",
        default="compare_scale_factors"
    )
    parser.add_argument(
        "--bin_min",
        help="minimum value for bin",
        type=float
    )
    parser.add_argument(
        "--bin_max",
        help="maximum value for bin",
        type=float
    )
    parser.add_argument(
        "--ylim",
        help="y limit for the histogram",
        type=float,
        default=50
    )
    parser.add_argument(
        "--histogram_width",
        help="histogram width",
        type=float,
        default=7
    )
    parser.add_argument(
        "--comparison_label",
        help="label for comparison figures",
        default="optimized"
    )
    parser.add_argument(
        "--color",
        help="color for histogram",
        default='red'
    )
    parser.add_argument(
        "--second_color",
        help="second color for comparison histograms",
        default='blue'
    )

def similarity_histogram_one(args, fname):
    """Generate the selected histogram for a single mesh file fname."""
    # Get mesh and test name
    dot_index = fname.rfind(".")
    m = fname[:dot_index]
    if (args['suffix'] == ""):
        name = m
    else:
        name = m + '_'+args['suffix']
    V, F = igl.read_triangle_mesh(os.path.join(args['input_dir'], fname))
    Th_hat = np.loadtxt(os.path.join(args['input_dir'], m + "_Th_hat"), dtype=float)
    rotation_form = np.loadtxt(os.path.join(args['input_dir'], m + "_kappa_hat"), dtype=float)

    # Create output directory for the mesh
    output_dir = script_util.get_mesh_output_directory(args['output_dir'], m)
    os.makedirs(output_dir, exist_ok=True)
    os.makedirs(os.path.join(args['output_dir'], 'histograms'), exist_ok=True)

    # Get logger
    log_path = os.path.join(output_dir, name+'_similarity_histogram.log')
    logger = script_util.get_logger(log_path)
    logger.info("Generating histograms for {}".format(name))

    # Generate initial similarity metric
    free_cones = []
    marked_metric_params = holonomy.MarkedMetricParameters()
    marked_metric, _ = holonomy.generate_marked_metric(V, F, V, F, Th_hat, rotation_form, free_cones, marked_metric_params)

    # Get target metric (optionally on the Delaunay connectivity)
    num_homology_loops = marked_metric.n_homology_basis_loops()  # NOTE(review): unused
    if (args['use_delaunay']):
        logger.info("Using Delaunay connectivity")
        marked_target = marked_metric.clone_cone_metric()
        marked_target.make_discrete_metric()
        flip_seq = np.array(marked_target.get_flip_sequence())
        penner_target = marked_target.get_reduced_metric_coordinates()
    else:
        penner_target = marked_metric.get_reduced_metric_coordinates()

    # Get final metric coordinates
    try:
        lambdas_path = os.path.join(args['lambdas_dir'], m + "_output", name + '_metric_coords')
        logger.info("Loading metric coordinates from {}".format(lambdas_path))
        reduced_metric_coords = np.loadtxt(lambdas_path)
    except Exception:
        logger.error('Could not load metric coordinates')
        return

    # Ensure coordinates are defined on same connectivity
    marked_metric = marked_metric.set_metric_coordinates(reduced_metric_coords)
    if (args['use_delaunay']):
        logger.info("Flipping to Delaunay connectivity")
        for h in flip_seq:
            marked_metric.flip_ccw(h, True)
    penner_coords = marked_metric.get_reduced_metric_coordinates()

    # Get bin range (or None if no range values provided)
    # FIX(review): truthiness treated a legitimate bound of 0.0 as missing;
    # the arguments default to None, so test for None explicitly
    if (args['bin_min'] is not None) or (args['bin_max'] is not None):
        binrange = (args['bin_min'], args['bin_max'])
    else:
        binrange = None

    # Get histogram color (named colors map to fixed hex values)
    color_dict = {
        'red': "#b90f29",
        'blue': "#3c4ac8"
    }
    first_color = args['color']
    if first_color in color_dict:
        first_color = color_dict[first_color]
    colors = [first_color,]

    # Add second color if it exists
    second_color = args['second_color']
    if second_color:
        if second_color in color_dict:
            second_color = color_dict[second_color]
        colors.append(second_color)

    # Set palette
    sns.set_palette(colors)

    if (args['histogram_choice'] == 'stretch_factors'):
        logger.info("Computing symmetric stretches")
        X = energies.symmetric_stretches(penner_coords, penner_target)
        print("Average stretch factor:", np.average(X))
        print("Max stretch factor:", np.max(X))
        print("Stretch factors above 2:", len(np.where(X > 2)[0]), "/", len(X))

        label = 'stretch factors'
        output_path = os.path.join(
            args['output_dir'],
            'histograms',
            name+"_stretch_factors.png"
        )
        analysis.generate_histogram(X, label, binrange, output_path, ylim=args['ylim'], width=args['histogram_width'])
    else:
        logger.info("No histogram selected")

def similarity_histogram_many(args):
    """Run similarity_histogram_one over all meshes in parallel."""
    script_util.run_many(similarity_histogram_one, args)


if __name__ == "__main__":
    # Parse arguments for the script
    parser = script_util.generate_parser("Generate histograms for mesh")
    add_similarity_histogram_arguments(parser)
    args = vars(parser.parse_args())

    # Run method in parallel
    similarity_histogram_many(args)

# --- patch boundary: diff --git a/scripts/holonomy_overlay.py (new file, index 0000000..e1b4917) ---
# Script to generate an overlay from a marked mesh

import os, sys
script_dir = os.path.dirname(__file__)
module_dir = os.path.join(script_dir, '..', 'py')
sys.path.append(module_dir)
import numpy as np
import holonomy_py as holonomy
import optimization_py as opt
import pickle, math, logging
import igl
import optimization_scripts.script_util as script_util
import optimize_impl.render as render

def similarity_overlay_one(args, fname):
    """Generate an overlay mesh with uv coordinates for a single mesh fname."""
    # Get mesh and test name
    dot_index = fname.rfind(".")
    m = fname[:dot_index]
    if (args['suffix'] == ""):
        name = m
    else:
        name = m + '_'+args['suffix']

    # Create output directory for the mesh
    output_dir = script_util.get_mesh_output_directory(args['output_dir'], m)
    os.makedirs(output_dir, exist_ok=True)

    # Skip meshes that are already processed (best effort; any failure falls
    # through to reprocessing)
    try:
        uv_mesh_path = os.path.join(output_dir, name + '_refined_with_uv.obj')
        if os.path.isfile(uv_mesh_path):
            V, F = igl.read_triangle_mesh(uv_mesh_path)
            if (len(V) > 0):
                print("Skipping processed mesh")
                return
    except Exception:
        pass

    # Get logger
    log_path = os.path.join(output_dir, name+'_convert_to_vf.log')
    logger = script_util.get_logger(log_path)
    logger.info("Converting {} to vf".format(name))

    try:
        V, F = igl.read_triangle_mesh(os.path.join(args['input_dir'], fname))
        Th_hat = np.loadtxt(os.path.join(output_dir, m + '_Th_hat'), dtype=float)
        rotation_form = np.loadtxt(os.path.join(output_dir, m + '_kappa_hat'), dtype=float)
    except Exception:
        # FIX(review): the original passed the path as a stray positional
        # argument ("{}", path), which logging never substitutes; use format
        logger.info("Could not open mesh data at {}".format(args['input_dir']))
        return

    # Get final optimized lambdas
    try:
        metric_coords_path = os.path.join(output_dir, name + "_metric_coords")
        logger.info("Loading metric coordinates from {}".format(metric_coords_path))
        reduced_metric_coords = np.loadtxt(metric_coords_path)
    except Exception:
        logger.error('Could not load metric')
        return

    # Get mesh information
    is_bd = igl.is_border_vertex(V, F)
    build_double = (np.sum(is_bd) != 0)
    _, vtx_reindex = opt.fv_to_double(V, F, V, F, Th_hat, [], False)

    # Get cones (interior vertices with angle differing from 2 pi), then map
    # them through the vertex reindexing
    cones = np.array([id for id in range(len(Th_hat)) if np.abs(Th_hat[id]-2*math.pi) > 1e-15 and not is_bd[id]], dtype=int)
    cones = [idx for idx in range(len(vtx_reindex)) if vtx_reindex[idx] in cones]

    # Get flip sequence (optional; an empty sequence is used on failure)
    flip_seq = np.array([])
    try:
        flip_seq = np.loadtxt(os.path.join(output_dir, name + "_flip_seq"), dtype=int)
    except Exception:
        logger.error('Could not load flip sequence')

    # Generate initial similarity metric
    free_cones = []
    marked_metric_params = holonomy.MarkedMetricParameters()
    marked_metric, _ = holonomy.generate_marked_metric(V, F, V, F, Th_hat, rotation_form, free_cones, marked_metric_params)

    # Make overlay
    cut_h = []
    vf_res = opt.generate_VF_mesh_from_metric(V, F, Th_hat, marked_metric, reduced_metric_coords, cut_h, False)
    _, V_o, F_o, uv_o, FT_o, is_cut_h, _, fn_to_f_o, endpoints_o = vf_res

    # Save new meshes
    uv_mesh_path = os.path.join(output_dir, name + '_overlay_with_uv.obj')
    logger.info("Saving uv mesh at {}".format(uv_mesh_path))
    opt.write_obj_with_uv(uv_mesh_path, V_o, F_o, uv_o, FT_o)

    # Refine original mesh using overlay
    logger.info("Running refinement")
    refinement_mesh = opt.RefinementMesh(V_o, F_o, uv_o, FT_o, fn_to_f_o, endpoints_o)
    V_r, F_r, uv_r, FT_r, fn_to_f_r, endpoints_r = refinement_mesh.get_VF_mesh()

    # Save cut information
    simp_path = os.path.join(output_dir, name + '_is_cut_h')
    logger.info("Saving cut information at {}".format(simp_path))
    np.savetxt(simp_path, is_cut_h)

    # Save cut to singularity information (the same list is written once per
    # output mesh variant)
    # TODO Generate this from file data instead of pickle
    cut_to_sin_list = render.add_cut_to_sin(marked_metric.n, marked_metric.opp, marked_metric.to, cones, marked_metric.type, is_cut_h, vtx_reindex, build_double)
    for pickle_suffix in ['_cut_to_sin_list.pickle',
                          '_overlay_with_uv_cut_to_sin_list.pickle',
                          '_refined_with_uv_cut_to_sin_list.pickle']:
        simp_path = os.path.join(output_dir, name + pickle_suffix)
        logger.info("Saving cut to singularity information at {}".format(simp_path))
        with open(simp_path, 'wb') as file:
            pickle.dump(cut_to_sin_list, file)

    # Write fn_to_f to file
    face_map_path = os.path.join(output_dir, name + '_fn_to_f')
    logger.info("Saving new to old face map at {}".format(face_map_path))
    np.savetxt(face_map_path, fn_to_f_o, fmt='%i')

    # Write vn_to_v to file
    vertex_map_path = os.path.join(output_dir, name + '_vn_to_v')
    logger.info("Saving trivial new to old vertex map at {}".format(vertex_map_path))
    vn_to_v = np.arange(len(uv_o))
    np.savetxt(vertex_map_path, vn_to_v, fmt='%i')

    # Write endpoints to file
    endpoints_path = os.path.join(output_dir, name + '_endpoints')
    logger.info("Saving endpoints at {}".format(endpoints_path))
    np.savetxt(endpoints_path, endpoints_o, fmt='%i')

    # Write combined refined mesh with uv
    uv_mesh_path = os.path.join(output_dir, name + '_refined_with_uv.obj')
    logger.info("Saving refined uv mesh at {}".format(uv_mesh_path))
    opt.write_obj_with_uv(uv_mesh_path, V_r, F_r, uv_r, FT_r)

def similarity_overlay_many(args):
    """Run similarity_overlay_one over all meshes in parallel."""
    script_util.run_many(similarity_overlay_one, args)

def add_similarity_overlay_arguments(parser):
    """Register the overlay command line arguments on parser."""
    parser.add_argument("-f", "--fname", help="filenames of the obj file",
                        nargs='+')
    parser.add_argument("-i", "--input_dir", help="input folder that stores obj files and Th_hat")
    parser.add_argument("--fit_field", help="fit intrinsic cross field for rotation form",
                        action="store_true")
    parser.add_argument("-o", "--output_dir",
                        help="directory for output lambdas and logs")
    parser.add_argument(
        "--suffix",
        help="suffix for output files",
        default=""
    )

if __name__ == "__main__":
    # Parse arguments for the script
    parser = script_util.generate_parser("Run optimization method")
    add_similarity_overlay_arguments(parser)
    args = vars(parser.parse_args())

    # Run parallel optimization method
    similarity_overlay_many(args)

# --- patch boundary: diff --git a/scripts/holonomy_pipeline.py (new file, index 0000000..c1f9c9d) ---
import os, sys
script_dir = os.path.dirname(__file__)
module_dir = os.path.join(script_dir, '..', 'py')
sys.path.append(module_dir)
opt_script_dir = os.path.join(script_dir, 'optimization_scripts')
sys.path.append(opt_script_dir)
import random
import numpy as np
import optimization_scripts.script_util as script_util
import holonomy_render
import optimize_angles
import optimize_fixed_boundary
import optimize_refined_angles
import optimize_aligned_angles
import render_mesh
import statistics
import holonomy_overlay
import optimize_similarity
import holonomy_histogram
import argparse

def generate_parser(description='Run the optimization method with options.'):
    """Build an argument parser with the shared parallelism option."""
    parser = argparse.ArgumentParser(description)
    parser.add_argument("--num_processes", help="number of processes for parallelism",
                        type=int, default=8)
    return parser

if __name__ == "__main__":
    np.seterr(invalid='raise')

    # Parse arguments for the script
    parser = generate_parser("Run pipeline")
    parser.add_argument("pipeline_path")
    pipeline_args = parser.parse_args()

    pipeline_spec = script_util.load_pipeline(pipeline_args.pipeline_path)
    pipeline_dir = os.path.dirname(pipeline_args.pipeline_path)
+ + # Load global arguments + global_args = pipeline_spec['global_args'] + if 'output_dir' not in global_args: + global_args['output_dir'] = pipeline_dir + if 'lambdas_dir' not in global_args: + global_args['lambdas_dir'] = pipeline_dir + if 'uv_dir' not in global_args: + global_args['uv_dir'] = pipeline_dir + if 'input_dir' not in global_args: + global_args['input_dir'] = pipeline_dir + if 'fname' not in global_args: + files = os.listdir(global_args['input_dir']) + global_args['fname'] = [f for f in files if f.endswith(".obj")] + random.shuffle(global_args['fname']) + + # Iterate over all scripts to run listed in the pipeline file + pipeline_list = pipeline_spec['pipeline'] + for pipeline_item in pipeline_list: + method = pipeline_item['method'] + args_list = pipeline_item['args_list'] + if pipeline_item['skip']: + continue + if (method == 'optimize_angles'): + for args_spec in args_list: + # Get default arguments for optimization + parser_method = generate_parser() + optimize_angles.add_constrain_similarity_arguments(parser_method) + args_default = vars(parser_method.parse_args("")) + + # Overwrite arguments + args = script_util.overwrite_args(args_default, global_args) + args = script_util.overwrite_args(args_default, args_spec) + + # Run optimization + optimize_angles.constrain_similarity_many(args) + if (method == 'optimize_fixed_boundary'): + for args_spec in args_list: + # Get default arguments for optimization + parser_method = generate_parser() + optimize_fixed_boundary.add_arguments(parser_method) + args_default = vars(parser_method.parse_args("")) + + # Overwrite arguments + args = script_util.overwrite_args(args_default, global_args) + args = script_util.overwrite_args(args_default, args_spec) + + # Run optimization + optimize_fixed_boundary.run_many(args) + if (method == 'optimize_similarity'): + for args_spec in args_list: + # Get default arguments for optimization + parser_method = generate_parser() + 
optimize_similarity.add_optimize_similarity_arguments(parser_method) + args_default = vars(parser_method.parse_args("")) + + # Overwrite arguments + args = script_util.overwrite_args(args_default, global_args) + args = script_util.overwrite_args(args_default, args_spec) + + # Run optimization + optimize_similarity.optimize_similarity_many(args) + if (method == 'holonomy_overlay'): + for args_spec in args_list: + # Get default arguments for optimization + parser_method = generate_parser() + holonomy_overlay.add_similarity_overlay_arguments(parser_method) + args_default = vars(parser_method.parse_args("")) + + # Overwrite arguments + args = script_util.overwrite_args(args_default, global_args) + args = script_util.overwrite_args(args_default, args_spec) + + # Run optimization + holonomy_overlay.similarity_overlay_many(args) + if (method == 'statistics'): + for args_spec in args_list: + # Get default arguments for method + parser_method = generate_parser() + statistics.add_statistics_arguments(parser_method) + args_default = vars(parser_method.parse_args("")) + + # Overwrite arguments + args = script_util.overwrite_args(args_default, global_args) + args = script_util.overwrite_args(args_default, args_spec) + + # Run method + statistics.run_statistics(args) + if (method == 'holonomy_histogram'): + for args_spec in args_list: + # Get default arguments for method + parser_method = script_util.generate_parser() + holonomy_histogram.add_similarity_histogram_arguments(parser_method) + args_default = vars(parser_method.parse_args("")) + + # Overwrite arguments + args = script_util.overwrite_args(args_default, global_args) + args = script_util.overwrite_args(args_default, args_spec) + + # Run method + holonomy_histogram.similarity_histogram_many(args) + if (method == 'holonomy_render'): + for args_spec in args_list: + # Get default arguments for rendering from uv + parser_method = generate_parser() + holonomy_render.add_render_uv_arguments(parser_method) + args_default = 
vars(parser_method.parse_args("")) + + # Overwrite arguments + args = script_util.overwrite_args(args_default, global_args) + args = script_util.overwrite_args(args_default, args_spec) + + # Run method + holonomy_render.render_uv_many(args) + if (method == 'render_mesh'): + for args_spec in args_list: + # Get default arguments for rendering from uv + parser_method = generate_parser() + render_mesh.add_render_mesh_arguments(parser_method) + args_default = vars(parser_method.parse_args("")) + + # Overwrite arguments + args = script_util.overwrite_args(args_default, global_args) + args = script_util.overwrite_args(args_default, args_spec) + + # Run method + render_mesh.render_mesh_many(args) + if (method == 'optimize_refined_angles'): + for args_spec in args_list: + # Get default arguments for optimization + parser_method = generate_parser() + optimize_refined_angles.add_optimize_refined_arguments(parser_method) + args_default = vars(parser_method.parse_args("")) + + # Overwrite arguments + args = script_util.overwrite_args(args_default, global_args) + args = script_util.overwrite_args(args_default, args_spec) + + # Run optimization + optimize_refined_angles.optimize_refined_many(args) + if (method == 'optimize_aligned_angles'): + for args_spec in args_list: + # Get default arguments for optimization + parser_method = generate_parser() + optimize_aligned_angles.add_arguments(parser_method) + args_default = vars(parser_method.parse_args("")) + + # Overwrite arguments + args = script_util.overwrite_args(args_default, global_args) + args = script_util.overwrite_args(args_default, args_spec) + + # Run optimization + optimize_aligned_angles.run_many(args) diff --git a/scripts/holonomy_render.py b/scripts/holonomy_render.py new file mode 100644 index 0000000..18b5547 --- /dev/null +++ b/scripts/holonomy_render.py @@ -0,0 +1,282 @@ +# Script to render mesh with texture from uv coordinates. +# +# By default, runs all meshes specified by the `fname` argument in parallel. 
# Functions to run the parallelized script and the method without parallelization
# are also exposed for use in other modules.

import os, sys
script_dir = os.path.dirname(__file__)
module_dir = os.path.join(script_dir, '..', 'py')
sys.path.append(module_dir)
import numpy as np
import igl
import pickle
import optimization_py as opt
import render as render
import optimize_impl.energies as energies
from conformal_impl.halfedge import *
from matplotlib import cm, colors
import script_util
import matplotlib.pyplot as plt
from tqdm import trange


def render_uv_one(args, fname):
    """Render a single mesh fname with a uv grid texture and shading."""
    # Get common rendering parameters
    W = args['width']
    H = args['height']
    bd_thick = args['bd_thick']

    # Get mesh and test name
    dot_index = fname.rfind(".")
    m = fname[:dot_index]
    if (args['suffix'] == ""):
        name = m
    else:
        name = m + '_'+args['suffix']

    # Create output directory for the mesh
    output_dir = script_util.get_mesh_output_directory(args['output_dir'], m)
    os.makedirs(output_dir, exist_ok=True)

    # Get logger
    log_path = os.path.join(output_dir, name+'_holonomy_render.log')
    logger = script_util.get_logger(log_path)
    logger.info("Rendering {}".format(name))

    # Load mesh information
    try:
        input_dir = args['input_dir']
        logger.info("Loading initial mesh at {}".format(input_dir))
        v3d_orig, f_orig = igl.read_triangle_mesh(os.path.join(input_dir, m+'.obj'))
    except Exception:
        logger.error("Could not load initial mesh")
        return

    # Load uv information
    try:
        uv_dir = args['uv_dir']
        logger.info("Loading uv coordinates at {}".format(uv_dir))
        v3d, uv, _, f, fuv, _ = igl.read_obj(os.path.join(uv_dir, m + "_output", name + ".obj"))
    except Exception:
        logger.error("Could not load uv coordinates")
        return

    # Need to build double mesh when it has boundary
    is_bd = igl.is_border_vertex(v3d_orig, f_orig)
    build_double = (np.sum(is_bd) != 0)
    logger.info("Is double: {}".format(build_double))

    # FIXME Add option to use conversion directly
    # Load vn_to_v from simplification (identity map on failure)
    try:
        vertex_map_path = os.path.join(uv_dir, m + "_output", name + '_vn_to_v')
        logger.info("Loading vertex map at {}".format(vertex_map_path))
        vn_to_v = np.loadtxt(vertex_map_path, dtype=int)
    except Exception:
        logger.error("Could not load vertex map")
        vn_to_v = np.arange(len(v3d))

    # Load endpoints from simplification (-1 sentinel pairs on failure)
    try:
        endpoints_path = os.path.join(uv_dir, m + "_output", name + '_endpoints')
        logger.info("Loading endpoints at {}".format(endpoints_path))
        endpoints = np.loadtxt(endpoints_path, dtype=int)
    except Exception:
        logger.error("Could not load endpoints")
        endpoints = np.full((len(v3d), 2), -1)

    # Load camera information (view/projection, cone sphere geometry, counts)
    try:
        camera_path = os.path.join(args['camera_dir'], m+'_camera.pickle')
        logger.info("Loading camera at {}".format(camera_path))
        with open(camera_path, 'rb') as fp:
            cam = pickle.load(fp)
            vc = pickle.load(fp)
            fc = pickle.load(fp)
            red_size = pickle.load(fp)
            blue_size = pickle.load(fp)
        (view, proj, vp) = cam
        if not build_double:
            fc = fc[:red_size+blue_size,:]
    except Exception:
        logger.error("Could not load camera")
        return

    # Remove cones if flag set
    if (args["no_cones"]):
        vc = []
        fc = []
        red_size = 0
        blue_size = 0

    # Get cut to singularity edges (or build for double meshes)
    if (args["no_cut"]):
        v_cut_to_sin = []
        f_cut_to_sin = []
        logger.info("Skipping cut to singularity")
    else:
        v_cut_to_sin, f_cut_to_sin = script_util.get_boundary_edges(v3d, uv, f, fuv, bd_thick)

    # Get point matrices; pixels covered by the cut (-4) over the visible
    # surface are marked with -5
    logger.info("Getting point matrices")
    fid_mat, bc_mat = opt.get_pt_mat(cam, v3d, f, vc, fc, red_size, blue_size, W, H)
    fid_mat_sin, bc_mat_sin = opt.get_pt_mat(
        cam,
        v3d_orig,
        f_orig,
        v_cut_to_sin,
        f_cut_to_sin,
        0,
        0,
        W,
        H
    )
    for i in trange(H):
        for j in range(W):
            if fid_mat_sin[i][j] == -4 and fid_mat[i][j] >= 0:
                fid_mat[i][j] = -5

    # Get connectivity for new vf connectivity
    logger.info('Getting connectivity')
    n, opp, bd, vtx_reind = FV_to_NOB(f)
    C = NOB_to_connectivity(n, opp, bd)
    h = C.f2he
    to = vtx_reind[C.to]

    # Get per corner uv
    logger.info('Getting per corner uv coordinates')
    u, v = render.get_corner_uv(n, h, to, f, fuv, uv)

    # Get colormap field for the mesh
    # FIX(review): `r` was read before assignment (0.0 * np.abs(r) with r
    # undefined raises UnboundLocalError). The term contributes nothing, so
    # build the constant per-vertex field directly; r is indexed by `to`
    # (original vertex ids), hence length len(v3d) — confirm against caller.
    r = np.zeros(len(v3d))
    if args['render_color'] == 'blue':
        r = 0.25 + 0.0 * np.abs(r)
    elif args['render_color'] == 'purple':
        r = 0.5 + 0.0 * np.abs(r)
    colormap = cm.get_cmap('cool')
    norm = colors.NoNorm()

    # Render image
    logger.info('Rendering image')
    uv_scale = args['uv_scale'] * igl.bounding_box_diagonal(v3d)
    color_rgb = render.color_mesh_with_grid(
        fid_mat,
        bc_mat,
        h,
        n,
        to,
        u,
        v,
        r,
        H,
        W,
        colormap,
        norm,
        N_bw=args["N_bw"],
        thick=0.1,
        uv_scale=uv_scale
    )

    os.makedirs(os.path.join(args['output_dir'], 'images'), exist_ok=True)
    os.makedirs(os.path.join(output_dir, 'images'), exist_ok=True)

    # Save plain image to file
    image_path = os.path.join(output_dir, 'images', name+"_plain.png")
    logger.info('Saving plain image at {}'.format(image_path))
    plt.imsave(image_path, color_rgb)

    # Add shading
    logger.info("Adding shading")
    render.add_shading(color_rgb, v3d, f, fid_mat, bc_mat, cam[0], cam[1])

    # Save shaded image to file
    shaded_image_path = os.path.join(output_dir, 'images', name+".png")
    logger.info('Saving final image at {}'.format(shaded_image_path))
    plt.imsave(shaded_image_path, color_rgb)

    # Save shaded image to file in global directory
    shaded_image_path = os.path.join(args['output_dir'], 'images', name+".png")
    logger.info('Saving final image at {}'.format(shaded_image_path))
    plt.imsave(shaded_image_path, color_rgb)


def render_uv_many(args):
    """Run render_uv_one over all meshes in parallel."""
    script_util.run_many(render_uv_one, args)

def add_render_uv_arguments(parser):
    """Register the uv rendering command line arguments on parser."""
    parser.add_argument(
        "-o", "--output_dir",
        help="directory for output images"
    )
    parser.add_argument(
        "--uv_dir",
        help="path to the directory of meshes with uv coordinates"
    )
    parser.add_argument(
        "--camera_dir",
        help="path to the directory with camera objects",
        default="data/cameras"
    )
    parser.add_argument(
        "--uv_scale",
        help="ratio to scale uv coordinates by",
        type=float,
        default=1
    )
    parser.add_argument(
        "--render_color",
        help="color for the mesh render",
        default="blue"
    )
    parser.add_argument(
        "--suffix",
        help="suffix for output files",
        default=""
    )
    parser.add_argument(
        "-H", "--height",
        help="image height",
        type=int,
        default=800
    )
    parser.add_argument(
        "-W", "--width",
        help="image width",
        type=int,
        default=1280
    )
    parser.add_argument(
        "--N_bw",
        help="number of grid lines ",
        type=int,
        default=50
    )
    parser.add_argument(
        "--bd_thick",
        help="line thickness",
        type=float,
        default=1.0
    )
    # FIX(review): type=bool parses any non-empty string (even "False") as
    # True; store_true keeps the False default with correct flag semantics
    parser.add_argument(
        "--no_cones",
        help="remove cones from image",
        action="store_true"
    )
    parser.add_argument(
        "--no_cut",
        help="remove cut to singularity from image",
        action="store_true"
    )

if __name__ == "__main__":
    # Parse arguments for the script
    parser = script_util.generate_parser("Render mesh from uv coordinates")
    add_render_uv_arguments(parser)
    args = vars(parser.parse_args())

    # Run method in parallel
    render_uv_many(args)

# --- patch metadata: renames (similarity index 100%) of scripts/* into
# scripts/optimization_scripts/*: .render_open.sh.swp, colormap_histogram.py,
# energy_table.py; record for error_table.py continues on the next line ---
b/scripts/optimization_scripts/error_table.py similarity index 100% rename from scripts/error_table.py rename to scripts/optimization_scripts/error_table.py diff --git a/scripts/histogram.py b/scripts/optimization_scripts/histogram.py similarity index 100% rename from scripts/histogram.py rename to scripts/optimization_scripts/histogram.py diff --git a/scripts/interpolate.py b/scripts/optimization_scripts/interpolate.py similarity index 100% rename from scripts/interpolate.py rename to scripts/optimization_scripts/interpolate.py diff --git a/scripts/list_meshes.sh b/scripts/optimization_scripts/list_meshes.sh similarity index 100% rename from scripts/list_meshes.sh rename to scripts/optimization_scripts/list_meshes.sh diff --git a/scripts/optimize.py b/scripts/optimization_scripts/optimize.py similarity index 100% rename from scripts/optimize.py rename to scripts/optimization_scripts/optimize.py diff --git a/scripts/optimize_shear.py b/scripts/optimization_scripts/optimize_shear.py similarity index 100% rename from scripts/optimize_shear.py rename to scripts/optimization_scripts/optimize_shear.py diff --git a/scripts/overlay.py b/scripts/optimization_scripts/overlay.py similarity index 100% rename from scripts/overlay.py rename to scripts/optimization_scripts/overlay.py diff --git a/scripts/pipeline.py b/scripts/optimization_scripts/pipeline.py similarity index 100% rename from scripts/pipeline.py rename to scripts/optimization_scripts/pipeline.py diff --git a/scripts/refine.py b/scripts/optimization_scripts/refine.py similarity index 100% rename from scripts/refine.py rename to scripts/optimization_scripts/refine.py diff --git a/scripts/render_layout.py b/scripts/optimization_scripts/render_layout.py similarity index 100% rename from scripts/render_layout.py rename to scripts/optimization_scripts/render_layout.py diff --git a/scripts/render_uv.py b/scripts/optimization_scripts/render_uv.py similarity index 100% rename from scripts/render_uv.py rename to 
scripts/optimization_scripts/render_uv.py diff --git a/scripts/script_util.py b/scripts/optimization_scripts/script_util.py similarity index 100% rename from scripts/script_util.py rename to scripts/optimization_scripts/script_util.py diff --git a/scripts/slim.py b/scripts/optimization_scripts/slim.py similarity index 100% rename from scripts/slim.py rename to scripts/optimization_scripts/slim.py diff --git a/scripts/trim_whitespace.sh b/scripts/optimization_scripts/trim_whitespace.sh similarity index 100% rename from scripts/trim_whitespace.sh rename to scripts/optimization_scripts/trim_whitespace.sh diff --git a/scripts/optimize_aligned_angles.py b/scripts/optimize_aligned_angles.py new file mode 100644 index 0000000..bdb1a96 --- /dev/null +++ b/scripts/optimize_aligned_angles.py @@ -0,0 +1,147 @@ +# Script to project a marked metric to holonomy constraints with feature alignment + +import os, sys +script_dir = os.path.dirname(__file__) +module_dir = os.path.join(script_dir, '..', 'py') +sys.path.append(module_dir) +import numpy as np +import holonomy_py as holonomy +import optimization_py as opt +import igl +import math +import optimization_scripts.script_util as script_util + +def run_one(args, fname): + # Get mesh and test name + dot_index = fname.rfind(".") + m = fname[:dot_index] + name = m + + # Create output directory for the mesh + output_dir = script_util.get_mesh_output_directory(args['output_dir'], m) + os.makedirs(output_dir, exist_ok=True) + + # Get logger + log_path = os.path.join(output_dir, name+'_optimize_aligned_angles.log') + logger = script_util.get_logger(log_path) + logger.info("Projecting {} to constraints".format(name)) + + try: + V, F = igl.read_triangle_mesh(os.path.join(args['input_dir'], fname)) + except: + logger.info("Could not open mesh data") + return + + # Skip meshes that are already processed + try: + final_metric = np.loadtxt(os.path.join(output_dir, name + "_metric_coords"), dtype=float) + if (len(final_metric) > 0): + 
print("Skipping processed mesh") + return + except: + pass + + # cut mesh along features + feature_finder = holonomy.FeatureFinder(V, F) + feature_finder.mark_dihedral_angle_features(60.) + feature_finder.prune_small_features(5) + if (args['prune_junctions']): + feature_finder.prune_junctions() + feature_finder.prune_closed_loops() + feature_finder.prune_small_features(1) + else: + feature_finder.prune_small_components(5) + V_cut, F_cut, V_map = feature_finder.generate_feature_cut_mesh() + + # Generate initial similarity metric + marked_metric_params = holonomy.MarkedMetricParameters() + marked_metric_params.use_initial_zero = args['use_initial_zero'] + marked_metric_params.remove_loop_constraints = args['remove_holonomy_constraints'] + if (args['remove_boundary_constraints']): + logger.info("Generating union metric") + dirichlet_metric, _ = holonomy.generate_union_metric(V_cut, F_cut, marked_metric_params) + else: + logger.info("Generating dirichlet metric") + dirichlet_metric, _ = holonomy.generate_aligned_metric(V_cut, F_cut, V_map, marked_metric_params) + + # Refine initial metric to avoid spanning triangles + if (False): + refinement_mesh = holonomy.IntrinsicRefinementMesh(dirichlet_metric, []) + refinement_mesh.refine_spanning_faces() + starting_vertices = dirichlet_metric.get_path_starting_vertices() + dirichlet_metric = refinement_mesh.generate_dirichlet_metric( + dirichlet_metric.kappa_hat, + starting_vertices, + dirichlet_metric.get_boundary_constraint_system(), + dirichlet_metric.ell_hat) + + # Initialize parameters + alg_params = holonomy.NewtonParameters() + alg_params.error_eps = args['conf_error_eps'] + alg_params.max_itr = args['conf_max_itr'] + alg_params.do_reduction = args['do_reduction'] + alg_params.reset_lambda = args['reset_lambda'] + alg_params.lambda0 = args['lambda_init'] + alg_params.solver = args['solver'] + alg_params.max_time = args['max_time'] + alg_params.output_dir = output_dir + alg_params.log_level = 0 + alg_params.error_log = 
True + + # Project to constraint, undoing flips to restore initial connectivity + logger.info("Optimizing metric") + dirichlet_metric = holonomy.optimize_metric_angles(dirichlet_metric, alg_params) + + for i in np.arange(5): + if (dirichlet_metric.max_constraint_error() < 1e-12): + break + holonomy.add_optimal_cone_pair(dirichlet_metric) + dirichlet_metric = holonomy.optimize_metric_angles(dirichlet_metric, alg_params) + + # Save metric coordinate information + output_path = os.path.join(output_dir, name + '_metric_coords') + logger.info("Saving metric coordinates at {}".format(output_path)) + np.savetxt(output_path, dirichlet_metric.get_reduced_metric_coordinates()) + +def run_many(args): + script_util.run_many(run_one, args) + +def add_arguments(parser): + alg_params = holonomy.NewtonParameters() + ls_params = opt.LineSearchParameters() + parser.add_argument("-f", "--fname", help="filenames of the obj file", + nargs='+') + parser.add_argument("-i", "--input_dir", help="input folder that stores obj files and Th_hat") + parser.add_argument("--conf_error_eps", help="maximum error for conformal projection", + type=float, default=alg_params.error_eps) + parser.add_argument("--max_time", help="maximum time for projection", + type=float, default=1e10) + parser.add_argument("-m", "--conf_max_itr", help="maximum number of iterations for the conformal method", + type=int, default=alg_params.max_itr) + parser.add_argument("--do_reduction", help="do reduction for conformal step", + type=bool, default=ls_params.do_reduction) + parser.add_argument("--reset_lambda", help="reset lambda for each conformal step", + type=bool, default=ls_params.reset_lambda) + parser.add_argument("--lambda_init", help="initial lambda", + type=bool, default=ls_params.lambda0) + parser.add_argument("--solver", help="solver to use for matrix inversion", + default=alg_params.solver) + parser.add_argument("--remove_holonomy_constraints", help="remove holonomy constraints", + action="store_true") + 
parser.add_argument("--remove_boundary_constraints", help="remove boundary constraints", + action="store_true") + parser.add_argument("--use_initial_zero", help="use zero vector for initial metric coordinates", + action="store_true") + parser.add_argument("--prune_junctions", help="remove junctions and closed loops from features", + action="store_true") + parser.add_argument("-o", "--output_dir", + help="directory for output lambdas and logs") + +if __name__ == "__main__": + # Parse arguments for the script + parser = script_util.generate_parser("Optimize angles with alignment") + add_arguments(parser) + args = vars(parser.parse_args()) + + # Run parallel method + run_many(args) diff --git a/scripts/optimize_angles.py b/scripts/optimize_angles.py new file mode 100644 index 0000000..c739860 --- /dev/null +++ b/scripts/optimize_angles.py @@ -0,0 +1,256 @@ +# Script to project a marked metric to holonomy constraints + +import os, sys +script_dir = os.path.dirname(__file__) +module_dir = os.path.join(script_dir, '..', 'py') +sys.path.append(module_dir) +import numpy as np +import holonomy_py as holonomy +import optimization_py as opt +import igl +import math +import optimization_scripts.script_util as script_util + +def constrain_similarity_one(args, fname): + # Get mesh and test name + dot_index = fname.rfind(".") + m = fname[:dot_index] + name = m + + # Create output directory for the mesh + output_dir = script_util.get_mesh_output_directory(args['output_dir'], m) + os.makedirs(output_dir, exist_ok=True) + + # Get logger + log_path = os.path.join(output_dir, name+'_optimize_angles.log') + logger = script_util.get_logger(log_path) + logger.info("Projecting {} to constraints".format(name)) + + # Skip meshes that are already processed + try: + final_metric = np.loadtxt(os.path.join(output_dir, name + "_metric_coords"), dtype=float) + if (len(final_metric) > 0): + print("Skipping processed mesh") + return + except: + pass + + # get triangle mesh + try: + V, F = 
igl.read_triangle_mesh(os.path.join(args['input_dir'], fname)) + if (len(V) < 4): + logger.info("Skipping single triangle mesh") + return + except: + logger.info("Could not open mesh data") + return + + # get precomputed form, or generate on the fly + if args['fit_field']: + logger.info("Fitting cross field") + field_params = holonomy.FieldParameters() + field_params.min_angle = np.pi + rotation_form, Th_hat = holonomy.generate_intrinsic_rotation_form(V, F, field_params) + else: + try: + Th_hat = np.loadtxt(os.path.join(args['input_dir'], name + "_Th_hat"), dtype=float) + rotation_form = np.loadtxt(os.path.join(args['input_dir'], name + "_kappa_hat"), dtype=float) + except: + logger.info("Could not open rotation form") + return + + # save form to output file + output_path = os.path.join(output_dir, name + '_Th_hat') + np.savetxt(output_path, Th_hat) + output_path = os.path.join(output_dir, name + '_kappa_hat') + np.savetxt(output_path, rotation_form) + + # Generate initial similarity metric + free_cones = [] + marked_metric_params = holonomy.MarkedMetricParameters() + marked_metric_params.use_initial_zero = args['use_initial_zero'] + marked_metric_params.remove_loop_constraints = args['remove_holonomy_constraints'] + marked_metric_params.free_interior = args['free_interior'] + marked_metric, _ = holonomy.generate_marked_metric(V, F, V, F, Th_hat, rotation_form, free_cones, marked_metric_params) + + # optionally fix cones + if args['do_fix_cones']: + logger.info("Fixing cones") + holonomy.fix_cones(marked_metric, args['min_cone_index']) + + # Optionally refine initial metric to avoid spanning triangles + if (args['refine']): + refinement_mesh = holonomy.IntrinsicRefinementMesh(marked_metric) + refinement_mesh.refine_spanning_faces() + marked_metric = refinement_mesh.generate_marked_metric(marked_metric.kappa_hat) + if (args['free_interior']): + holonomy.make_interior_free(marked_metric) + + # Optionally make initial mesh delaunay + flip_seq = np.array([]) + if 
(args['use_delaunay']): + logger.info("Using Delaunay connectivity") + # Flip to delaunay connectivity + marked_metric.make_discrete_metric() + flip_seq = np.array(marked_metric.get_flip_sequence()) + + # Build new mesh with Delaunay connectivity + reduced_metric_coords = marked_metric.get_reduced_metric_coordinates() + marked_metric = marked_metric.set_metric_coordinates(reduced_metric_coords) + + # Regularize the mesh until it has good triangle quality + if (args['regularize']): + logger.info("Regularizing") + reduced_metric_coords = marked_metric.get_reduced_metric_coordinates() + + # Compute quality metrics + mesh_quality = holonomy.compute_mesh_quality(marked_metric) + logger.info("Initial quality is {}".format(np.max(mesh_quality))) + + discrete_metric = marked_metric.clone_cone_metric() + discrete_metric.make_discrete_metric() + min_angle = (360 / (2 * math.pi)) * holonomy.compute_min_angle(discrete_metric) + logger.info("Initial min angle is {}".format(min_angle)) + + num_edges = marked_metric.n_edges() + average_initial_coord = np.average(reduced_metric_coords[:num_edges]) + logger.info("Average metric coordinate is {}".format(average_initial_coord)) + changed = False + while ((np.max(mesh_quality) > args['max_triangle_quality']) or (min_angle < args['min_angle'])): + logger.info("Reducing coordinate norm") + + if (args['max_triangle_quality'] <= 2) or (args['min_angle'] >= 60): + reduced_metric_coords = 0. 
* reduced_metric_coords + else: + reduced_metric_coords = 0.9 * reduced_metric_coords + + marked_metric = marked_metric.set_metric_coordinates(reduced_metric_coords) + discrete_metric = marked_metric.clone_cone_metric() + discrete_metric.make_discrete_metric() + min_angle = (360 / (2 * math.pi)) * holonomy.compute_min_angle(discrete_metric) + mesh_quality = holonomy.compute_mesh_quality(marked_metric) + logger.info("Quality is {}".format(np.max(mesh_quality))) + logger.info("min angle is {}".format(min_angle)) + + changed = True + if changed: + reduced_metric_coords[:num_edges] += (average_initial_coord - np.average(reduced_metric_coords[:num_edges])) + marked_metric = marked_metric.set_metric_coordinates(reduced_metric_coords) + mesh_quality = holonomy.compute_mesh_quality(marked_metric) + logger.info("Final quality is {}".format(np.max(mesh_quality))) + logger.info("Final average is {}".format(np.average(reduced_metric_coords))) + + # Initialize parameters + alg_params = holonomy.NewtonParameters() + alg_params.error_eps = args['conf_error_eps'] + alg_params.max_itr = args['conf_max_itr'] + alg_params.do_reduction = args['do_reduction'] + alg_params.reset_lambda = args['reset_lambda'] + alg_params.lambda0 = args['lambda_init'] + alg_params.max_time = args['max_time'] + alg_params.solver = args['solver'] + alg_params.output_dir = output_dir + alg_params.log_level = 6 + alg_params.error_log = True + + # Project to constraint, undoing flips to restore initial connectivity + if (args['optimization_method'] == 'metric'): + logger.info("Optimizing metric") + marked_metric = holonomy.optimize_metric_angles(marked_metric, alg_params) + elif (args['optimization_method'] == 'metric_subspace'): + logger.info("Optimizing metric subspace") + subspace_basis = holonomy.compute_jump_newton_optimization_basis(marked_metric) + marked_metric = holonomy.optimize_subspace_metric_angles(marked_metric, subspace_basis, alg_params) + + # try adding cones if failure + for i in 
np.arange(args['cone_pair_corrections']): + if (marked_metric.max_constraint_error() < 1e-12): + break + holonomy.add_optimal_cone_pair(marked_metric) + marked_metric = holonomy.optimize_metric_angles(marked_metric, alg_params) + + # Undo flips + if flip_seq.size != 0: + for h in flip_seq[::-1]: + marked_metric.flip_ccw(h, True) + marked_metric.flip_ccw(h, True) + marked_metric.flip_ccw(h, True) + + # Return if no output needed + if args['skip_output']: + return + + # Save metric coordinate information + output_path = os.path.join(output_dir, name + '_metric_coords') + logger.info("Saving metric coordinates at {}".format(output_path)) + np.savetxt(output_path, marked_metric.get_reduced_metric_coordinates()) + + # Save flip sequence + output_path = os.path.join(output_dir, name + '_flip_seq') + logger.info("Saving metric coordinates at {}".format(output_path)) + np.savetxt(output_path, flip_seq, fmt="%i") + + +def constrain_similarity_many(args): + script_util.run_many(constrain_similarity_one, args) + +def add_constrain_similarity_arguments(parser): + alg_params = holonomy.NewtonParameters() + ls_params = opt.LineSearchParameters() + parser.add_argument("-f", "--fname", help="filenames of the obj file", + nargs='+') + parser.add_argument("-i", "--input_dir", help="input folder that stores obj files and Th_hat") + parser.add_argument("--conf_error_eps", help="maximum error for conformal projection", + type=float, default=alg_params.error_eps) + parser.add_argument("--max_time", help="maximum time for projection", + type=float, default=1e10) + parser.add_argument("-m", "--conf_max_itr", help="maximum number of iterations for the conformal method", + type=int, default=alg_params.max_itr) + parser.add_argument("--do_reduction", help="do reduction for conformal step", + type=bool, default=ls_params.do_reduction) + parser.add_argument("--reset_lambda", help="reset lambda for each conformal step", + type=bool, default=ls_params.reset_lambda) + 
parser.add_argument("--lambda_init", help="initial lambda", + type=float, default=ls_params.lambda0) + parser.add_argument("--refine", help="refine spanning triangles if true", + action="store_true") + parser.add_argument("--cone_pair_corrections", help="maximum number of cone pairs to insert", + type=int, default=0) + parser.add_argument("--optimization_method", + help="optimization method to use", + default="metric") + parser.add_argument("--use_delaunay", help="use delaunay mesh for optimization", + action="store_true") + parser.add_argument("--remove_holonomy_constraints", help="remove holonomy constraints", + action="store_true") + parser.add_argument("--free_interior", help="remove interior cone constraints", + action="store_true") + parser.add_argument("--use_initial_zero", help="use zero vector for initial metric coordinates", + action="store_true") + parser.add_argument("--do_fix_cones", help="use heuristics to fix cones", + action="store_true") + parser.add_argument("--min_cone_index", help="minimum cone index to allow for cone fixes", + type=int, default=0) + parser.add_argument("--regularize", help="regularize the mesh before optimization", + action="store_true") + parser.add_argument("--fit_field", help="fit intrinsic cross field for rotation form", + action="store_true") + parser.add_argument("--skip_output", help="don't write metric output if true", + action="store_true") + parser.add_argument("--max_triangle_quality", help="maximum triangle quality for regularization", + type=float, default=1e10) + parser.add_argument("--min_angle", help="minimum triangle angle for regularization", + type=float, default=1) + parser.add_argument("--solver", help="solver to use for matrix inversion", + default=alg_params.solver) + parser.add_argument("-o", "--output_dir", + help="directory for output lambdas and logs") + +if __name__ == "__main__": + # Parse arguments for the script + parser = script_util.generate_parser("Run optimization method") + 
add_constrain_similarity_arguments(parser) + args = vars(parser.parse_args()) + + # Run parallel optimization method + constrain_similarity_many(args) diff --git a/scripts/optimize_fixed_boundary.py b/scripts/optimize_fixed_boundary.py new file mode 100644 index 0000000..50f6eaf --- /dev/null +++ b/scripts/optimize_fixed_boundary.py @@ -0,0 +1,176 @@ +# Script to project a marked metric to holonomy and fixed boundary constraints + +import os, sys +script_dir = os.path.dirname(__file__) +module_dir = os.path.join(script_dir, '..', 'py') +sys.path.append(module_dir) +import numpy as np +import holonomy_py as holonomy +import optimization_py as opt +import igl +import pickle, math, logging +import optimization_scripts.script_util as script_util +import optimize_impl.render as render + +def run_one(args, fname): + # get mesh and test name + dot_index = fname.rfind(".") + m = fname[:dot_index] + name = m + + # create output directory for the mesh + output_dir = script_util.get_mesh_output_directory(args['output_dir'], m) + os.makedirs(output_dir, exist_ok=True) + + # get logger + log_path = os.path.join(output_dir, name+'_optimize_fixed_boundary.log') + logger = script_util.get_logger(log_path) + logger.info("Projecting {} to constraints".format(name)) + + # open mesh + try: + V, F = igl.read_triangle_mesh(os.path.join(args['input_dir'], fname)) + except: + logger.info("Could not open mesh data") + return + + # build common data structures + free_cones = [] + Th_hat_flat = 2 * np.pi * np.ones(len(V)) + rotation_form = [] + m, vtx_reindex = holonomy.generate_mesh(V, F, V, F, Th_hat_flat, free_cones) + marked_metric_params = holonomy.MarkedMetricParameters() + boundary_constraint_generator = holonomy.BoundaryConstraintGenerator(m) + + # build isometric boundary constraints + if (args['boundary_type'] == 'isometric'): + logger.info("Building isometric boundary metric") + Th_hat = Th_hat_flat + free_cones = holonomy.find_boundary_vertices(m, vtx_reindex) + # build polygon 
boundary constraints + if (args['boundary_type'] == 'polygon'): + logger.info("Building polygon boundary metric") + Th_hat = holonomy.generate_polygon_cones(m, vtx_reindex, args['num_corners'], False) + boundary_constraint_generator.mark_cones_as_junctions(); + boundary_constraint_generator.set_uniform_feature_lengths(0.); + + # build angle constraint metric + logger.info("Building marked metric") + halfedge_matrix, ell = boundary_constraint_generator.build_boundary_constraint_system(); + marked_metric, _ = holonomy.generate_marked_metric(V, F, V, F, Th_hat, rotation_form, free_cones, marked_metric_params) + + # generate boundary paths + logger.info("Building boundary paths") + boundary_paths, boundary_map = holonomy.build_boundary_paths(m) + boundary_constraint_system = halfedge_matrix * boundary_map + + # generate dirichlet constraint mesh + logger.info("Building dirichlet metric") + dirichlet_metric = holonomy.DirichletPennerConeMetric( + marked_metric, + boundary_paths, + boundary_constraint_system, + ell) + + # Initialize parameters + alg_params = holonomy.NewtonParameters() + alg_params.error_eps = args['conf_error_eps'] + alg_params.max_itr = args['conf_max_itr'] + alg_params.do_reduction = args['do_reduction'] + alg_params.reset_lambda = args['reset_lambda'] + alg_params.lambda0 = args['lambda_init'] + alg_params.max_time = args['max_time'] + alg_params.solver = 'ldlt' + alg_params.output_dir = output_dir + alg_params.log_level = 6 + alg_params.error_log = True + + # Project to constraint, undoing flips to restore initial connectivity + logger.info("Optimizing metric") + optimized_metric = holonomy.optimize_metric_angles(dirichlet_metric, alg_params) + + # Return if no output needed + if args['skip_output']: + return + + # Save metric coordinate information + output_path = os.path.join(output_dir, name + '_metric_coords') + logger.info("Saving metric coordinates at {}".format(output_path)) + reduced_metric_coords = 
optimized_metric.get_reduced_metric_coordinates() + np.savetxt(output_path, reduced_metric_coords) + + # Make overlay + logger.info("Making overlay") + cut_h = [] + vf_res = opt.generate_VF_mesh_from_metric(V, F, Th_hat, marked_metric, reduced_metric_coords, cut_h, False) + _, V_o, F_o, uv_o, FT_o, is_cut_h, _, fn_to_f_o, endpoints_o = vf_res + + # Save new meshes + uv_mesh_path = os.path.join(output_dir, name + '_overlay_with_uv.obj') + logger.info("Saving uv mesh at {}".format(uv_mesh_path)) + opt.write_obj_with_uv(uv_mesh_path, V_o, F_o, uv_o, FT_o) + + # Refine original mesh using overlay + logger.info("Running refinement") + refinement_mesh = opt.RefinementMesh(V_o, F_o, uv_o, FT_o, fn_to_f_o, endpoints_o) + V_r, F_r, uv_r, FT_r, fn_to_f_r, endpoints_r = refinement_mesh.get_VF_mesh() + + # Write combined refined mesh with uv + uv_mesh_path = os.path.join(output_dir, name + '_refined_with_uv.obj') + logger.info("Saving refined uv mesh at {}".format(uv_mesh_path)) + opt.write_obj_with_uv(uv_mesh_path, V_r, F_r, uv_r, FT_r) + + # Save cut information + simp_path = os.path.join(output_dir, name + '_is_cut_h') + logger.info("Saving cut information at {}".format(simp_path)) + np.savetxt(simp_path, is_cut_h) + + # Save cut to singularity information + # TODO Generate this from file data instead of pickle + cones = [] + build_double=True + cut_to_sin_list = render.add_cut_to_sin(marked_metric.n, marked_metric.opp, marked_metric.to, cones, marked_metric.type, is_cut_h, vtx_reindex, build_double) + simp_path = os.path.join(output_dir, name + '_cut_to_sin_list.pickle') + logger.info("Saving cut to singularity information at {}".format(simp_path)) + with open(simp_path, 'wb') as file: + pickle.dump(cut_to_sin_list, file) + + +def run_many(args): + script_util.run_many(run_one, args) + +def add_arguments(parser): + alg_params = opt.AlgorithmParameters() + ls_params = opt.LineSearchParameters() + parser.add_argument("-f", "--fname", help="filenames of the obj file", + 
nargs='+') + parser.add_argument("-i", "--input_dir", help="input folder that stores obj files and Th_hat") + parser.add_argument("--boundary_type", help="type of boundary constraint to use", + default='isometric') + parser.add_argument("--conf_error_eps", help="maximum error for conformal projection", + type=float, default=alg_params.error_eps) + parser.add_argument("--max_time", help="maximum time for projection", + type=float, default=1e10) + parser.add_argument("-m", "--conf_max_itr", help="maximum number of iterations for the conformal method", + type=int, default=alg_params.max_itr) + parser.add_argument("--do_reduction", help="do reduction for conformal step", + type=bool, default=ls_params.do_reduction) + parser.add_argument("--reset_lambda", help="reset lambda for each conformal step", + type=bool, default=ls_params.reset_lambda) + parser.add_argument("--num_corners", help="number of corners for polygon boundary", + type=int, default=0) + parser.add_argument("--lambda_init", help="initial lambda", + type=bool, default=ls_params.lambda0) + parser.add_argument("--skip_output", help="don't write metric output if true", + action="store_true") + parser.add_argument("-o", "--output_dir", + help="directory for output lambdas and logs") + +if __name__ == "__main__": + # Parse arguments for the script + parser = script_util.generate_parser("Run optimization method") + add_arguments(parser) + args = vars(parser.parse_args()) + + # Run parallel optimization method + run_many(args) diff --git a/scripts/optimize_refined_angles.py b/scripts/optimize_refined_angles.py new file mode 100644 index 0000000..956b11d --- /dev/null +++ b/scripts/optimize_refined_angles.py @@ -0,0 +1,114 @@ +# Script to project a refined marked metric to holonomy constraints + +import os, sys +script_dir = os.path.dirname(__file__) +module_dir = os.path.join(script_dir, '..', 'py') +sys.path.append(module_dir) +import numpy as np +import optimization_py as opt +import holonomy_py as holonomy 
+import igl +import optimization_scripts.script_util as script_util + +def optimize_refined_one(args, fname): + # Get mesh and test name + dot_index = fname.rfind(".") + m = fname[:dot_index] + name = m + + # Create output directory for the mesh + output_dir = script_util.get_mesh_output_directory(args['output_dir'], m) + os.makedirs(output_dir, exist_ok=True) + + # Get logger + log_path = os.path.join(output_dir, name+'_optimize_refined.log') + logger = script_util.get_logger(log_path) + logger.info("Projecting {} to constraints with intrinsic refinement".format(name)) + + try: + V, F = igl.read_triangle_mesh(os.path.join(args['input_dir'], fname)) + except: + logger.info("Could not open mesh data") + return + + # Generate initial similarity metric + marked_metric_params = holonomy.MarkedMetricParameters() + marked_metric_params.use_initial_zero = args['use_initial_zero'] + marked_metric_params.remove_loop_constraints = args['remove_loop_constraints'] + try: + marked_metric, Th_hat, rotation_form = holonomy.generate_refined_marked_metric(V, F, args['min_angle'], marked_metric_params) + except: + logger.info("Could not build refined metric") + return + + # Initialize parameters + alg_params = holonomy.NewtonParameters() + alg_params.error_eps = args['conf_error_eps'] + alg_params.max_itr = args['conf_max_itr'] + alg_params.do_reduction = args['do_reduction'] + alg_params.reset_lambda = args['reset_lambda'] + alg_params.lambda0 = args['lambda_init'] + alg_params.max_time = args['max_time'] + alg_params.output_dir = output_dir + alg_params.log_level = 6 + alg_params.error_log = True + + # Project to constraint, undoing flips to restore initial connectivity + logger.info("Optimizing metric") + try: + marked_metric = holonomy.optimize_metric_angles(marked_metric, alg_params) + marked_metric.undo_flips() + except: + logger.info("Could not optimize metric") + return + + # Return if no output needed + if args['skip_output']: + return + + # Save metric coordinate 
information + output_path = os.path.join(output_dir, name + '_metric_coords') + logger.info("Saving metric coordinates at {}".format(output_path)) + np.savetxt(output_path, marked_metric.get_reduced_metric_coordinates()) + + +def optimize_refined_many(args): + script_util.run_many(optimize_refined_one, args) + +def add_optimize_refined_arguments(parser): + alg_params = opt.AlgorithmParameters() + ls_params = opt.LineSearchParameters() + parser.add_argument("-f", "--fname", help="filenames of the obj file", + nargs='+') + parser.add_argument("-i", "--input_dir", help="input folder that stores obj files and Th_hat") + parser.add_argument("--conf_error_eps", help="maximum error for conformal projection", + type=float, default=alg_params.error_eps) + parser.add_argument("--max_time", help="maximum time for projection", + type=float, default=1e10) + parser.add_argument("--min_angle", help="minimum angle for refinement", + type=float, default=25) + parser.add_argument("-m", "--conf_max_itr", help="maximum number of iterations for the conformal method", + type=int, default=alg_params.max_itr) + parser.add_argument("--do_reduction", help="do reduction for conformal step", + type=bool, default=ls_params.do_reduction) + parser.add_argument("--reset_lambda", help="reset lambda for each conformal step", + type=bool, default=ls_params.reset_lambda) + parser.add_argument("--lambda_init", help="initial lambda", + type=bool, default=ls_params.lambda0) + parser.add_argument("--use_initial_zero", help="use zero vector for initial metric coordinates", + action="store_true") + parser.add_argument("--remove_loop_constraints", help="remove holonomy constraints", + action="store_true") + parser.add_argument("--skip_output", help="don't write metric output if true", + action="store_true") + parser.add_argument("-o", "--output_dir", + help="directory for output lambdas and logs") + +if __name__ == "__main__": + # Parse arguments for the script + parser = script_util.generate_parser("Run 
optimization method") + add_optimize_refined_arguments(parser) + args = vars(parser.parse_args()) + + # Run parallel optimization method + optimize_refined_many(args) diff --git a/scripts/optimize_similarity.py b/scripts/optimize_similarity.py new file mode 100644 index 0000000..c955c63 --- /dev/null +++ b/scripts/optimize_similarity.py @@ -0,0 +1,150 @@ +# Script to optimize a similarity metric with holonomy constraints + +import os, sys +script_dir = os.path.dirname(__file__) +module_dir = os.path.join(script_dir, '..', 'py') +sys.path.append(module_dir) +import numpy as np +import holonomy_py as holonomy +import optimization_py as opt +import pickle, math +import igl +import optimization_scripts.script_util as script_util +import optimize_impl.render as render + +def optimize_similarity_one(args, fname): + # Get mesh and test name + dot_index = fname.rfind(".") + m = fname[:dot_index] + name = m + V, F = igl.read_triangle_mesh(os.path.join(args['input_dir'], fname)) + Th_hat = np.loadtxt(os.path.join(args['input_dir'], name + "_Th_hat"), dtype=float) + rotation_form = np.loadtxt(os.path.join(args['input_dir'], name + "_kappa_hat"), dtype=float) + + # Create output directory for the mesh + output_dir = script_util.get_mesh_output_directory(args['output_dir'], m) + os.makedirs(output_dir, exist_ok=True) + + # Get logger + log_path = os.path.join(output_dir, name+'_convert_to_vf.log') + logger = script_util.get_logger(log_path) + logger.info("Converting {} to vf".format(name)) + + # Generate initial similarity metric + free_cones = [] + fix_boundary = False + set_holonomy_constraints = True + similarity_metric = holonomy.generate_similarity_mesh(V, F, V, F, Th_hat, rotation_form, free_cones, fix_boundary, set_holonomy_constraints) + + # Get mesh information + is_bd = igl.is_border_vertex(V, F) + build_double = (np.sum(is_bd) != 0) + _, vtx_reindex = opt.fv_to_double(V, F, V, F, Th_hat, [], False) + + # Get cones + cones = np.array([id for id in range(len(Th_hat)) 
if np.abs(Th_hat[id]-2*math.pi) > 1e-15 and not is_bd[id]], dtype=int) + cones = [idx for idx in range(len(vtx_reindex)) if vtx_reindex[idx] in cones] + + # Build energies + energy_choice = args['similarity_energy_choice'] + if (energy_choice == "integrated"): + opt_energy = holonomy.IntegratedEnergy(similarity_metric) + elif (energy_choice == "coordinate"): + num_coords = len(similarity_metric.get_reduced_metric_coordinates()) + num_form_coords = similarity_metric.n_homology_basis_loops() + + if (num_form_coords == 0): + logger.error("Cannot optimize jump coordinates for genus 0") + return + + coordinates = np.arange(num_coords - num_form_coords, num_coords) + opt_energy = holonomy.CoordinateEnergy(similarity_metric, coordinates) + else: + logger.error("No valid energy selected") + return + + # Perform optimization + proj_params, opt_params = script_util.generate_parameters(args) + opt_params.output_dir = script_util.get_mesh_output_directory(args['output_dir'], m) + if (args['optimization_method'] == 'metric'): + optimized_similarity_metric = opt.optimize_metric(similarity_metric, opt_energy, proj_params, opt_params) + if (args['optimization_method'] == 'shear'): + shear_basis_matrix, _ = opt.compute_shear_dual_basis(similarity_metric) + domain_matrix, codomain_matrix, domain_coords, codomain_coords = holonomy.compute_similarity_optimization_domain(similarity_metric, shear_basis_matrix) + optimized_metric_coords = opt.optimize_domain_coordinates(similarity_metric, opt_energy, domain_matrix, codomain_matrix, domain_coords, codomain_coords, proj_params, opt_params) + optimized_similarity_metric = similarity_metric.set_metric_coordinates(optimized_metric_coords) + + # Save metric coordinate information + simp_path = os.path.join(output_dir, name + '_metric_coords') + logger.info("Saving metric coordinates at {}".format(simp_path)) + np.savetxt(simp_path, optimized_similarity_metric.get_reduced_metric_coordinates()) + + # Get overlay and write to file + cut_h = [] + 
_, V_o, F_o, uv_o, FT_o, is_cut_h, _, fn_to_f, endpoints = holonomy.generate_VF_mesh_from_similarity_metric(V, F, Th_hat, optimized_similarity_metric, cut_h) + + # Save new meshes + uv_mesh_path = os.path.join(output_dir, name + '_overlay_with_uv.obj') + logger.info("Saving uv mesh at {}".format(uv_mesh_path)) + opt.write_obj_with_uv(uv_mesh_path, V_o, F_o, uv_o, FT_o) + + # Save cut information + simp_path = os.path.join(output_dir, name + '_is_cut_h') + logger.info("Saving cut information at {}".format(simp_path)) + np.savetxt(simp_path, is_cut_h) + + # Save cut to singularity information + # TODO Generate this from file data instead of pickle + cut_to_sin_list = render.add_cut_to_sin(similarity_metric.n, similarity_metric.opp, similarity_metric.to, cones, similarity_metric.type, is_cut_h, vtx_reindex, build_double) + simp_path = os.path.join(output_dir, name + '_cut_to_sin_list.pickle') + logger.info("Saving cut to singularity information at {}".format(simp_path)) + with open(simp_path, 'wb') as file: + pickle.dump(cut_to_sin_list, file) + simp_path = os.path.join(output_dir, name + '_overlay_with_uv_cut_to_sin_list.pickle') + logger.info("Saving cut to singularity information at {}".format(simp_path)) + with open(simp_path, 'wb') as file: + pickle.dump(cut_to_sin_list, file) + simp_path = os.path.join(output_dir, name + '_refined_with_uv_cut_to_sin_list.pickle') + logger.info("Saving cut to singularity information at {}".format(simp_path)) + with open(simp_path, 'wb') as file: + pickle.dump(cut_to_sin_list, file) + + # Write fn_to_f to file + face_map_path = os.path.join(output_dir, name + '_overlay_with_uv_fn_to_f') + logger.info("Saving new to old face map at {}".format(face_map_path)) + np.savetxt(face_map_path, fn_to_f, fmt='%i') + + # Write vn_to_v to file + vertex_map_path = os.path.join(output_dir, name + '_overlay_with_uv_vn_to_v') + logger.info("Saving trivial new to old vertex map at {}".format(vertex_map_path)) + vn_to_v = np.arange(len(uv_o)) + 
np.savetxt(vertex_map_path, vn_to_v, fmt='%i') + + # Write endpoints to file + endpoints_path = os.path.join(output_dir, name + '_overlay_with_uv_endpoints') + logger.info("Saving endpoints at {}".format(endpoints_path)) + np.savetxt(endpoints_path, endpoints, fmt='%i') + +def optimize_similarity_many(args): + script_util.run_many(optimize_similarity_one, args) + +def add_optimize_similarity_arguments(parser): + parser.add_argument("-f", "--fname", help="filenames of the obj file", + nargs='+') + parser.add_argument("-i", "--input_dir", help="input folder that stores obj files and Th_hat") + parser.add_argument("--similarity_energy_choice", help="similarity energy to optimize", default="integrated") + parser.add_argument("--optimization_method", + help="optimization method to use", + default="metric") + parser.add_argument("-o", "--output_dir", + help="directory for output lambdas and logs") + script_util.add_parameter_arguments(parser) + +if __name__ == "__main__": + # Parse arguments for the script + parser = script_util.generate_parser("Run optimization method") + add_optimize_similarity_arguments(parser) + args = vars(parser.parse_args()) + + # Run parallel optimization method + optimize_similarity_many(args) diff --git a/scripts/render_mesh.py b/scripts/render_mesh.py new file mode 100644 index 0000000..5742806 --- /dev/null +++ b/scripts/render_mesh.py @@ -0,0 +1,100 @@ +# Script to render mesh +# +# By default, runs all meshes specified by the `fname` argument in parallel. +# Functions to run the parallelized script and the method without parallelization +# are also exposed for use in other modules. 
+ +import os, sys +script_dir = os.path.dirname(__file__) +module_dir = os.path.join(script_dir, '..', 'py') +sys.path.append(module_dir) +import numpy as np +import igl +import script_util +import polyscope as ps + + +def render_mesh_one(args, fname): + # Get mesh and test name + dot_index = fname.rfind(".") + m = fname[:dot_index] + if (args['suffix'] == ""): + name = m + else: + name = m + '_'+args['suffix'] + + # Create output directory for the mesh + output_dir = script_util.get_mesh_output_directory(args['output_dir'], 'images') + os.makedirs(output_dir, exist_ok=True) + + # Skip meshes that are already processed + output_file = os.path.join(output_dir, name+".png") + if os.path.isfile(output_file): + print("Skipping processed mesh") + return + + # Load mesh information + try: + input_dir = args['input_dir'] + v3d_orig, f_orig = igl.read_triangle_mesh(os.path.join(input_dir, m+'.obj')) + except: + return + + # Load uv information + try: + uv_dir = args['uv_dir'] + v3d, uv, _, f, fuv, _ = igl.read_obj(os.path.join(uv_dir, name + ".obj")) + except: + print("Could not load uv coordinates") + return + + # get colors based on mesh size + teal = np.array([0.290,0.686,0.835]) + orange = np.array([0.906,0.639,0.224]) + sage = np.array([0.569,0.694,0.529]) + if (len(f_orig) >= 50000): + color = orange + elif (len(f_orig) > 5000): + color = teal + else: + color = sage + + # register mesh + ps.init() + uv_flat = uv[fuv.flatten()] + ps_mesh = ps.register_surface_mesh("mesh", v3d, f, smooth_shade=False, material="wax") + ps_mesh.add_parameterization_quantity("uv", uv_flat, defined_on='corners', + coords_type='world', viz_style='grid', + grid_colors=(0.25 * color, color), enabled=True) + + ps.set_ground_plane_mode("none") + ps.reset_camera_to_home_view() + + # Save image to file in global directory + image_path = os.path.join(output_dir, name+".png") + ps.screenshot(image_path) + + +def render_mesh_many(args): + script_util.run_many(render_mesh_one, args) + + +def 
add_render_mesh_arguments(parser): + parser.add_argument( + "-o", "--output_dir", + help="directory for output images" + ) + parser.add_argument( + "--suffix", + help="suffix for output files", + default="" + ) + +if __name__ == "__main__": + # Parse arguments for the script + parser = script_util.generate_parser("Render mesh") + add_render_mesh_arguments(parser) + args = vars(parser.parse_args()) + + # Run method in parallel + render_mesh_many(args) diff --git a/scripts/statistics.py b/scripts/statistics.py new file mode 100644 index 0000000..0f3151d --- /dev/null +++ b/scripts/statistics.py @@ -0,0 +1,160 @@ +# Script to generate a table summarizing various energies and other metrics from output +# Penner coordinates + +import optimize_impl.energies as energies +import script_util +import pandas as pd +import optimization_py as opt +import holonomy_py as holonomy +import numpy as np +import os, math +import sys +import igl +script_dir = os.path.dirname(__file__) +module_dir = os.path.join(script_dir, '..', 'py') +sys.path.append(module_dir) + + +def add_statistics_arguments(parser): + parser.add_argument( + "--use_delaunay", + action="store_true", + help="use delaunay connectivity as base mesh" + ) + parser.add_argument( + "-i", "--input_dir", + help="directory for input meshes" + ) + parser.add_argument( + "-o", "--output_dir", + help="directory for output" + ) + +def run_statistics(args): + models = [] + + # Build dictionary of statistics + statistics = [ + 'genus', + 'faces', + 'cones', + 'RMSRE', + 'iter', + 'max_stretch', + 'min_cone', + 'max_cone', + 'surface_area', + ] + statistics_dict = {statistic : [] for statistic in statistics} + + # Create output directory for the mesh + output_dir = args['output_dir'] + os.makedirs(output_dir, exist_ok=True) + + # Get logger + log_path = os.path.join(output_dir, 'statistics.log') + logger = script_util.get_logger(log_path) + logger.info("Building statistics table") + + for fname in args['fname']: + dot_index = 
fname.rfind(".") + m = fname[:dot_index] + name = m + models.append(m) + mesh_output_dir = script_util.get_mesh_output_directory(args['output_dir'], m) + + try: + # Get mesh + V, F = igl.read_triangle_mesh(os.path.join(args['input_dir'], fname)) + Th_hat = np.loadtxt(os.path.join(mesh_output_dir, m + '_Th_hat'), dtype=float) + rotation_form = np.loadtxt(os.path.join(mesh_output_dir, m + '_kappa_hat'), dtype=float) + + # Generate metric TODO use constructor + free_cones = [] + marked_metric_params = holonomy.MarkedMetricParameters() + marked_metric, _ = holonomy.generate_marked_metric(V, F, V, F, Th_hat, rotation_form, free_cones, marked_metric_params) + + # Get target metric + if (args['use_delaunay']): + logger.info("Using Delaunay connectivity") + marked_target = marked_metric.clone_cone_metric() + marked_target.make_discrete_metric() + flip_seq = np.array(marked_target.get_flip_sequence()) + penner_target = marked_target.get_reduced_metric_coordinates() + else: + penner_target = marked_metric.get_reduced_metric_coordinates() + + + # get final metric coordinates + metric_coords_path = os.path.join(mesh_output_dir, name + "_metric_coords") + logger.info("Loading metric coordinates from {}".format(metric_coords_path)) + reduced_metric_coords = np.loadtxt(metric_coords_path) + marked_metric = marked_metric.set_metric_coordinates(reduced_metric_coords) + + # ensure coordinates are defined on same connectivity + if (args['use_delaunay']): + logger.info("Flipping to Delaunay connectivity") + for h in flip_seq: + marked_metric.flip_ccw(h, True) + penner_coords = marked_metric.get_reduced_metric_coordinates() + + # get per iteration data + iteration_data_dir = os.path.join(mesh_output_dir, 'iteration_data_log.csv') + logger.info("Loading iteration data from {}".format(iteration_data_dir)) + iteration_data = pd.read_csv(iteration_data_dir) + except: + logger.info("Could not open mesh data at {}".format(args['input_dir'])) + for statistic in statistics: + 
statistics_dict[statistic].append(-1) + continue + + for statistic in statistics: + logger.info("Getting {} statistic".format(statistic)) + try: + if statistic == 'surface_area': + statistics_dict[statistic].append(np.sum(igl.doublearea(V, F)) / 2.) + + if statistic == 'genus': + statistics_dict[statistic].append(marked_metric.n_homology_basis_loops() / 2) + + if statistic == 'faces': + statistics_dict[statistic].append(marked_metric.n_faces()) + + if statistic == 'cones': + is_bd = igl.is_border_vertex(V, F) + _, vtx_reindex = opt.fv_to_double(V, F, V, F, Th_hat, [], False) + cones = np.array([id for id in range(len(Th_hat)) if np.abs(Th_hat[id]-2*math.pi) > 1e-15 and not is_bd[id]], dtype=int) + cones = [idx for idx in range(len(vtx_reindex)) if vtx_reindex[idx] in cones] + statistics_dict[statistic].append(len(cones)) + + if statistic == 'min_cone': + statistics_dict[statistic].append(np.min(Th_hat)) + + if statistic == 'max_cone': + statistics_dict[statistic].append(np.max(Th_hat)) + + if statistic == 'RMSRE': + statistics_dict[statistic].append(float(iteration_data['rmsre'].tail(1))) + + if statistic == 'iter': + statistics_dict[statistic].append(int(iteration_data['num_iter'].tail(1))) + + if statistic == 'max_stretch': + X = energies.symmetric_stretches(penner_coords, penner_target) + statistics_dict[statistic].append(np.max(X)) + + except: + statistics_dict[statistic].append(-1) + + statistics_df = pd.DataFrame(statistics_dict, index=models) + csv_path = os.path.join(output_dir, 'statistics.csv') + statistics_df.to_csv(csv_path) + + +if __name__ == "__main__": + # Parse arguments for the script + parser = script_util.generate_parser("Generate statistics") + add_statistics_arguments(parser) + args = vars(parser.parse_args()) + + run_statistics(args) diff --git a/scripts/symmetric_dirichlet.sh b/scripts/symmetric_dirichlet.sh new file mode 100644 index 0000000..93615f7 --- /dev/null +++ b/scripts/symmetric_dirichlet.sh @@ -0,0 +1,50 @@ +#! 
/bin/bash + +input_dir=$1 +output_dir=$2 + +build_dir="build" +sym_opt_dir="../wildmeshing-toolkit/" +sym_opt_build_dir="${sym_opt_dir}/build/" +cut_mesh_dir="output/cut_meshes" +suffix="refined_with_uv" +mkdir -p ${cut_mesh_dir} +mkdir -p ${cut_mesh_dir}/EE +mkdir -p ${output_dir} +model_list=($(ls ${input_dir})) + +for file in ${model_list[@]} +do + if [[ "$file" = *"_output" ]]; then + mesh_name=${file%_output} + ./${build_dir}/bin/cut_mesh \ + --mesh ${input_dir}/${mesh_name}_output/${mesh_name}_${suffix}.obj \ + --mesh_name ${mesh_name} \ + --output ${cut_mesh_dir}/ & + fi +done +wait + +for file in ${model_list[@]} +do + if [[ "$file" = *"_output" ]]; then + mesh_name=${file%_output} + ${sym_opt_build_dir}/app/extreme_opt/extreme_opt \ + -i ${cut_mesh_dir} \ + -j ${sym_opt_dir}/json/example.json \ + -o ${output_dir} \ + -m ${mesh_name} & + fi +done +wait + +for file in ${model_list[@]} +do + if [[ "$file" = *"_output" ]]; then + mesh_name=${file%_output} + mkdir -p ${output_dir}/${mesh_name}_output + mv ${output_dir}/${mesh_name}_out.obj ${output_dir}/${mesh_name}_output/${mesh_name}_optimized_uv.obj + mv ${output_dir}/${mesh_name}.json ${output_dir}/${mesh_name}_output/ + fi +done +wait diff --git a/scripts/symmetric_dirichlet_glue.sh b/scripts/symmetric_dirichlet_glue.sh new file mode 100644 index 0000000..2918c34 --- /dev/null +++ b/scripts/symmetric_dirichlet_glue.sh @@ -0,0 +1,24 @@ +#! 
/bin/bash + +input_dir=$1 +uv_dir=$2 +output_dir=$3 + +build_dir="build" +suffix="refined_with_uv" +model_list=($(ls ${input_dir})) +mkdir -p ${output_dir} + +for file in ${model_list[@]} +do + if [[ "$file" = *"_output" ]]; then + mesh_name=${file%_output} + ./${build_dir}/bin/glue_mesh \ + --mesh_name ${mesh_name} \ + --mesh ${input_dir}/${mesh_name}_output/${mesh_name}_${suffix}.obj \ + --uv_mesh ${uv_dir}/${mesh_name}_output/${mesh_name}_optimized_uv.obj \ + --output ${output_dir}/ & + fi +done +wait + diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 854657f..d520f84 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -1,21 +1,9 @@ -add_subdirectory(core) -add_subdirectory(metric_optimization) -add_subdirectory(parameterization) add_subdirectory(util) +add_subdirectory(optimization) +add_subdirectory(holonomy) -add_library(PennerOptimizationLib - penner_optimization_interface.cpp -) -target_include_directories(PennerOptimizationLib PUBLIC .) -target_link_libraries(PennerOptimizationLib PUBLIC - PennerOptimizationCoreLib - MetricOptimizationLib - ParameterizationLib - PennerOptimizationUtilLib -) -target_compile_definitions(PennerOptimizationLib PUBLIC - SPDLOG_ACTIVE_LEVEL=SPDLOG_LEVEL_DEBUG -) -target_compile_options(PennerOptimizationLib PRIVATE - -Wall -Wpedantic -Wextra -Werror -) +add_library(PennerLib INTERFACE) +target_link_libraries(PennerLib INTERFACE + PennerOptimizationLib + PennerHolonomyLib +) \ No newline at end of file diff --git a/src/app/CMakeLists.txt b/src/app/CMakeLists.txt deleted file mode 100644 index b2fa7f4..0000000 --- a/src/app/CMakeLists.txt +++ /dev/null @@ -1,22 +0,0 @@ -add_executable(optimize_metric - optimize_metric.cpp -) -target_link_libraries(optimize_metric PRIVATE - PennerOptimizationLib - CLI11::CLI11 -) - -add_executable(optimize_shear - optimize_shear.cpp -) -target_link_libraries(optimize_shear PRIVATE - PennerOptimizationLib - CLI11::CLI11 -) - -add_executable(plot_shear_energy - plot_shear_energy.cpp 
-) -target_link_libraries(plot_shear_energy PRIVATE - PennerOptimizationLib -) diff --git a/src/app/optimize_metric.cpp b/src/app/optimize_metric.cpp deleted file mode 100644 index ab1bd4d..0000000 --- a/src/app/optimize_metric.cpp +++ /dev/null @@ -1,193 +0,0 @@ -/********************************************************************************* -* This file is part of reference implementation of SIGGRAPH Asia 2023 Paper * -* `Metric Optimization in Penner Coordinates` * -* v1.0 * -* * -* The MIT License * -* * -* Permission is hereby granted, free of charge, to any person obtaining a * -* copy of this software and associated documentation files (the "Software"), * -* to deal in the Software without restriction, including without limitation * -* the rights to use, copy, modify, merge, publish, distribute, sublicense, * -* and/or sell copies of the Software, and to permit persons to whom the * -* Software is furnished to do so, subject to the following conditions: * -* * -* The above copyright notice and this permission notice shall be included in * -* all copies or substantial portions of the Software. * -* * -* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * -* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * -* FITNESS FOR A PARTICULAR PURPOSE AND NON INFRINGEMENT. IN NO EVENT SHALL THE * -* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * -* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * -* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * -* IN THE SOFTWARE. 
* -* * -* Author(s): * -* Ryan Capouellez, Denis Zorin, * -* Courant Institute of Mathematical Sciences, New York University, USA * -* * * -*********************************************************************************/ -#include -#include -#include "common.hh" -#include "cone_metric.hh" -#include "energy_functor.hh" -#include "implicit_optimization.hh" -#include "io.hh" -#include "penner_optimization_interface.hh" -#include "refinement.hh" -#include "vector.hh" -#include "viewers.hh" - -#include - -using namespace CurvatureMetric; - -int main(int argc, char* argv[]) -{ -#ifdef MULTIPRECISION - spdlog::info("Using multiprecision"); - mpfr::mpreal::set_default_prec(100); - mpfr::mpreal::set_emax(mpfr::mpreal::get_emax_max()); - mpfr::mpreal::set_emin(mpfr::mpreal::get_emin_min()); -#endif - - // Build maps from strings to enums - std::map energy_choice_map{ - {"log_length", EnergyChoice::log_length}, - {"log_scale", EnergyChoice::log_scale}, - {"quadratic_sym_dirichlet", EnergyChoice::quadratic_sym_dirichlet}, - {"sym_dirichlet", EnergyChoice::sym_dirichlet}, - {"p_norm", EnergyChoice::p_norm}, - }; - - // Get command line arguments - CLI::App app{"Generate approximately isometric parameterization for a mesh."}; - std::string mesh_filename = ""; - std::string Th_hat_filename = ""; - std::string output_dir = "./"; - EnergyChoice energy_choice = EnergyChoice::log_length; - bool use_discrete_metric = false; - bool show_parameterization = false; - auto proj_params = std::make_shared(); - auto opt_params = std::make_shared(); - app.add_option("--mesh", mesh_filename, "Mesh filepath")->check(CLI::ExistingFile)->required(); - app.add_option("--cones", Th_hat_filename, "Cone angle filepath") - ->check(CLI::ExistingFile) - ->required(); - app.add_option("--energy", energy_choice, "Energy to minimize") - ->transform(CLI::CheckedTransformer(energy_choice_map, CLI::ignore_case)); - app.add_option("--direction", opt_params->direction_choice, "Descent direction: 
projected_gradient, projected_newton"); - app.add_option( - "--num_iter", - opt_params->num_iter, - "Maximum number of iterations to perform") - ->check(CLI::NonNegativeNumber); - app.add_flag("--use_discrete_metric", use_discrete_metric, "Use edge lengths instead of Penner coordinates"); - app.add_flag("--show_parameterization", show_parameterization, "Show final parameterization"); - app.add_option("-o,--output", output_dir, "Output directory"); - CLI11_PARSE(app, argc, argv); - - spdlog::set_level(spdlog::level::info); - std::filesystem::create_directories(output_dir); - opt_params->output_dir = output_dir; - - // TODO Make this automatic - if (use_discrete_metric) - { - proj_params->initial_ptolemy = false; - proj_params->use_edge_flips = false; - proj_params->max_itr = 30; - } - - // Get input mesh - Eigen::MatrixXd V, uv, N; - Eigen::MatrixXi F, FT, FN; - spdlog::info("Optimizing mesh at {}", mesh_filename); - igl::readOBJ(mesh_filename, V, uv, N, F, FT, FN); - - // Get input angles - std::vector Th_hat_init; - spdlog::info("Using cone angles at {}", Th_hat_filename); - read_vector_from_file(Th_hat_filename, Th_hat_init); - std::vector Th_hat = correct_cone_angles(Th_hat_init); - - // Get initial mesh for optimization - std::vector vtx_reindex; - std::vector free_cones = {}; - bool fix_boundary = false; - std::unique_ptr cone_metric = - generate_initial_mesh(V, F, V, F, Th_hat, vtx_reindex, free_cones, fix_boundary, use_discrete_metric); - - // Get energy - std::unique_ptr opt_energy = generate_energy(V, F, Th_hat, *cone_metric, energy_choice); - - // Optimize the metric - std::unique_ptr optimized_cone_metric = - optimize_metric(*cone_metric, *opt_energy, proj_params, opt_params); - VectorX optimized_metric_coords = optimized_cone_metric->get_reduced_metric_coordinates(); - - // Write the output metric coordinates - std::string output_filename = join_path(output_dir, "optimized_metric_coords"); - write_vector(optimized_metric_coords, output_filename, 17); - 
- // Generate overlay VF mesh with parametrization - if (use_discrete_metric) { - auto vf_res = generate_VF_mesh_from_discrete_metric( - V, - F, - Th_hat, - optimized_metric_coords); - Eigen::MatrixXd V_l = std::get<0>(vf_res); - Eigen::MatrixXi F_l = std::get<1>(vf_res); - Eigen::MatrixXd uv_l = std::get<2>(vf_res); - Eigen::MatrixXi FT_l = std::get<3>(vf_res); - - // Write the overlay output - output_filename = join_path(output_dir, "mesh_with_uv.obj"); - write_obj_with_uv(output_filename, V_l, F_l, uv_l, FT_l); - - // Optionally show final parameterization - if (show_parameterization) view_parameterization(V_l, F_l, uv_l, FT_l); - } else { - std::vector is_cut = {}; - bool do_best_fit_scaling = false; - auto vf_res = generate_VF_mesh_from_metric( - V, - F, - Th_hat, - *cone_metric, - optimized_metric_coords, - is_cut, - do_best_fit_scaling); - OverlayMesh m_o = std::get<0>(vf_res); - Eigen::MatrixXd V_o = std::get<1>(vf_res); - Eigen::MatrixXi F_o = std::get<2>(vf_res); - Eigen::MatrixXd uv_o = std::get<3>(vf_res); - Eigen::MatrixXi FT_o = std::get<4>(vf_res); - std::vector fn_to_f_o = std::get<7>(vf_res); - std::vector> endpoints_o = std::get<8>(vf_res); - - // Write the overlay output - output_filename = join_path(output_dir, "overlay_mesh_with_uv.obj"); - write_obj_with_uv(output_filename, V_o, F_o, uv_o, FT_o); - - // Get refinement mesh - Eigen::MatrixXd V_r; - Eigen::MatrixXi F_r; - Eigen::MatrixXd uv_r; - Eigen::MatrixXi FT_r; - std::vector fn_to_f_r; - std::vector> endpoints_r; - RefinementMesh refinement_mesh(V_o, F_o, uv_o, FT_o, fn_to_f_o, endpoints_o); - refinement_mesh.get_VF_mesh(V_r, F_r, uv_r, FT_r, fn_to_f_r, endpoints_r); - - // Write the refined output - output_filename = join_path(output_dir, "refined_mesh_with_uv.obj"); - write_obj_with_uv(output_filename, V_r, F_r, uv_r, FT_r); - - // Optionally show final parameterization - if (show_parameterization) view_parameterization(V_r, F_r, uv_r, FT_r); - } -} diff --git 
a/src/app/optimize_shear.cpp b/src/app/optimize_shear.cpp deleted file mode 100644 index 4786c22..0000000 --- a/src/app/optimize_shear.cpp +++ /dev/null @@ -1,181 +0,0 @@ -/********************************************************************************* -* This file is part of reference implementation of SIGGRAPH Asia 2023 Paper * -* `Metric Optimization in Penner Coordinates` * -* v1.0 * -* * -* The MIT License * -* * -* Permission is hereby granted, free of charge, to any person obtaining a * -* copy of this software and associated documentation files (the "Software"), * -* to deal in the Software without restriction, including without limitation * -* the rights to use, copy, modify, merge, publish, distribute, sublicense, * -* and/or sell copies of the Software, and to permit persons to whom the * -* Software is furnished to do so, subject to the following conditions: * -* * -* The above copyright notice and this permission notice shall be included in * -* all copies or substantial portions of the Software. * -* * -* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * -* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * -* FITNESS FOR A PARTICULAR PURPOSE AND NON INFRINGEMENT. IN NO EVENT SHALL THE * -* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * -* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * -* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * -* IN THE SOFTWARE. 
* -* * -* Author(s): * -* Ryan Capouellez, Denis Zorin, * -* Courant Institute of Mathematical Sciences, New York University, USA * -* * * -*********************************************************************************/ -#include -#include -#include "common.hh" -#include "explicit_optimization.hh" -#include "io.hh" -#include "penner_optimization_interface.hh" -#include "shear.hh" -#include "viewers.hh" -#include "refinement.hh" -#include - -using namespace CurvatureMetric; - -int main(int argc, char* argv[]) -{ -#ifdef MULTIPRECISION - spdlog::info("Using multiprecision"); - mpfr::mpreal::set_default_prec(60); - mpfr::mpreal::set_emax(mpfr::mpreal::get_emax_max()); - mpfr::mpreal::set_emin(mpfr::mpreal::get_emin_min()); -#endif - - // Build maps from strings to enums - std::map energy_choice_map{ - {"log_length", EnergyChoice::log_length}, - {"log_scale", EnergyChoice::log_scale}, - {"quadratic_sym_dirichlet", EnergyChoice::quadratic_sym_dirichlet}, - {"sym_dirichlet", EnergyChoice::sym_dirichlet}, - {"p_norm", EnergyChoice::p_norm}, - }; - - // Get command line arguments - CLI::App app{"Generate approximately isometric parameterization for a mesh."}; - std::string mesh_filename = ""; - std::string Th_hat_filename = ""; - std::string output_dir = "./"; - EnergyChoice energy_choice = EnergyChoice::log_length; - bool show_parameterization = false; - auto proj_params = std::make_shared(); - auto opt_params = std::make_shared(); - opt_params->direction_choice = "gradient"; - app.add_option("--mesh", mesh_filename, "Mesh filepath")->check(CLI::ExistingFile)->required(); - app.add_option("--cones", Th_hat_filename, "Cone angle filepath") - ->check(CLI::ExistingFile) - ->required(); - app.add_option("--energy", energy_choice, "Energy to minimize") - ->transform(CLI::CheckedTransformer(energy_choice_map, CLI::ignore_case)); - app.add_option("--direction", opt_params->direction_choice, "Descent direction: gradient, conjugate_gradient, lbfgs"); - app.add_option( - 
"--num_iter", - opt_params->num_iter, - "Maximum number of iterations to perform") - ->check(CLI::NonNegativeNumber); - app.add_flag("--show_parameterization", show_parameterization, "Show final parameterization"); - app.add_option("-o,--output", output_dir, "Output directory"); - CLI11_PARSE(app, argc, argv); - - // Make output directory - spdlog::set_level(spdlog::level::info); - std::filesystem::create_directories(output_dir); - opt_params->output_dir = output_dir; - - // Get input mesh - Eigen::MatrixXd V, uv, N; - Eigen::MatrixXi F, FT, FN; - spdlog::info("Optimizing mesh at {}", mesh_filename); - igl::readOBJ(mesh_filename, V, uv, N, F, FT, FN); - - // Get input angles - std::vector Th_hat_init; - spdlog::info("Using cone angles at {}", Th_hat_filename); - read_vector_from_file(Th_hat_filename, Th_hat_init); - std::vector Th_hat = correct_cone_angles(Th_hat_init); - - // Get initial mesh for optimization - std::vector vtx_reindex; - std::vector free_cones = {}; - bool fix_boundary = false; - bool use_discrete_metric = false; - std::unique_ptr cone_metric = - generate_initial_mesh(V, F, V, F, Th_hat, vtx_reindex, free_cones, fix_boundary, use_discrete_metric); - - // Get energy - std::unique_ptr opt_energy = generate_energy(V, F, Th_hat, *cone_metric, energy_choice); - - // Compute shear dual basis and the corresponding inner product matrix - MatrixX shear_basis_matrix; - std::vector independent_edges; - compute_shear_dual_basis(*cone_metric, shear_basis_matrix, independent_edges); - - // Compute the shear dual coordinates for this basis - VectorX shear_basis_coords_init; - VectorX scale_factors_init; - compute_shear_basis_coordinates( - *cone_metric, - shear_basis_matrix, - shear_basis_coords_init, - scale_factors_init); - - // Optimize the metric - VectorX optimized_metric_coords = optimize_shear_basis_coordinates( - *cone_metric, - *opt_energy, - shear_basis_matrix, - proj_params, - opt_params); - - // Write the metric coordinate output - std::string 
output_filename = join_path(output_dir, "reduced_metric_coords"); - write_vector(optimized_metric_coords, output_filename); - - // Generate overlay mesh - std::vector is_cut = {}; - bool do_best_fit_scaling = false; - auto vf_res = generate_VF_mesh_from_metric( - V, - F, - Th_hat, - *cone_metric, - optimized_metric_coords, - is_cut, - do_best_fit_scaling); - OverlayMesh m_o = std::get<0>(vf_res); - Eigen::MatrixXd V_o = std::get<1>(vf_res); - Eigen::MatrixXi F_o = std::get<2>(vf_res); - Eigen::MatrixXd uv_o = std::get<3>(vf_res); - Eigen::MatrixXi FT_o = std::get<4>(vf_res); - std::vector fn_to_f_o = std::get<7>(vf_res); - std::vector> endpoints_o = std::get<8>(vf_res); - - // Write the overlay output - output_filename = join_path(output_dir, "overlay_mesh_with_uv.obj"); - write_obj_with_uv(output_filename, V_o, F_o, uv_o, FT_o); - - // Get refinement mesh - Eigen::MatrixXd V_r; - Eigen::MatrixXi F_r; - Eigen::MatrixXd uv_r; - Eigen::MatrixXi FT_r; - std::vector fn_to_f_r; - std::vector> endpoints_r; - RefinementMesh refinement_mesh(V_o, F_o, uv_o, FT_o, fn_to_f_o, endpoints_o); - refinement_mesh.get_VF_mesh(V_r, F_r, uv_r, FT_r, fn_to_f_r, endpoints_r); - - // Write the refined output - output_filename = join_path(output_dir, "refined_mesh_with_uv.obj"); - write_obj_with_uv(output_filename, V_r, F_r, uv_r, FT_r); - - // Optionally show final parameterization - if (show_parameterization) view_parameterization(V_r, F_r, uv_r, FT_r); -} diff --git a/src/app/plot_shear_energy.cpp b/src/app/plot_shear_energy.cpp deleted file mode 100644 index a30513b..0000000 --- a/src/app/plot_shear_energy.cpp +++ /dev/null @@ -1,136 +0,0 @@ -/********************************************************************************* -* This file is part of reference implementation of SIGGRAPH Asia 2023 Paper * -* `Metric Optimization in Penner Coordinates` * -* v1.0 * -* * -* The MIT License * -* * -* Permission is hereby granted, free of charge, to any person obtaining a * -* copy of this 
software and associated documentation files (the "Software"), * -* to deal in the Software without restriction, including without limitation * -* the rights to use, copy, modify, merge, publish, distribute, sublicense, * -* and/or sell copies of the Software, and to permit persons to whom the * -* Software is furnished to do so, subject to the following conditions: * -* * -* The above copyright notice and this permission notice shall be included in * -* all copies or substantial portions of the Software. * -* * -* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * -* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * -* FITNESS FOR A PARTICULAR PURPOSE AND NON INFRINGEMENT. IN NO EVENT SHALL THE * -* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * -* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * -* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * -* IN THE SOFTWARE. * -* * -* Author(s): * -* Ryan Capouellez, Denis Zorin, * -* Courant Institute of Mathematical Sciences, New York University, USA * -* * * -*********************************************************************************/ -#include "common.hh" -#include "explicit_optimization.hh" -#include "energies.hh" -#include "penner_optimization_interface.hh" -#include "constraint.hh" -#include "shear.hh" -#include "io.hh" -#include "projection.hh" -#include -#include - -/// Unlike with Penner coordinates, any choice of shear coordinates gives a valid -/// metric satisfying the constraints with an energy. Thus, we can can plot the energy -/// for any coordinates. 
We plot the energies of the metrics in a two dimensional grid -/// around the initial metric - -using namespace CurvatureMetric; - -int main(int argc, char *argv[]) -{ - spdlog::set_level(spdlog::level::debug); - assert(argc > 3); - std::string input_filename = argv[1]; - std::string Th_hat_filename = argv[2]; - std::string output_dir = argv[3]; - std::string energy_choice = argv[4]; - Scalar range = std::stod(argv[5]); - std::filesystem::create_directories(output_dir); - int num_grid_steps = 800; - - // Get input mesh - Eigen::MatrixXd V, uv, N; - Eigen::MatrixXi F, FT, FN; - spdlog::info("Plotting energy for the mesh at {}", input_filename); - igl::readOBJ(input_filename, V, uv, N, F, FT, FN); - - // Get input angles - std::vector Th_hat; - spdlog::info("Using cone angles at {}", Th_hat_filename); - read_vector_from_file(Th_hat_filename, Th_hat); - - // Get initial mesh for optimization - std::vector vtx_reindex; - std::vector free_cones = {}; - bool fix_boundary = false; - std::unique_ptr cone_metric = generate_initial_mesh(V, F, V, F, Th_hat, vtx_reindex, free_cones, fix_boundary, false); - - // Compute shear dual basis and the coordinates - MatrixX shear_basis_matrix; - std::vector independent_edges; - compute_shear_dual_basis(*cone_metric, shear_basis_matrix, independent_edges); - - // Build energy functions for given energy - LogLengthEnergy opt_energy(*cone_metric); - - // Build independent and dependent basis vectors by adding a global scaling term - // to the shear basis and removing and arbitrary basis vector from the scale factors - MatrixX constraint_domain_matrix, constraint_codomain_matrix; - VectorX domain_coords, codomain_coords; - compute_optimization_domain( - *cone_metric, - shear_basis_matrix, - constraint_domain_matrix, - constraint_codomain_matrix, - domain_coords, - codomain_coords - ); - spdlog::info( - "Plotting {} coordinates with codomain of dimension {}", - constraint_domain_matrix.cols(), - constraint_codomain_matrix.cols() - ); - 
Scalar x0 = domain_coords[0]; - Scalar y0 = domain_coords[1]; - - // Iterate over grid - auto proj_params = std::make_shared(); - Scalar delta = 2.0 * range / static_cast(num_grid_steps - 1); - Eigen::MatrixXd energy_grid(num_grid_steps, num_grid_steps); - for (int i = 0; i < num_grid_steps; ++i) - { - for (int j = 0; j < num_grid_steps; ++j) - { - // Update metric - Scalar dx = -range + delta * i; - Scalar dy = -range + delta * j; - domain_coords[0] = x0 + dx; - domain_coords[1] = y0 + dy; - - // Compute the energy for the shear metric coordinates - Scalar energy = compute_domain_coordinate_energy( - *cone_metric, - opt_energy, - constraint_domain_matrix, - constraint_codomain_matrix, - domain_coords, - codomain_coords, - proj_params); - energy_grid(i, j) = double(energy); - } - } - - // Write the output - std::string output_filename = join_path(output_dir, "energy_grid_"+energy_choice + "_range_" + argv[5]); - write_matrix(energy_grid, output_filename); -} diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt deleted file mode 100644 index 1d753d2..0000000 --- a/src/core/CMakeLists.txt +++ /dev/null @@ -1,34 +0,0 @@ -add_library(PennerOptimizationCoreLib - area.cpp - common.cpp - cone_metric.cpp - constraint.cpp - embedding.cpp - flip_matrix_generator.cpp - io.cpp - linear_algebra.cpp - projection.cpp - reparametrization.cpp - shear.cpp - vector.cpp - vf_mesh.cpp -) -target_include_directories(PennerOptimizationCoreLib PUBLIC .) 
-target_link_libraries(PennerOptimizationCoreLib PUBLIC - Eigen3::Eigen - conformal_cpp - igl::core - igl::predicates - spdlog::spdlog - ${MPFR_LIBRARIES} - ${POLYSCOPE_LIBRARIES} -) -target_compile_definitions(PennerOptimizationCoreLib PUBLIC - SPDLOG_ACTIVE_LEVEL=SPDLOG_LEVEL_DEBUG -) -# TODO Fix for multiprecision -if (NOT USE_MULTIPRECISION) - target_compile_options(PennerOptimizationCoreLib PRIVATE - -Wall -Wpedantic -Wextra -Werror - ) -endif() diff --git a/src/holonomy/CMakeLists.txt b/src/holonomy/CMakeLists.txt new file mode 100644 index 0000000..c80a44d --- /dev/null +++ b/src/holonomy/CMakeLists.txt @@ -0,0 +1,80 @@ +set(HolonomyCoreSrc + core/boundary_basis.cpp + core/common.cpp + core/dual_lengths.cpp + core/dual_loop.cpp + core/dual_segment.cpp + core/field.cpp + core/forms.cpp + core/homology_basis.cpp + core/intrinsic_field.cpp + core/quality.cpp + core/viewer.cpp +) + +set(HolonomySrc + holonomy/cones.cpp + holonomy/constraint.cpp + holonomy/holonomy.cpp + holonomy/marked_penner_cone_metric.cpp + holonomy/newton.cpp + holonomy/rotation_form.cpp +) + +set(SimilaritySrc + similarity/conformal.cpp + similarity/constraint.cpp + similarity/energy.cpp + similarity/layout.cpp + similarity/similarity_penner_cone_metric.cpp +) + +add_library(PennerHolonomyLib + interface.cpp + ${HolonomyCoreSrc} + ${HolonomySrc} + ${SimilaritySrc} + ${DirichletSrc} +) +target_include_directories(PennerHolonomyLib PUBLIC ../../include/holonomy) +target_link_libraries(PennerHolonomyLib PUBLIC + PennerUtilLib + PennerOptimizationLib + ${VISUALIZATION_LIBS} +) +target_link_libraries(PennerHolonomyLib PRIVATE + geometry-central + nlohmann_json::nlohmann_json + ${COMISO_LIBS} +) +target_compile_definitions(PennerHolonomyLib PUBLIC + SPDLOG_ACTIVE_LEVEL=SPDLOG_LEVEL_DEBUG +) +# TODO Fix for multiprecision +if (NOT USE_MULTIPRECISION) + target_compile_options(PennerHolonomyLib PRIVATE + -Wall -Wpedantic -Wextra -Werror + ) +endif() + +if(USE_PYBIND) + add_library(holonomy_py MODULE 
+ pybind.cpp + ) + + # Link libraries + target_link_libraries(holonomy_py PUBLIC + PennerHolonomyLib + pybind11::module + ${RENDER_LIBRARIES} + ) + + # Set pybinding settings + set_target_properties(holonomy_py PROPERTIES LIBRARY_OUTPUT_DIRECTORY + ${PROJECT_SOURCE_DIR}/py + ) + set_target_properties(holonomy_py PROPERTIES PREFIX + "${PYTHON_MODULE_PREFIX}" + ) +endif() + diff --git a/src/holonomy/core/boundary_basis.cpp b/src/holonomy/core/boundary_basis.cpp new file mode 100644 index 0000000..6d28404 --- /dev/null +++ b/src/holonomy/core/boundary_basis.cpp @@ -0,0 +1,89 @@ +#include "holonomy/core/boundary_basis.h" + +#include "holonomy/core/dual_lengths.h" +#include "util/boundary.h" + +namespace Penner { +namespace Holonomy { + +BoundaryBasisGenerator::BoundaryBasisGenerator(const Mesh& m) + : m_mesh(m) +{ + // Build halfedge to edge maps + build_edge_maps(m, m_he2e, m_e2he); + + // Get boundary components + m_basis_boundary_handles = find_boundary_components(m); + int root = 0; + if (!m_basis_boundary_handles.empty()) { + m_root_boundary_handle = m_basis_boundary_handles.back(); + root = m.f[m_root_boundary_handle]; + assert(m.type[m.h[root]] == 1); + m_basis_boundary_handles.pop_back(); + } + + // Build spanning dual tree from the root boundary + std::vector dual_edge_lengths = compute_dual_edge_lengths(m); + m_dual_tree = DualTree(m, dual_edge_lengths, root, true); +}; + +std::vector BoundaryBasisGenerator::construct_boundary_basis_loop(int index) const +{ + // Start from face adjacent to handle + int start_h = m_basis_boundary_handles[index]; + + // Circulate around boundary to build basis + std::vector basis_loop = {}; + int h = start_h; + do + { + // Iterate once to prevent duplication + h = m_mesh.opp[m_mesh.n[h]]; + + // Circulate to next boundary edge, adding faces to basis + while (m_mesh.type[h] != 2) { + basis_loop.push_back(m_mesh.f[h]); + h = m_mesh.opp[m_mesh.n[h]]; + } + h = m_mesh.opp[h]; + + } while (h != start_h); + + return basis_loop; +} + 
+std::vector BoundaryBasisGenerator::construct_boundary_path_basis_loop(int index) const +{ + int start_face = m_mesh.f[m_basis_boundary_handles[index]]; + std::vector dual_path = {start_face}; + std::vector dual_edges = {}; + + // Trace up the dual tree until a root is reached + int curr_face_index = start_face; + while (!m_dual_tree.is_root(curr_face_index)) { + // Get parent face of current face + int edge_index = m_dual_tree.out(curr_face_index); + curr_face_index = m_dual_tree.to(edge_index); + assert(m_dual_tree.from(edge_index) == dual_path.back()); + + // Add face and edge to the path + dual_path.push_back(curr_face_index); + } + assert(curr_face_index == m_mesh.f[m_root_boundary_handle]); + + // Build dual loop from the path and its double copy + std::vector basis_loop = dual_path; + basis_loop.reserve(2 * dual_path.size()); + for (auto itr = dual_path.rbegin(); itr != dual_path.rend(); ++itr) + { + int primal_face_index = *itr; + int copy_face_index = m_mesh.f[m_mesh.R[m_mesh.h[primal_face_index]]]; + assert(m_mesh.type[m_mesh.h[copy_face_index]] == 2); + basis_loop.push_back(copy_face_index); + } + + return basis_loop; +} + +} // namespace Holonomy +} // namespace Penner \ No newline at end of file diff --git a/src/holonomy/core/common.cpp b/src/holonomy/core/common.cpp new file mode 100644 index 0000000..ce3279e --- /dev/null +++ b/src/holonomy/core/common.cpp @@ -0,0 +1,42 @@ +#include "holonomy/core/common.h" + +namespace Penner { +namespace Holonomy { + +int compute_euler_characteristic(const Mesh& m) +{ + int n_v = m.n_vertices(); + int n_e = m.n_edges(); + int n_f = m.n_faces(); + return n_v - n_e + n_f; +} + +int compute_genus(const Mesh& m) +{ + int euler_characteristic = compute_euler_characteristic(m); + return (2 - euler_characteristic) / 2; +} + +Eigen::SparseMatrix compute_vv_to_halfedge_matrix(const Mesh& m) +{ + // Create the adjacency matrix (tail, head) -> halfedge index+1 + int n_v = m.n_ind_vertices(); + int n_he = m.n_halfedges(); + 
Eigen::SparseMatrix vv2he(n_v, n_v); + typedef Eigen::Triplet Trip; + std::vector trips; + trips.reserve(n_he); + for (int hij = 0; hij < n_he; ++hij) + { + if (m.type[hij] > 1) continue; // only use primal halfedges + int vi = m.v_rep[m.to[m.opp[hij]]]; + int vj = m.v_rep[m.to[hij]]; + trips.push_back(Trip(vi, vj, hij + 1)); + } + vv2he.setFromTriplets(trips.begin(), trips.end()); + + return vv2he; +} + +} // namespace Holonomy +} // namespace Penner \ No newline at end of file diff --git a/src/holonomy/core/dual_lengths.cpp b/src/holonomy/core/dual_lengths.cpp new file mode 100644 index 0000000..b8de17b --- /dev/null +++ b/src/holonomy/core/dual_lengths.cpp @@ -0,0 +1,180 @@ +#include "holonomy/core/dual_lengths.h" + +#include "optimization/core/constraint.h" + +#include + +namespace Penner { +namespace Holonomy { + +std::vector compute_dual_edge_lengths(const Mesh& m) +{ + int num_halfedges = m.n_halfedges(); + std::vector dual_edge_lengths(num_halfedges); + + // Compute cotangent angles + VectorX he2angle, he2cot; + Optimization::corner_angles(m, he2angle, he2cot); + + for (int hij = 0; hij < num_halfedges; ++hij) { + // Only process halfedge with lower index + if (hij > m.opp[hij]) continue; + + // Get the average of cotangents opposite the edge + int hji = m.opp[hij]; + Scalar ratio = 0.5 * (he2cot[hij] + he2cot[hji]); + + // Set the dual edge length so that its ratio with the primal length is the average of + // cotangents + dual_edge_lengths[hij] = dual_edge_lengths[hji] = abs(ratio * m.l[hij]); + } + + return dual_edge_lengths; +} + +// Compute the length of the path from a face to a root in a dual tree (or forest) +Scalar compute_dual_path_distance_to_root( + const Mesh& m, + const std::vector& weights, + const DualTree& dual_tree, + const std::vector& e2he, + int face_index) +{ + // Double distance is infinite + if (m.type[m.h[face_index]] == 2) { + return INF; + } + + // Trace path to root and compute length + Scalar dual_loop_length = 0.0; + int 
curr_face_index = face_index; + while (!dual_tree.is_root(curr_face_index)) { + int edge_index = dual_tree.out(curr_face_index); + curr_face_index = dual_tree.to(edge_index); + dual_loop_length += weights[e2he[dual_tree.edge(edge_index)]]; + } + + return dual_loop_length; +} + +// Check that dual path distances to roots are valid +bool is_valid_dual_path_distance_to_root( + const Mesh& m, + const std::vector& weights, + const DualTree& dual_tree, + const std::vector& distances) +{ + // Get edge maps + std::vector he2e; + std::vector e2he; + build_edge_maps(m, he2e, e2he); + + // Check each distance against direct computation + int num_faces = m.n_faces(); + for (int fi = 0; fi < num_faces; ++fi) { + Scalar distance_to_root = + compute_dual_path_distance_to_root(m, weights, dual_tree, e2he, fi); + if (!float_equal(distances[fi], distance_to_root, 1e-6)) { + spdlog::error( + "computed distance {} and actual distance {} for {} differ", + distances[fi], + distance_to_root, + fi); + return false; + } + } + + return true; +} + +// Compute the distances from dual vertices to the root of a dual spanning tree (or forest) +std::vector compute_dual_path_distances_to_root( + const Mesh& m, + const std::vector& weights, + const DualTree& dual_tree) +{ + // Get edge maps + std::vector he2e; + std::vector e2he; + build_edge_maps(m, he2e, e2he); + + // Initialize face map with 0 distances + int num_faces = m.n_faces(); + std::vector distances(num_faces, -1.0); + + // Set a priori distances + for (int fi = 0; fi < num_faces; ++fi) { + // Double distance is infinite + if (m.type[m.h[fi]] == 2) { + distances[fi] = INF; + continue; + } + + // Distance to a root is 0 + if (dual_tree.is_root(fi)) { + distances[fi] = 0.0; + } + } + + // Assign length for each remaining faces iteratively + for (int fi = 0; fi < num_faces; ++fi) { + if (distances[fi] >= 0.0) continue; + + // Trace path to first known distance (root in base case) + int curr_face_index = fi; + std::vector dual_path = {}; + 
while (distances[curr_face_index] < 0.0) { + dual_path.push_back(curr_face_index); + int edge_index = dual_tree.out(curr_face_index); + curr_face_index = dual_tree.to(edge_index); + } + + // Update lengths along reverse path + for (auto itr = dual_path.rbegin(); itr != dual_path.rend(); ++itr) { + int fj = *itr; + int edge_index = dual_tree.out(fj); + distances[fj] = + distances[dual_tree.to(edge_index)] + weights[e2he[dual_tree.edge(edge_index)]]; + } + } + + assert(is_valid_dual_path_distance_to_root(m, weights, dual_tree, distances)); + + return distances; +} + +std::vector compute_dual_loop_length_weights( + const Mesh& m, + const std::vector& weights, + const DualTree& dual_tree) +{ + // Get edge maps + std::vector he2e; + std::vector e2he; + build_edge_maps(m, he2e, e2he); + + // Initialize dual loop lengths to 0 + int num_halfedges = m.n_halfedges(); + std::vector dual_loop_lengths(num_halfedges, 0.0); + + // Precompute distance from faces to root + std::vector distances = compute_dual_path_distances_to_root(m, weights, dual_tree); + + // Compute triangle areas + for (int hij = 0; hij < num_halfedges; ++hij) { + // Only process lower index halfedge in edge + if (hij > m.opp[hij]) continue; + + // Skip edges in dual tree (default to zero) + if (dual_tree.is_edge_in_tree(he2e[hij])) continue; + + // Compute length of homotopy cycle path from adding the edge to the dual tree + dual_loop_lengths[hij] = distances[m.f[hij]] + distances[m.f[m.opp[hij]]] + weights[hij]; + dual_loop_lengths[m.opp[hij]] = dual_loop_lengths[hij]; + } + + return dual_loop_lengths; +} + +} // namespace Holonomy +} // namespace Penner diff --git a/src/holonomy/core/dual_loop.cpp b/src/holonomy/core/dual_loop.cpp new file mode 100644 index 0000000..01d14f7 --- /dev/null +++ b/src/holonomy/core/dual_loop.cpp @@ -0,0 +1,683 @@ +#include "holonomy/core/dual_loop.h" +#include +#include + +#ifdef ENABLE_VISUALIZATION +#include "polyscope/surface_mesh.h" +#endif + +namespace Penner { +namespace 
Holonomy { + +DualLoopManager::DualLoopManager(int num_edges) +{ + m_e_num_loops = std::vector(num_edges, 0); + m_e_first_loop = std::vector(num_edges, -1); + m_e_bucket = std::vector(num_edges, -1); + m_e2loops.clear(); + + m_empty_list = {}; + m_temp_list = {-1}; +} + +void DualLoopManager::clear() +{ + m_e2loops.clear(); + m_e_num_loops.clear(); + m_e_first_loop.clear(); + m_e_bucket.clear(); + + m_empty_list = {}; + m_temp_list = {-1}; +} + +// Add a map from a edge index to a loop index +void DualLoopManager::add_loop(int edge_index, int loop_index) +{ + // If the edge has no loops intersecting it, record the new loop in the first loop array + if (m_e_num_loops[edge_index] == 0) { + m_e_num_loops[edge_index] = 1; + m_e_first_loop[edge_index] = loop_index; + } + // If the edge already has a single outgoing loop, move it and the new loop to the + // unordered map + else if (m_e_num_loops[edge_index] == 1) { + // Check if trying to add a duplicate entry + if (m_e_first_loop[edge_index] == loop_index) return; + + // Get bucket for the edge index (only allocate if needed) + if (m_e_bucket[edge_index] < 0) { + m_e_bucket[edge_index] = m_e2loops.size(); + m_e2loops.push_back({}); + } + + // Copy to the bucket + int bucket_index = m_e_bucket[edge_index]; + m_e2loops[bucket_index].clear(); + m_e2loops[bucket_index].push_back(m_e_first_loop[edge_index]); + m_e2loops[bucket_index].push_back(loop_index); + m_e_num_loops[edge_index] = 2; + } + // If the edge already has multiple outgoing loops, add the new loop to the map + else { + int bucket_index = m_e_bucket[edge_index]; + const auto& bucket = m_e2loops[bucket_index]; + + // Only add loop if not already in map + if (std::find(bucket.begin(), bucket.end(), loop_index) == bucket.end()) + { + m_e2loops[bucket_index].push_back(loop_index); + m_e_num_loops[edge_index] += 1; + } + } +} + +void DualLoopManager::register_loop_edges( + int loop_index, + const Mesh& m, + const DualLoop& dual_loop) +{ + // Get edge maps + 
std::vector he2e, e2he; + build_edge_maps(m, he2e, e2he); + + // Add all loops adjacent to the dual loop + for (const auto& dual_segment : dual_loop) { + int hij = dual_segment[0]; + for (int h : {hij, m.n[hij], m.n[m.n[hij]]}) { + add_loop(he2e[h], loop_index); + } + } +} + +// Remove all the recorded outgoing loops for a edge from the edge-to-loop map +void DualLoopManager::erase_entry(int edge_index) +{ + // Set the number of outgoing loops as zero + m_e_num_loops[edge_index] = 0; +} + +// Get list of loops that start at a given edge +const std::vector& DualLoopManager::get_edge_loops(int edge_index) +{ + // Use preallocated empty list for the one-to-zero case + if (m_e_num_loops[edge_index] == 0) { + return m_empty_list; + } + // Overwrite and use preallocated single edge list for the one-to-one case + // WARNING: dangerous for parallelism + else if (m_e_num_loops[edge_index] == 1) { + m_temp_list[0] = m_e_first_loop[edge_index]; + return m_temp_list; + } + // Use stored list for the one-to-many case + else { + int bucket_index = m_e_bucket[edge_index]; + assert(bucket_index >= 0); + return m_e2loops[bucket_index]; + } +} + +DenseHalfedgeMap::DenseHalfedgeMap(int num_halfedges) +{ + m_h_num_segments = std::vector(num_halfedges, 0); + m_h_first_segment = std::vector(num_halfedges, -1); + m_h_bucket = std::vector(num_halfedges, -1); + m_h2segments.clear(); + + m_empty_list = {}; + m_temp_list = {-1}; +} + +void DenseHalfedgeMap::clear() +{ + m_h2segments.clear(); + m_h_num_segments.clear(); + m_h_first_segment.clear(); + m_h_bucket.clear(); + + m_empty_list = {}; + m_temp_list = {-1}; +} + +// Add a map from a halfedge index to a segment index +void DenseHalfedgeMap::add_segment(int halfedge_index, int segment_index) +{ + // We need to map halfedges to segments that start at this halfedge. In the vast majority of + // cases, this is one-to-zero or one-to-one, but for some halfedges there may be many outgoing + // segments. 
Using a fully general map data structure is slow due to cache incoherence. + // + // To balance generality with efficiency we, explicitly track the number of outgoing segments, + // use a fixed size vector to record the map from halfedges to the first outgoing segment, + // and only switch to an unordered map if a one-to-many case is encountered. + + // If the halfedge has no outgoing segments yet, record the new segment in the first segment + // array + if (m_h_num_segments[halfedge_index] == 0) { + m_h_num_segments[halfedge_index] = 1; + m_h_first_segment[halfedge_index] = segment_index; + } + // If the halfedge already has a single outgoing segment, move it and the new segment to the + // unordered map + else if (m_h_num_segments[halfedge_index] == 1) { + // Get bucket for the halfedge index (only allocate if needed) + if (m_h_bucket[halfedge_index] < 0) { + m_h_bucket[halfedge_index] = m_h2segments.size(); + m_h2segments.push_back({}); + } + + // Copy to the bucket + int bucket_index = m_h_bucket[halfedge_index]; + m_h2segments[bucket_index].clear(); + m_h2segments[bucket_index].push_back(m_h_first_segment[halfedge_index]); + m_h2segments[bucket_index].push_back(segment_index); + m_h_num_segments[halfedge_index] = 2; + } + // If the halfedge already has multiple outgoing segments, add the new segment to the map + else { + int bucket_index = m_h_bucket[halfedge_index]; + m_h2segments[bucket_index].push_back(segment_index); + m_h_num_segments[halfedge_index] += 1; + } +} + +// Remove all the recorded outgoing segments for a halfedge from the halfedge-to-segment map +void DenseHalfedgeMap::erase_entry(int halfedge_index) +{ + // Set the number of outgoing segments as zero + m_h_num_segments[halfedge_index] = 0; +} + +// Get list of segments that start at a given halfedge +const std::vector& DenseHalfedgeMap::get_halfedge_segments(int halfedge_index) +{ + // Use preallocated empty list for the one-to-zero case + if (m_h_num_segments[halfedge_index] == 0) { + return 
m_empty_list; + } + // Overwrite and use preallocated single edge list for the one-to-one case + // WARNING: dangerous for parallelism + else if (m_h_num_segments[halfedge_index] == 1) { + m_temp_list[0] = m_h_first_segment[halfedge_index]; + return m_temp_list; + } + // Use stored list for the one-to-many case + else { + int bucket_index = m_h_bucket[halfedge_index]; + assert(bucket_index >= 0); + return m_h2segments[bucket_index]; + } +} + +SparseHalfedgeMap::SparseHalfedgeMap() +{ + m_empty_list = {}; +} + +void SparseHalfedgeMap::clear() +{ + m_h2segments.clear(); + + m_empty_list = {}; +} + +void SparseHalfedgeMap::add_segment(int halfedge_index, int segment_index) +{ + m_h2segments[halfedge_index].push_back(segment_index); +} + +void SparseHalfedgeMap::erase_entry(int halfedge_index) +{ + m_h2segments.erase(halfedge_index); +} + +const std::vector& SparseHalfedgeMap::get_halfedge_segments(int halfedge_index) +{ + const auto& itr = m_h2segments.find(halfedge_index); + if (itr != m_h2segments.end()) { + return itr->second; + } else { + return m_empty_list; + } +} + + +DualLoopConnectivity::DualLoopConnectivity() + : m_halfedge_map() +{ + clear(); +} + +DualLoopConnectivity::DualLoopConnectivity( + const std::vector& dual_loop_segments) + : m_halfedge_map() +{ + clear(); + + // Resize segment arrays + int num_segments = dual_loop_segments.size(); + m_next.resize(num_segments); + m_prev.resize(num_segments); + m_start.resize(num_segments); + m_end.resize(num_segments); + + // Initialize trivial free and deleted index data + m_is_deleted = std::vector(num_segments, false); + m_free_indices.clear(); + + for (int i = 0; i < num_segments; ++i) { + // Find the edge adjacent to the next face in the sequence + int j = (i + 1) % num_segments; // next periodic index + + // Set connectivity for the current edge (just a simple periodic offset by 1) + m_next[i] = j; + m_prev[j] = i; + + // Set dual loop halfedge indices for the current segment + m_start[i] = 
dual_loop_segments[i][0]; + m_end[i] = dual_loop_segments[i][1]; + + // Map halfedge to segment starting at this halfedge + m_halfedge_map.add_segment(m_start[i], i); + } + +} + +DualLoopConnectivity::DualLoopConnectivity( + const Mesh& m, + const std::vector& dual_loop_faces) + : DualLoopConnectivity(build_dual_path_from_face_sequence(m, dual_loop_faces)) +{ + assert(is_valid_dual_loop(m)); +} + +void DualLoopConnectivity::update_under_ccw_flip(const Mesh& m, int halfedge_index) +{ + assert(is_valid_dual_loop(m)); + + // Get halfedges and faces in the flipped quad + int hij = halfedge_index; + int hjk = m.n[hij]; + int hki = m.n[hjk]; + + int hji = m.opp[hij]; + int hil = m.n[hji]; + int hlj = m.n[hil]; + + // Clear current halfedge to segment maps for flipped edge + m_halfedge_map.erase_entry(hij); + m_halfedge_map.erase_entry(hji); + + // Before + // vk + // hki / \ hjk + // / hij \ . + // vi --- vj + // \ hji / . + // hil \ / hlj + // vl + // + // After + // vk + // hki / | \ hjk + // / | \ . + // vi | vj + // \ | / . + // hil \ | / hlj + // vl + + // Adjust portions of the dual loop that intersect the quad + // There are three possible cases (up to rotational symmetry) + // 1. The dual loop enters and leaves the quad from the same triangle + // 2. The dual loop crosses the diagonal and exits the opposite side of the quad + // 3. The dual loop crosses the diagonal and exits the same side of the quad + // For each possible initial edge, we handle the above three cases in this order, which + // necessitate the following corresponding operations + // 1. Split one segment into two + // 2. Flip (or leave unchanged) the edge between two segments + // 3. 
Combine two segments into one + + // Dual loop segments entering quad from hil + for (int segment_index : m_halfedge_map.get_halfedge_segments(hil)) { + int next_segment_index = get_next(segment_index); + if (get_end(segment_index) == hlj) { + split_segment(segment_index, hij, hji); + } else if (get_start(next_segment_index) == hij && get_end(next_segment_index) == hjk) { + flip_segments(segment_index, next_segment_index, hij, hji); + } else if (get_start(next_segment_index) == hij && get_end(next_segment_index) == hki) { + combine_segments(segment_index, next_segment_index); + } else { + throw std::runtime_error("Invalid dual loop flip encountered"); + } + } + + // Dual loop edge enters quad from hlj + for (int segment_index : m_halfedge_map.get_halfedge_segments(hlj)) { + int next_segment_index = get_next(segment_index); + if (get_end(segment_index) == hil) { + split_segment(segment_index, hji, hij); + } else if (get_start(next_segment_index) == hij && get_end(next_segment_index) == hki) { + flip_segments(segment_index, next_segment_index, hji, hij); + } else if (get_start(next_segment_index) == hij && get_end(next_segment_index) == hjk) { + combine_segments(segment_index, next_segment_index); + } else { + throw std::runtime_error("Invalid dual loop flip encountered"); + } + } + + // Dual loop edge enters quad from hjk + for (int segment_index : m_halfedge_map.get_halfedge_segments(hjk)) { + int next_segment_index = get_next(segment_index); + if (get_end(segment_index) == hki) { + split_segment(segment_index, hji, hij); + } else if (get_start(next_segment_index) == hji && get_end(next_segment_index) == hil) { + flip_segments(segment_index, next_segment_index, hji, hij); + } else if (get_start(next_segment_index) == hji && get_end(next_segment_index) == hlj) { + combine_segments(segment_index, next_segment_index); + } else { + throw std::runtime_error("Invalid dual loop flip encountered"); + } + } + + // Dual loop edge enters quad from hki + for (int segment_index 
: m_halfedge_map.get_halfedge_segments(hki)) { + int next_segment_index = get_next(segment_index); + if (get_end(segment_index) == hjk) { + split_segment(segment_index, hij, hji); + } else if (get_start(next_segment_index) == hji && get_end(next_segment_index) == hlj) { + flip_segments(segment_index, next_segment_index, hij, hji); + } else if (get_start(next_segment_index) == hji && get_end(next_segment_index) == hil) { + combine_segments(segment_index, next_segment_index); + } else { + throw std::runtime_error("Invalid dual loop flip encountered"); + } + } + + assert(is_valid_connectivity()); // can only check connectivity before mesh flip complete +} + +std::vector DualLoopConnectivity::generate_face_sequence(const Mesh& m) const +{ + // Resize face loop to the size of the dual loop + std::vector dual_loop_faces; + dual_loop_faces.reserve(count_segment_indices()); + + // Map dual segments to faces + for (const auto& dual_segment : *this) { + dual_loop_faces.push_back(compute_dual_segment_face(m, dual_segment)); + } + + return dual_loop_faces; +} + +void DualLoopConnectivity::clear() +{ + m_next.clear(); + m_prev.clear(); + m_start.clear(); + m_end.clear(); + m_is_deleted.clear(); + m_free_indices.clear(); + + m_halfedge_map.clear(); +} + +// Get the first undeleted segment index +int DualLoopConnectivity::get_start_segment_index() const +{ + // Find index that is not deleted + int num_indices = count_segment_indices(); + for (int i = 0; i < num_indices; ++i) { + if (!is_deleted(i)) return i; + } + + // Return invalid index otherwise + return -1; +} + +// Construct the dual segment corresponding to a given dual segment index +DualSegment DualLoopConnectivity::get_dual_segment(int segment_index) const +{ + assert(is_valid_segment_index(segment_index)); + DualSegment dual_segment = {get_start(segment_index), get_end(segment_index)}; + return dual_segment; +} + +// Check if a segment index is valid +bool DualLoopConnectivity::is_valid_segment_index(int segment_index) 
const +{ + // Segment index should be in the bounds for the segment arrays + int num_segments = m_next.size(); + if (segment_index < 0) return false; + if (segment_index >= num_segments) return false; + + // Corresponding segment should not be deleted + if (m_is_deleted[segment_index]) return false; + + return true; +} + +// Check if the dual loop connectivity is valid (but not consistent with the underlying mesh) +bool DualLoopConnectivity::is_valid_connectivity() const +{ + if (!m_check_validity) return true; // option to skip validity check for debugging + int num_segments = m_next.size(); + + // Check next and prev are inverse + for (int i = 0; i < num_segments; ++i) { + if (m_is_deleted[i]) continue; + + // previous segment is valid + if (!is_valid_segment_index(m_prev[i])) { + spdlog::error("Segment {} has invalid previous segment", i); + return false; + } + + // next segment is valid + if (!is_valid_segment_index(m_next[i])) { + spdlog::error("Segment {} has invalid next segment", i); + return false; + } + + // prev-next is identity + if (m_next[m_prev[i]] != i) { + spdlog::error("Segment i = {} does not satisfy next[prev[i]] = i", i); + return false; + } + + // next-prev is identity + if (m_prev[m_next[i]] != i) { + spdlog::error("Segment i = {} does not satisfy prev[next[i]] = i", i); + return false; + } + } + + // Check free indices and deleted indices are the same + int num_free_indices = m_free_indices.size(); + for (int i : m_free_indices) { + if (!m_is_deleted[i]) { + spdlog::error("Free segment index {} is not deleted", i); + return false; + } + } + if (std::count(m_is_deleted.begin(), m_is_deleted.end(), true) != num_free_indices) { + spdlog::error("Inconsistent number of deleted and free indices"); + return false; + } + + return true; +} + +// Check if the dual loop connectivity is valid and consistent with the underlying mesh +bool DualLoopConnectivity::is_valid_dual_loop(const Mesh& m) const +{ + if (!m_check_validity) return true; + + int 
num_segments = m_next.size(); + + // Check connectivity conditions + if (!is_valid_connectivity()) return false; + + // Check start and end halfedges are valid + int num_halfedges = m.n_halfedges(); + for (int i = 0; i < num_segments; ++i) { + if (m_is_deleted[i]) continue; + + // start halfedge is valid + int h_start = m_start[i]; + if ((h_start < 0) || (h_start >= num_halfedges)) { + spdlog::error("Start {} of segment {} is invalid", h_start, i); + return false; + } + + // end halfedge is valid + int h_end = m_end[i]; + if ((h_end < 0) || (h_end >= num_halfedges)) { + spdlog::error("End {} of segment {} is invalid", h_end, i); + return false; + } + + // start and end halfedges in same face + if (m.f[h_start] != m.f[h_end]) { + spdlog::error("Segment {} = ({}, {}) is not contained in a face", i, h_start, h_end); + return false; + } + } + + // Check halfedge to segment map is valid + // TODO + // for (int i = 0; i < num_halfedges; ++i) { + // const auto& segments = itr.second; + // for (int segment_index : segments) { + // if (!is_valid_segment_index(segment_index)) return false; + // if (m_start[segment_index] != h) return false; + // } + //} + + return true; +} + +// Split one segment into two with new splitting edge having the given halfedge indices +void DualLoopConnectivity::split_segment( + int segment_index, + int halfedge_index, + int opposite_halfedge) +{ + assert(is_valid_connectivity()); + assert(is_valid_segment_index(segment_index)); + + // Get the next segment and a new segment + int next_segment_index = m_next[segment_index]; + int new_segment_index = create_segment_index(); + + // Get the halfedges at the start and end of the current segment + int h_start = m_start[segment_index]; + int h_end = m_end[segment_index]; + + // Connect new segment in the loop + m_next[segment_index] = new_segment_index; + m_prev[new_segment_index] = segment_index; + m_next[new_segment_index] = next_segment_index; + m_prev[next_segment_index] = new_segment_index; + + // 
Set the start and end halfedge of the current segment + m_start[segment_index] = h_start; + m_end[segment_index] = halfedge_index; + + // Set the start and end halfedge of the new segment + m_start[new_segment_index] = opposite_halfedge; + m_end[new_segment_index] = h_end; + + // Add the new segment to the halfedge map + m_halfedge_map.add_segment(opposite_halfedge, new_segment_index); + + assert(is_valid_connectivity()); +} + +// Change the halfedge indices for the start and end of the given (adjacent) segments. +// This is needed for flipping an edge in a loop that enters and leaves a flip +// quad on opposite sides. +void DualLoopConnectivity::flip_segments( + int first_segment_index, + int second_segment_index, + int halfedge_index, + int opposite_halfedge) +{ + assert(is_valid_connectivity()); + assert(is_valid_segment_index(first_segment_index)); + assert(is_valid_segment_index(second_segment_index)); + assert(m_next[first_segment_index] == second_segment_index); + + // Overwrite shared edge indices + m_end[first_segment_index] = halfedge_index; + m_start[second_segment_index] = opposite_halfedge; + + // Add the second segment to the halfedge map + m_halfedge_map.add_segment(opposite_halfedge, second_segment_index); + + assert(is_valid_connectivity()); +} + +// Combine two (adjacent) segments into a single segment +void DualLoopConnectivity::combine_segments(int first_segment_index, int second_segment_index) +{ + assert(is_valid_connectivity()); + assert(is_valid_segment_index(first_segment_index)); + assert(is_valid_segment_index(second_segment_index)); + assert(m_next[first_segment_index] == second_segment_index); + + // Get the next segment in the loop + int next_segment_index = m_next[second_segment_index]; + + // Get the start and ending halfedge of the combined segment + int h_start = m_start[first_segment_index]; + int h_end = m_end[second_segment_index]; + + // Remove segment segment from the loop + m_next[first_segment_index] = next_segment_index; + 
m_prev[next_segment_index] = first_segment_index; + + // Set the start and end halfedge of the first segment to the combined values + m_start[first_segment_index] = h_start; + m_end[first_segment_index] = h_end; + + // Remove the second segment + delete_segment_index(second_segment_index); + + assert(is_valid_connectivity()); +} + +// Get a free segment index +int DualLoopConnectivity::create_segment_index() +{ + int segment_index = -1; + + // Get a free index if one exist + if (!m_free_indices.empty()) { + segment_index = m_free_indices.back(); + m_free_indices.pop_back(); + m_is_deleted[segment_index] = false; + } + // Allocate more space if not + else { + segment_index = m_next.size(); + m_next.push_back(-1); + m_prev.push_back(-1); + m_start.push_back(-1); + m_end.push_back(-1); + m_is_deleted.push_back(false); + } + + return segment_index; +} + +// Mark a segment index as deleted and free for reuse +void DualLoopConnectivity::delete_segment_index(int segment_index) +{ + m_free_indices.push_back(segment_index); + m_is_deleted[segment_index] = true; +} + + +} // namespace Holonomy +} // namespace Penner \ No newline at end of file diff --git a/src/holonomy/core/dual_segment.cpp b/src/holonomy/core/dual_segment.cpp new file mode 100644 index 0000000..e339446 --- /dev/null +++ b/src/holonomy/core/dual_segment.cpp @@ -0,0 +1,312 @@ +#include "holonomy/core/dual_segment.h" +#include +#include + +#ifdef ENABLE_VISUALIZATION +#include "polyscope/surface_mesh.h" +#endif + +namespace Penner { +namespace Holonomy { + +bool is_valid_dual_segment(const Mesh& m, const DualSegment& dual_segment) +{ + // Just check segment halfedges belong to the same face + return (m.f[dual_segment[0]] == m.f[dual_segment[1]]); +} + +bool is_valid_dual_path(const Mesh& m, const std::vector& dual_path) +{ + int path_length = dual_path.size(); + for (int i = 0; i < path_length; ++i) { + // Check segment i is valid + if (!is_valid_dual_segment(m, dual_path[i])) { + spdlog::error("Invalid dual 
segment ({}, {})", dual_path[i][0], dual_path[i][1]); + return false; + } + + // Check sequential dual segments are adjacent in the mesh + if ((i + 1 < path_length) && (dual_path[i][1] != m.opp[dual_path[i + 1][0]])) { + spdlog::error("Dual segments {} and {} are not adjacent", i, i + 1); + return false; + } + } + return true; +} + +bool is_valid_dual_loop(const Mesh& m, const std::vector& dual_loop) +{ + // Check if the loop is a valid path + if (!is_valid_dual_path(m, dual_loop)) { + return false; + } + + // Loop must be at least size 1 + if (dual_loop.empty()) { + return false; + } + + // Check last dual segment is adjacent to the first + if (dual_loop.back()[1] != m.opp[dual_loop.front()[0]]) { + spdlog::error("Initial and final dual segments are not adjacent"); + return false; + } + + return true; +} + +DualSegment reverse_dual_segment(const DualSegment& dual_segment) +{ + return DualSegment({dual_segment[1], dual_segment[0]}); +} + +std::vector reverse_dual_path(const std::vector& dual_path) +{ + int num_segments = dual_path.size(); + std::vector reversed_dual_path(num_segments); + for (int i = 0; i < num_segments; ++i) { + reversed_dual_path[num_segments - 1 - i] = reverse_dual_segment(dual_path[i]); + } + + return reversed_dual_path; +} + +int compute_dual_segment_face(const Mesh& m, const DualSegment& dual_segment) +{ + assert(is_valid_dual_segment(m, dual_segment)); + int h = dual_segment[0]; + return m.f[h]; +} + +std::vector build_face_sequence_from_dual_path( + const Mesh& m, + const std::vector& dual_path) +{ + // Resize face path to the size of the dual path + int num_segments = dual_path.size(); + std::vector dual_path_faces(num_segments); + + // Map dual segments to faces + for (int i = 0; i < num_segments; ++i) { + dual_path_faces[i] = compute_dual_segment_face(m, dual_path[i]); + } + + return dual_path_faces; +} + +std::vector build_dual_path_from_face_sequence( + const Mesh& m, + const std::vector& dual_loop_faces) +{ + // Resize dual loop to 
the size of the face loop + int num_segments = dual_loop_faces.size(); + std::vector dual_loop(num_segments); + + // Get initial halfedge for the first face + int h = m.h[dual_loop_faces[0]]; + for (int i = 0; i < num_segments; ++i) { + // Find the edge adjacent to the next face in the sequence + int j = (i + 1) % num_segments; // next periodic index + int next_face = dual_loop_faces[j]; + int h_start = h; + while (m.f[m.opp[h]] != next_face) { + h = m.n[h]; + + // Catch full face circulation without finding the desired next face + if (h == h_start) { + throw std::runtime_error("Face dual loop is not connected"); + } + } + + // Set dual loop halfedge indices for the current edge + dual_loop[i][1] = h; + dual_loop[j][0] = m.opp[h]; + + // Increment traversal halfedge to the next face + h = m.n[m.opp[h]]; + } + + assert(is_valid_dual_loop(m, dual_loop)); + return dual_loop; +} + +void update_dual_loop_under_ccw_flip( + const Mesh& m, + int halfedge_index, + std::vector& dual_loop) +{ + assert(is_valid_dual_loop(m, dual_loop)); + + // Get halfedges in the flipped quad + int hij = halfedge_index; + int hjk = m.n[hij]; + int hki = m.n[hjk]; + + int hji = m.opp[hij]; + int hil = m.n[hji]; + int hlj = m.n[hil]; + + // Before + // vk + // hki / \ hjk + // / hij \ . + // vi --- vj + // \ hji / . + // hil \ / hlj + // vl + // + // After + // vk + // hki / | \ hjk + // / | \ . + // vi | vj + // \ | / . 
+ // hil \ | / hlj + // vl + + // Adjust portions of the dual loop that intersect the quad + int num_segments = dual_loop.size(); + std::vector flipped_dual_loop(0); + flipped_dual_loop.reserve(num_segments + 10); + for (int n = 0; n < num_segments; ++n) { + DualSegment curr_segment = dual_loop[n]; + DualSegment next_segment = dual_loop[(n + 1) % num_segments]; + + // Should never process from center of quad; skip if occurs in first iteration + if ((curr_segment[0] == hij) || (curr_segment[0] == hji)) { + spdlog::trace("Skipping initial segment in flipped quad"); + continue; + } + + // There are three possible cases (up to rotational symmetry) + // 1. The dual loop enters and leaves the quad from the same triangle + // 2. The dual loop crosses the diagonal and exits the opposite side of the quad + // 3. The dual loop crosses the diagonal and exits the same side of the quad + // For each possible initial edge, we handle the above three cases in this order + + // Dual loop edge enters quad from hil + if (curr_segment[0] == hil) { + spdlog::trace("Loop enters at hil"); + if (curr_segment[1] == hlj) { + flipped_dual_loop.push_back({curr_segment[0], hij}); + flipped_dual_loop.push_back({hji, curr_segment[1]}); + spdlog::trace("Loop exits same triangle"); + } else if (next_segment[0] == hij && next_segment[1] == hjk) { + flipped_dual_loop.push_back({curr_segment[0], hij}); + flipped_dual_loop.push_back({hji, next_segment[1]}); + n += 1; // increment since two segments processed + spdlog::trace("Loop exits opposite side"); + } else if (next_segment[0] == hij && next_segment[1] == hki) { + flipped_dual_loop.push_back({curr_segment[0], next_segment[1]}); + n += 1; // increment since two segments processed + spdlog::trace("Loop exits same side"); + } + } + + // Dual loop edge enters quad from hlj + else if (curr_segment[0] == hlj) { + spdlog::trace("Loop enters at hlj"); + if (curr_segment[1] == hil) { + flipped_dual_loop.push_back({curr_segment[0], hji}); + 
flipped_dual_loop.push_back({hij, curr_segment[1]}); + spdlog::trace("Loop exits same triangle"); + } else if (next_segment[0] == hij && next_segment[1] == hki) { + flipped_dual_loop.push_back({curr_segment[0], hji}); + flipped_dual_loop.push_back({hij, next_segment[1]}); + n += 1; // increment since two segments processed + spdlog::trace("Loop exits opposite side"); + } else if (next_segment[0] == hij && next_segment[1] == hjk) { + flipped_dual_loop.push_back({curr_segment[0], next_segment[1]}); + n += 1; // increment since two segments processed + spdlog::trace("Loop exits same side"); + } + } + + // Dual loop edge enters quad from hjk + else if (curr_segment[0] == hjk) { + spdlog::trace("Loop enters at hjk"); + if (curr_segment[1] == hki) { + flipped_dual_loop.push_back({curr_segment[0], hji}); + flipped_dual_loop.push_back({hij, curr_segment[1]}); + spdlog::trace("Loop exits same triangle"); + } else if (next_segment[0] == hji && next_segment[1] == hil) { + flipped_dual_loop.push_back({curr_segment[0], hji}); + flipped_dual_loop.push_back({hij, next_segment[1]}); + n += 1; // increment since two segments processed + spdlog::trace("Loop exits opposite side"); + } else if (next_segment[0] == hji && next_segment[1] == hlj) { + flipped_dual_loop.push_back({curr_segment[0], next_segment[1]}); + n += 1; // increment since two segments processed + spdlog::trace("Loop exits same side"); + } + } + + // Dual loop edge enters quad from hki + else if (curr_segment[0] == hki) { + spdlog::trace("Loop enters at hki"); + if (curr_segment[1] == hjk) { + flipped_dual_loop.push_back({curr_segment[0], hij}); + flipped_dual_loop.push_back({hji, curr_segment[1]}); + spdlog::trace("Loop exits same triangle"); + } else if (next_segment[0] == hji && next_segment[1] == hlj) { + flipped_dual_loop.push_back({curr_segment[0], hij}); + flipped_dual_loop.push_back({hji, next_segment[1]}); + n += 1; // increment since two segments processed + spdlog::trace("Loop exits opposite side"); + } 
else if (next_segment[0] == hji && next_segment[1] == hil) { + flipped_dual_loop.push_back({curr_segment[0], next_segment[1]}); + n += 1; // increment since two segments processed + spdlog::trace("Loop exits same side"); + } + } + + // Just copy dual segment + else { + flipped_dual_loop.push_back(curr_segment); + } + } + + // Copy back to original vector + dual_loop = flipped_dual_loop; +} + +void update_dual_loop_under_ccw_flip_sequence( + const Mesh& m, + const std::vector& flip_seq, + std::vector& dual_loop) +{ + Mesh m_flip = m; + for (const auto& h_flip : flip_seq) { + update_dual_loop_under_ccw_flip(m_flip, h_flip, dual_loop); + m_flip.flip_ccw(h_flip); + } +} + +void view_dual_path( + const Eigen::MatrixXd& V, + const Eigen::MatrixXi& F, + const Mesh& m, + const std::vector& dual_path) +{ + // Build corresponding face sequence + std::vector dual_path_faces = build_face_sequence_from_dual_path(m, dual_path); + Eigen::VectorXd is_dual_path_face; + is_dual_path_face.setZero(m.n_faces()); + for (const auto& dual_path_face : dual_path_faces) { + is_dual_path_face(dual_path_face) = 1.0; + } + +#ifdef ENABLE_VISUALIZATION + polyscope::init(); + polyscope::registerSurfaceMesh("path_mesh", V, F) + ->addFaceScalarQuantity("dual_path", is_dual_path_face); + polyscope::show(); +#else + int n_v = V.rows(); + int n_f = F.rows(); + spdlog::error("Cannot visualize dual path for mesh with {} vertices and {} faces", n_v, n_f); +#endif +} + +} // namespace Holonomy +} // namespace Penner \ No newline at end of file diff --git a/src/holonomy/core/field.cpp b/src/holonomy/core/field.cpp new file mode 100644 index 0000000..5342c22 --- /dev/null +++ b/src/holonomy/core/field.cpp @@ -0,0 +1,59 @@ +#include "holonomy/core/field.h" + +#include "util/vf_mesh.h" +#include "holonomy/core/dual_loop.h" +#include "holonomy/core/forms.h" + +#include "optimization/core/constraint.h" +#include "util/vector.h" + +#if USE_COMISO +#include +#endif + +namespace Penner { +namespace Holonomy { + 
+std::tuple> generate_cross_field( + const Eigen::MatrixXd& V, + const Eigen::MatrixXi& F) +{ + // Compute cross field and singularities from comiso + Eigen::MatrixXd frame_field; + Eigen::VectorXd S; +#if USE_COMISO + Eigen::VectorXi b(1); + Eigen::MatrixXd bc(1, 3); + b << 0; + bc << 1, 1, 1; + int degree = 4; + igl::copyleft::comiso::nrosy(V, F, b, bc, degree, frame_field, S); +#else + int num_vertices = V.rows(); + int num_faces = F.rows(); + spdlog::error( + "Comiso solver not enabled for #V={}, #F={} mesh. Set USE_COMISO to use.", + num_vertices, + num_faces); +#endif + + // Get the boundary vertices + std::vector is_boundary_vertex = compute_boundary_vertices(F, V.rows()); + + // Turn singularities into a flat metric + // FIXME This is only accurate for closed meshes; singularities only make sense with doubling + int num_cone_vertices = S.size(); + std::vector Th_hat(num_cone_vertices); + for (int vi = 0; vi < num_cone_vertices; ++vi) { + if (is_boundary_vertex[vi]) { + Th_hat[vi] = M_PI - (2 * M_PI * S[vi]); + } else { + Th_hat[vi] = 2 * M_PI * (1 - S[vi]); + } + } + + return std::make_tuple(frame_field, Th_hat); +} + +} // namespace Holonomy +} // namespace Penner \ No newline at end of file diff --git a/src/holonomy/core/forms.cpp b/src/holonomy/core/forms.cpp new file mode 100644 index 0000000..bad53b5 --- /dev/null +++ b/src/holonomy/core/forms.cpp @@ -0,0 +1,334 @@ +#include "holonomy/core/forms.h" + +#include "optimization/core/constraint.h" +#include + +namespace Penner { +namespace Holonomy { + +bool is_valid_one_form(const Mesh& m, const VectorX& one_form) +{ + int num_halfedges = m.n_halfedges(); + if (one_form.size() != num_halfedges) { + return false; + } + + // Check opposite halfedges are inverse + for (int hij = 0; hij < num_halfedges; ++hij) { + int hji = m.opp[hij]; + Scalar xij = one_form[hij]; + Scalar xji = one_form[hji]; + if (!float_equal(xij + xji, 0)) { + spdlog::error("Edge pair ({}, {}) have form values ({}, {})", hij, hji, xij, 
xji); + + return false; + } + } + + // Check reflected edges are inverse + for (int hij = 0; hij < num_halfedges; ++hij) { + if (m.type[hij] < 1) break; // only consider reflection structure + + int hji = m.R[hij]; + Scalar xij = one_form[hij]; + Scalar xji = one_form[hji]; + if (!float_equal(xij + xji, 0)) { + spdlog::error("Edge pair ({}, {}) have form values ({}, {})", hij, hji, xij, xji); + + return false; + } + } + + return true; +} + +bool is_closed_one_form(const Mesh& m, const VectorX& one_form) +{ + // Check is one form valid + if (!is_valid_one_form(m, one_form)) { + return false; + } + + // Check one form values of face sum to 0 + int num_faces = m.n_faces(); + for (int f = 0; f < num_faces; ++f) { + int hij = m.h[f]; + int hjk = m.n[hij]; + int hki = m.n[hjk]; + + Scalar xij = one_form[hij]; + Scalar xjk = one_form[hjk]; + Scalar xki = one_form[hki]; + Scalar sum = xij + xjk + xki; + + if (!float_equal(sum, 0)) { + spdlog::info("Face 1-form sum is {}", sum); + return false; + } + } + + return true; +} + +MatrixX build_dual_loop_basis_one_form_matrix( + const Mesh& m, + const std::vector>& dual_loops) +{ + int num_loops = dual_loops.size(); + int num_halfedges = m.n_halfedges(); + + // Columns of matrix are signed segment halfedge indicators + typedef Eigen::Triplet T; + std::vector tripletList; + tripletList.reserve(num_halfedges); + for (int i = 0; i < num_loops; ++i) { + for (const auto& dual_segment : *dual_loops[i]) { + tripletList.push_back(T(dual_segment[0], i, -1.0)); + tripletList.push_back(T(dual_segment[1], i, 1.0)); + } + } + + // Create the matrix from the triplets + MatrixX one_form_matrix; + one_form_matrix.resize(num_halfedges, num_loops); + one_form_matrix.reserve(tripletList.size()); + one_form_matrix.setFromTriplets(tripletList.begin(), tripletList.end()); + return one_form_matrix; +} + +MatrixX build_closed_one_form_matrix( + const Mesh& m, + const std::vector>& homology_basis_loops, + bool eliminate_vertex) +{ + // Initialize matrix 
triplet list + int num_halfedges = m.n_halfedges(); + typedef Eigen::Triplet T; + std::vector tripletList; + tripletList.reserve(2 * num_halfedges); + + // Build v_rep + std::vector v_rep; + int num_vertex_forms; + if (eliminate_vertex) { + Optimization::build_free_vertex_map(m, v_rep, num_vertex_forms); + } else { + v_rep = m.v_rep; + num_vertex_forms = m.n_ind_vertices(); + } + + // Add vertex basis forms + for (int h = 0; h < num_halfedges; ++h) { + int v0 = v_rep[m.to[h]]; + if (v0 >= 0) { + tripletList.push_back(T(h, v0, 1.0)); + } + + int v1 = v_rep[m.to[m.opp[h]]]; + if (v1 >= 0) { + tripletList.push_back(T(h, v1, -1.0)); + } + } + + // Add homology basis forms + int num_loops = homology_basis_loops.size(); + for (int i = 0; i < num_loops; ++i) { + for (const auto& dual_segment : *homology_basis_loops[i]) { + tripletList.push_back(T(dual_segment[0], num_vertex_forms + i, -1.0)); + tripletList.push_back(T(dual_segment[1], num_vertex_forms + i, 1.0)); + } + } + + // Create the matrix from the triplets + MatrixX one_form_matrix(num_halfedges, num_vertex_forms + num_loops); + one_form_matrix.reserve(tripletList.size()); + one_form_matrix.setFromTriplets(tripletList.begin(), tripletList.end()); + return one_form_matrix; +} + +MatrixX build_one_form_integral_matrix( + const Mesh& m, + const std::vector& cut_h, + std::vector& is_cut_h, + int start_h) +{ + int num_halfedges = m.n_halfedges(); + std::vector> integral_matrix_lol(num_halfedges, std::map()); + + bool cut_given = !cut_h.empty(); + is_cut_h = std::vector(m.n_halfedges(), false); + + // Initialize the per-halfedge integral with the starting halfedge + int h = m.n[m.n[start_h]]; + integral_matrix_lol[h] = std::map(); + h = m.n[h]; + integral_matrix_lol[h][h] = 1.0; + + // Initialize list of halfedges to process and record of seen faces + std::queue halfedges_to_process; + halfedges_to_process.push(h); + auto is_face_seen = std::vector(m.n_faces(), false); + is_face_seen[m.f[h]] = true; + + + // Process 
opposite halfedge if it is not cut + if (cut_given && cut_h[h]) { + int ho = m.opp[h]; + is_cut_h[h] = true; + is_cut_h[ho] = true; + } else { + // Copy per corner values across the edge + int ho = m.opp[h]; + integral_matrix_lol[ho] = integral_matrix_lol[m.n[m.n[h]]]; + integral_matrix_lol[m.n[m.n[ho]]] = integral_matrix_lol[h]; + + // Update records + is_face_seen[m.f[ho]] = true; + halfedges_to_process.push(ho); + } + + // Process one face at a time until all halfedges are seen + while (!halfedges_to_process.empty()) { + // Get triangle halfedges for the next halfedge to process + h = halfedges_to_process.front(); + halfedges_to_process.pop(); + int hn = m.n[h]; + int hp = m.n[hn]; + + // Integrate over hn to finish defining halfedges for the face + integral_matrix_lol[hn] = integral_matrix_lol[h]; + integral_matrix_lol[hn][hn] += 1.0; + + // Process edges of the face, + for (int hc : {hn, hp}) { + int ho = m.opp[hc]; + + // Skip edges adjacent to seen faces or that are cut + if (is_face_seen[m.f[ho]] || (cut_given && cut_h[ho])) { + is_cut_h[hc] = true; + is_cut_h[ho] = true; + continue; + } + + // Copy per corner values across the edge + integral_matrix_lol[ho] = integral_matrix_lol[m.n[m.n[hc]]]; + integral_matrix_lol[m.n[m.n[ho]]] = integral_matrix_lol[hc]; + + // Update records + is_face_seen[m.f[ho]] = true; + halfedges_to_process.push(ho); + } + } + + // Copy list of lists to triplets + typedef Eigen::Triplet T; + std::vector tripletList; + tripletList.reserve(num_halfedges); + for (int h = 0; h < num_halfedges; ++h) { + for (auto it : integral_matrix_lol[h]) { + tripletList.push_back(T(h, it.first, it.second)); + } + } + + // Create the matrix from the triplets + MatrixX integral_matrix; + integral_matrix.resize(num_halfedges, num_halfedges); + integral_matrix.reserve(tripletList.size()); + integral_matrix.setFromTriplets(tripletList.begin(), tripletList.end()); + + return integral_matrix; +} + +VectorX integrate_one_form( + const Mesh& m, + const 
VectorX& one_form, + const std::vector& cut_h, + std::vector& is_cut_h, + int start_h) +{ + assert(is_closed_one_form(m, one_form)); + + // Integrate one form using the integration matrix + MatrixX integral_matrix = build_one_form_integral_matrix(m, cut_h, is_cut_h, start_h); + VectorX integrated_one_form = integral_matrix * one_form; + + // TODO: Validate integration + return integrated_one_form; +} + +// TODO: Differentiation code sketch +// // validation phi values +// for (int i = 0; i < m.n_halfedges(); i++) { +// Scalar u0 = phi[i]; +// Scalar u1 = phi[m.n[m.n[i]]]; +// Scalar _xi = u0 - u1; +// if (abs(_xi - xi[i]) > 1e-12) +// std::cerr << std::setprecision(17) << "error (" << _xi << ", " << xi[i] +// << "): " << abs(_xi - xi[i]) << std::endl; +// } + +MatrixX build_integrated_one_form_scaling_matrix(const Mesh& m) +{ + // Generate map from halfedges hjk to halfedges hij and hjk with tips at the base + // and tip of hjk respectively since the integrated one form has data at halfedge tip + // corners + int num_halfedges = m.n_halfedges(); + typedef Eigen::Triplet T; + std::vector tripletList; + tripletList.reserve(2 * num_halfedges); + for (int hij = 0; hij < num_halfedges; ++hij) { + int hjk = m.n[hij]; + tripletList.push_back(T(hjk, hij, 1.0)); + tripletList.push_back(T(hjk, hjk, 1.0)); + } + + // Create the matrix from the triplets + MatrixX scaling_matrix; + scaling_matrix.resize(num_halfedges, num_halfedges); + scaling_matrix.reserve(tripletList.size()); + scaling_matrix.setFromTriplets(tripletList.begin(), tripletList.end()); + + return scaling_matrix; +} + +VectorX scale_halfedges_by_integrated_one_form( + const Mesh& m, + const VectorX& metric_coords, + const VectorX& integrated_one_form) +{ + int num_halfedges = metric_coords.size(); + VectorX scaled_metric_coords(num_halfedges); + for (int hij = 0; hij < num_halfedges; ++hij) { + // Scale NEXT halfedge hjk (as we can access the adjacent corner values from the tips of hij + // and hjk) + int hjk = 
m.n[hij]; + scaled_metric_coords[hjk] = + metric_coords[hjk] + (integrated_one_form[hij] + integrated_one_form[hjk]); + } + + return scaled_metric_coords; +} + +VectorX scale_edges_by_zero_form( + const Mesh& m, + const VectorX& metric_coords, + const VectorX& zero_form) +{ + int num_halfedges = metric_coords.size(); + VectorX scaled_metric_coords(num_halfedges); + for (int hij = 0; hij < num_halfedges; ++hij) { + // Get adjacent vertices + int hji = m.opp[hij]; + int vi = m.v_rep[m.to[hji]]; + int vj = m.v_rep[m.to[hij]]; + + // Scale halfedge + scaled_metric_coords[hij] = metric_coords[hij] + (zero_form[vi] + zero_form[vj]); + } + + return scaled_metric_coords; +} + + +} // namespace Holonomy +} // namespace Penner \ No newline at end of file diff --git a/src/holonomy/core/homology_basis.cpp b/src/holonomy/core/homology_basis.cpp new file mode 100644 index 0000000..c37d58e --- /dev/null +++ b/src/holonomy/core/homology_basis.cpp @@ -0,0 +1,208 @@ +#include "holonomy/core/homology_basis.h" + +#include "util/vector.h" +#include "holonomy/core/dual_lengths.h" + +#include +#include "util/embedding.h" + +namespace Penner { +namespace Holonomy { + +// Construct a clockwise sequence of dual segments around a vertex in the mesh +std::vector build_clockwise_vertex_dual_segment_sequence( + const Mesh& m, + int vertex_index) +{ + std::vector dual_loop(0); + + // Circulate clockwise around the vertex to build the loop + int h_start = m.opp[m.out[vertex_index]]; + int h_iter = h_start; + do { + // Build clockwise dual segment in current face + int h_next = m.n[h_iter]; + dual_loop.push_back({h_iter, h_next}); + + // Iterate to next face + h_iter = m.opp[h_next]; + } while (h_iter != h_start); + + return dual_loop; +} + +// Construct a counterclockwise sequence of dual segments around a vertex in the mesh +std::vector build_counterclockwise_vertex_dual_segment_sequence( + const Mesh& m, + int vertex_index) +{ + // Use reverse of the clockwise dual segment + return 
reverse_dual_path(build_clockwise_vertex_dual_segment_sequence(m, vertex_index)); +} + +HomotopyBasisGenerator::HomotopyBasisGenerator(const Mesh& m, int root, Weighting weighting) + : m_mesh(m) +{ + // Build halfedge to edge maps + build_edge_maps(m, m_he2e, m_e2he); + + // Build spanning tree and cotree with given weighting + if (weighting == Weighting::minimal_homotopy) { + // Compute dual tree with shortest path tree + std::vector dual_edge_lengths = compute_dual_edge_lengths(m); + m_dual_tree = DualTree(m, dual_edge_lengths, root, true); + + // Compute primal cotree with maximal dual loop lengths + std::vector dual_loop_lengths = + compute_dual_loop_length_weights(m, dual_edge_lengths, m_dual_tree); + m_primal_tree = PrimalCotree(m, vector_negate(dual_loop_lengths), m_dual_tree); + } + // Build + else if (weighting == Weighting::maximal_homotopy) { + // Compute dual tree with shortest path tree + std::vector dual_edge_lengths = compute_dual_edge_lengths(m); + m_dual_tree = DualTree(m, vector_negate(dual_edge_lengths), root, true); + + // Compute primal cotree with maximal dual loop lengths + std::vector dual_loop_lengths = + compute_dual_loop_length_weights(m, dual_edge_lengths, m_dual_tree); + m_primal_tree = PrimalCotree(m, dual_loop_lengths, m_dual_tree); + } + // Use min-max trees with dual edge weights + else if (weighting == Weighting::dual_min_primal_max) { + std::vector dual_edge_lengths = compute_dual_edge_lengths(m); + m_dual_tree = DualTree(m, dual_edge_lengths, root); + m_primal_tree = PrimalCotree(m, vector_negate(dual_edge_lengths), m_dual_tree); + } + // Use min-max trees with primal edge weights + else if (weighting == Weighting::primal_min_dual_max) { + m_primal_tree = PrimalTree(m, m.l, root); + m_dual_tree = DualCotree(m, vector_negate(m.l), m_primal_tree); + } + + // Find homotopy basis handle edges that are not in the tree or cotree + int num_edges = m_e2he.size(); + for (int ei = 0; ei < num_edges; ++ei) { + int h = m_e2he[ei]; + + // 
Skip edges in double or boundary + if ((m_mesh.type[h] == 2) || (m_mesh.opp[m_mesh.R[h]] == h)) continue; + + if ((!m_primal_tree.is_edge_in_tree(ei)) && (!m_dual_tree.is_edge_in_tree(ei))) { + m_homotopy_basis_edge_handles.push_back(ei); + } + } +}; + +// Trace a dual vertex back to the root of the dual tree +std::tuple, std::vector> HomotopyBasisGenerator::trace_dual_vertex_to_root(int face_index) const +{ + std::vector dual_path = {face_index}; + std::vector dual_edges = {}; + + // Trace up the dual tree until a root is reached + int curr_face_index = face_index; + while (!m_dual_tree.is_root(curr_face_index)) { + // Get parent face of current face + int edge_index = m_dual_tree.out(curr_face_index); + curr_face_index = m_dual_tree.to(edge_index); + assert(m_dual_tree.from(edge_index) == dual_path.back()); + + // Add face and edge to the path + dual_path.push_back(curr_face_index); + dual_edges.push_back(m_dual_tree.edge(edge_index)); + } + + return std::make_tuple(dual_path, dual_edges); +} + +std::tuple, std::vector> HomotopyBasisGenerator::construct_homotopy_basis_edge_loop(int index) const +{ + assert(index >= 0); + assert(index < n_homology_basis_loops()); + + // Construct path from both sides of the edge to the root face of the dual tree + int handle_edge = m_homotopy_basis_edge_handles[index]; + int left_halfedge = m_e2he[handle_edge]; + int right_halfedge = m_mesh.opp[left_halfedge]; + int left_face = m_mesh.f[left_halfedge]; + int right_face = m_mesh.f[right_halfedge]; + auto [left_dual_path, left_dual_edges] = trace_dual_vertex_to_root(left_face); + auto [right_dual_path, right_dual_edges] = trace_dual_vertex_to_root(right_face); + + // Find common root path of the left and right paths + int left_path_size = left_dual_path.size(); + int right_path_size = right_dual_path.size(); + + // Combine dual paths to generate a simple loop + std::vector dual_loop(0); + std::vector dual_edges(0); + dual_loop.reserve(left_path_size + right_path_size); + 
dual_edges.reserve(left_path_size + right_path_size); + dual_edges.push_back(handle_edge); + for (int i = 0; i < left_path_size-1; ++i) { + dual_loop.push_back(left_dual_path[i]); // Add left path to root (exclusive) + dual_edges.push_back(left_dual_edges[i]); // Add left path to root (exclusive) + } + dual_loop.push_back(left_dual_path[left_path_size - 1]); // Add common root + for (int i = right_path_size - 2; i >= 0; --i) { + dual_loop.push_back(right_dual_path[i]); // Add right path from root (exclusive) + dual_edges.push_back(right_dual_edges[i]); // Add right path from root (exclusive) + } + + return std::make_tuple(dual_loop, dual_edges); +} + +std::tuple, std::vector> HomotopyBasisGenerator::construct_homology_basis_edge_loop(int index) const +{ + assert(index >= 0); + assert(index < n_homology_basis_loops()); + + // Construct path from both sides of the edge to the root face of the dual tree + int handle_edge = m_homotopy_basis_edge_handles[index]; + int left_halfedge = m_e2he[handle_edge]; + int right_halfedge = m_mesh.opp[left_halfedge]; + int left_face = m_mesh.f[left_halfedge]; + int right_face = m_mesh.f[right_halfedge]; + auto [left_dual_path, left_dual_edges] = trace_dual_vertex_to_root(left_face); + auto [right_dual_path, right_dual_edges] = trace_dual_vertex_to_root(right_face); + + // Find common root path of the left and right paths + int trim_root_offset = 0; + int left_path_size = left_dual_path.size(); + int right_path_size = right_dual_path.size(); + while ((trim_root_offset < left_path_size) && (trim_root_offset < right_path_size) && + (left_dual_path[left_path_size - 1 - trim_root_offset] == + right_dual_path[right_path_size - 1 - trim_root_offset])) { + trim_root_offset++; + } + assert( + left_dual_path[left_path_size - trim_root_offset] == + right_dual_path[right_path_size - trim_root_offset]); + + // Combine dual paths and trim common path to root to generate a simple loop + std::vector dual_loop(0); + std::vector dual_edges(0); + 
dual_loop.reserve(left_path_size + right_path_size); + dual_edges.reserve(left_path_size + right_path_size); + dual_edges.push_back(handle_edge); + for (int i = 0; i < left_path_size - trim_root_offset; ++i) { + dual_loop.push_back(left_dual_path[i]); // Add left path to trim root + dual_edges.push_back(left_dual_edges[i]); + } + dual_loop.push_back(left_dual_path[left_path_size - trim_root_offset]); // Add trim root + for (int i = right_path_size - 1 - trim_root_offset; i >= 0; --i) { + dual_loop.push_back(right_dual_path[i]); // Add right path from trim root + dual_edges.push_back(right_dual_edges[i]); + } + + return std::make_tuple(dual_loop, dual_edges); +} + +std::vector HomotopyBasisGenerator::construct_homology_basis_loop(int index) const +{ + return std::get<0>(construct_homology_basis_edge_loop(index)); +} + +} // namespace Holonomy +} // namespace Penner \ No newline at end of file diff --git a/src/holonomy/core/intrinsic_field.cpp b/src/holonomy/core/intrinsic_field.cpp new file mode 100644 index 0000000..e97f84d --- /dev/null +++ b/src/holonomy/core/intrinsic_field.cpp @@ -0,0 +1,1196 @@ +#include "holonomy/core/intrinsic_field.h" + +#include +#include +#include +#include + +#include + +#include "holonomy/core/field.h" +#include "util/spanning_tree.h" +#include "holonomy/core/viewer.h" +#include "holonomy/core/forms.h" + +#include "optimization/core/constraint.h" +#include "util/vector.h" + +#include + +#ifdef ENABLE_VISUALIZATION +#include "polyscope/point_cloud.h" +#include "polyscope/surface_mesh.h" +#endif + +namespace Penner { +namespace Holonomy { + +// compute the cone angles at vertices +VectorX compute_cone_angles(const Mesh& m, const VectorX& alpha) +{ + // Sum up angles around vertices + VectorX t(m.n_ind_vertices()); + t.setZero(); + for (int h = 0; h < m.n_halfedges(); h++) { + t[m.v_rep[m.to[m.n[h]]]] += alpha[h]; + } + return t; +} + +// build a bfs forest with faces adjacent to the boundaries as the roots +std::vector 
build_double_dual_bfs_forest(const Mesh& m) +{ + int num_faces = m.n_faces(); + int num_halfedges = m.n_halfedges(); + std::vector is_processed_face(num_faces, false); + std::vector halfedge_from_face(num_faces, -1); + + // Split mesh along boundary by marking all doubled faces as processed + for (int h = 0; h < num_halfedges; ++h) { + if ((m.type[h] == 2) || (m.type[m.opp[h]] == 2)) { + is_processed_face[m.f[h]] = true; + } + } + + // Initialize queue with boundary faces to process + std::deque faces_to_process; + for (int h = 0; h < num_halfedges; ++h) { + if ((m.type[h] == 2) || (m.type[m.opp[h]] == 2)) { + int fi = m.f[h]; + is_processed_face[fi] = true; + faces_to_process.push_back(fi); + } + } + + // Perform Prim or Dijkstra algorithm + while (!faces_to_process.empty()) { + // Get the next face to process + int fi = faces_to_process.front(); + faces_to_process.pop_front(); + + // Iterate over the face circulator via halfedges + int h_start = m.h[fi]; + int hij = h_start; + do { + // Get the face in the one ring at the tip of the halfedge + int fj = m.f[m.opp[hij]]; + + // Check if the edge to the tip face is the best seen so far + if (!is_processed_face[fj]) { + is_processed_face[fj] = true; + halfedge_from_face[fj] = hij; + faces_to_process.push_back(fj); + } + + // Progress to the next halfedge in the face circulator + hij = m.n[hij]; + } while (hij != h_start); + } + + return halfedge_from_face; +} + +// build a dfs forest with faces adjacent to the boundaries as the roots +std::vector build_double_dual_dfs_forest(const Mesh& m) +{ + int num_faces = m.n_faces(); + int num_halfedges = m.n_halfedges(); + std::vector is_processed_face(num_faces, false); + std::vector halfedge_from_face(num_faces, -1); + + // Split mesh along boundary by marking all doubled faces as processed + for (int h = 0; h < num_halfedges; ++h) { + if (m.type[h] == 2) { + is_processed_face[m.f[h]] = true; + } + } + + // Initialize queue with face to process + std::deque faces_to_process; 
+ for (int fi = 0; fi < num_faces; ++fi) { + if (m.type[m.h[fi]] == 1) + { + faces_to_process.push_back(fi); + break; + } + } + + // Perform Prim or Dijkstra algorithm + while (!faces_to_process.empty()) { + // Get the next face to process + int fi = faces_to_process.back(); + faces_to_process.pop_back(); + if (is_processed_face[fi]) continue; + is_processed_face[fi] = true; + + // Iterate over the face circulator via halfedges + int h_start = m.h[fi]; + int hij = h_start; + do { + // Get the face in the one ring at the tip of the halfedge + int fj = m.f[m.opp[hij]]; + faces_to_process.push_back(fj); + + // Check if the edge to the tip face is the best seen so far + if (!is_processed_face[fj]) { + halfedge_from_face[fj] = hij; + } + + // Progress to the next halfedge in the face circulator + hij = m.n[hij]; + } while (hij != h_start); + } + + return halfedge_from_face; +} + + +// compute the signed angle from the given halfedge h to the reference halfedge in the same face +Scalar IntrinsicNRosyField::compute_angle_to_reference(const Mesh& m, const VectorX& he2angle, int h) const +{ + // Get reference edges for the adjacent face + int f = m.f[h]; + int h_ref = face_reference_halfedge[f]; + + // Determine local orientation of h and h_ref + int hij = h; + int hjk = m.n[hij]; + int hki = m.n[hjk]; + + // Reference halfedge is input halfedge + if (h_ref == hij) { + return 0.0; + } + // Reference halfedge is ccw from input halfedge + else if (h_ref == hjk) { + return (M_PI - he2angle[hki]); + } + // Reference halfedge is cw from input halfedge + else if (h_ref == hki) { + return (he2angle[hjk] - M_PI); + } + // Face is not triangular + else { + throw std::runtime_error("Cannot compute field for mesh with nontriangular face"); + return 0.0; + } +} + +// compute the signed angle from frame hij to hji +// TODO think through this +Scalar IntrinsicNRosyField::compute_angle_between_frames(const Mesh& m, const VectorX& he2angle, int h) const +{ + // Get angles from the edge to 
the reference halfedges for the adjacent faces + int hij = h; + int hji = m.opp[hij]; + Scalar kappa0 = compute_angle_to_reference(m, he2angle, hij); + Scalar kappa1 = compute_angle_to_reference(m, he2angle, hji); + + // Compute angle between frames in range [-pi, pi] + return (pos_fmod(2 * M_PI + kappa0 - kappa1, 2 * M_PI) - M_PI); +} + +void IntrinsicNRosyField::initialize_local_frames(const Mesh& m) +{ + // For each face, select a reference halfedge + face_reference_halfedge = m.h; + + // Set initial face angles to 0 + int num_faces = m.n_faces(); + theta.setZero(num_faces); + + // Mark faces as free except one + is_face_fixed = std::vector(num_faces, false); + is_face_fixed[0] = true; + + // Compute corner angles + Optimization::corner_angles(m, he2angle, he2cot); + + // Compute the angle between reference halfedges across faces + int num_halfedges = m.n_halfedges(); + kappa.setZero(num_halfedges); + for (int hij = 0; hij < num_halfedges; ++hij) { + // Only process each edge once + int hji = m.opp[hij]; + if (hij < hji) continue; + + // compute oriented angles between frames across edge eij + kappa[hij] = compute_angle_between_frames(m, he2angle, hij); + kappa[hji] = -kappa[hij]; + } +} + +void IntrinsicNRosyField::initialize_double_local_frames(const Mesh& m) +{ + // For each face, select a reference halfedge, ensuring consistency of doubled reference + int num_halfedges = m.n_halfedges(); + int num_faces = m.n_faces(); + face_reference_halfedge.resize(num_faces); + theta.setZero(num_faces); + for (int f = 0; f < num_faces; ++f) { + int h = m.h[f]; + + // Just use face to halfedge map for faces in original mesh + if (m.type[h] != 2) { + face_reference_halfedge[f] = h; + theta[f] = 0.; + } + // Use reflection conjugated halfedge for doubled mesh + else { + int Rf = m.f[m.R[h]]; // get reflection of the face + face_reference_halfedge[f] = m.R[m.h[Rf]]; + theta[f] = M_PI; + } + } + + // Ensure all reference halfedges on boundary are aligned + for (int h = 0; h < 
num_halfedges; ++h) { + if ((m.opp[m.R[h]] == h) && (m.type[h] == 1)) { + int f = m.f[h]; + int Rf = m.f[m.R[h]]; // get reflection of the face + face_reference_halfedge[f] = h; + face_reference_halfedge[Rf] = m.R[h]; + } + } + + // Mark double and faces on the boundary as fixed + is_face_fixed = std::vector(num_faces, false); + for (int f = 0; f < num_faces; ++f) { + if (m.type[m.h[f]] == 2) { + is_face_fixed[f] = true; + } + } + for (int h = 0; h < num_halfedges; ++h) { + if ((m.opp[m.R[h]] == h) && (m.type[h] == 1)) { + is_face_fixed[m.f[h]] = true; + } + } + + // Compute corner angles + Optimization::corner_angles(m, he2angle, he2cot); + + // Compute the angle between reference halfedges across faces + kappa.setZero(num_halfedges); + for (int hij = 0; hij < num_halfedges; ++hij) { + // Only process each edge once + int hji = m.opp[hij]; + if (hij < hji) continue; + if ((m.type[hij] == 2) && (m.type[hji] == 2)) continue; + + // compute oriented angles between frames across edge eij and reflected edge + kappa[hij] = compute_angle_between_frames(m, he2angle, hij); + kappa[hji] = -kappa[hij]; + kappa[m.R[hij]] = -kappa[hij]; + kappa[m.opp[m.R[hij]]] = kappa[hij]; + } +} + +void IntrinsicNRosyField::initialize_period_jump(const Mesh& m) +{ + // Get edge maps + build_edge_maps(m, he2e, e2he); + + // Build dual spanning tree + int num_halfedges = m.n_halfedges(); + DualTree dual_tree(m, std::vector(num_halfedges, 0.0)); + + // Initialize period jumps of value pi/2 to 0 and mark dual tree edges as fixed + period_jump.setZero(num_halfedges); + period_value = VectorX::Constant(num_halfedges, M_PI / 2.); + is_period_jump_fixed = std::vector(num_halfedges, false); + for (int h = 0; h < num_halfedges; ++h) { + if (dual_tree.is_edge_in_tree(he2e[h])) { + is_period_jump_fixed[h] = true; + } + } + + // TODO: Handle multiple fixed faces and boundary constraints +} + +void IntrinsicNRosyField::initialize_double_period_jump(const Mesh& m) +{ + // Get edge maps + 
build_edge_maps(m, he2e, e2he); + + // Initialize period jumps of value pi/2 to 0 + int num_halfedges = m.n_halfedges(); + period_value = VectorX::Constant(num_halfedges, M_PI / 2.); + + // Change period value for boundary edges to pi + for (int h = 0; h < num_halfedges; ++h) { + if (m.opp[m.R[h]] == h) { + period_value[h] = M_PI; + } + } + + // Mark edges in dual spanning tree and double as fixed + // TODO: Mark first boundary period jump as fixed? + constrain_bd = false; + is_period_jump_fixed = std::vector(num_halfedges, false); + if (constrain_bd) + { + std::vector halfedges_from_face; + halfedges_from_face = build_double_dual_bfs_forest(m); + for (int h : halfedges_from_face) { + if (h < 0) continue; + for (int h_rel : { h, m.opp[h], m.R[h], m.opp[m.R[h]] }) { + is_period_jump_fixed[h_rel] = true; + } + } + } else { + std::vector halfedges_from_face; + halfedges_from_face = build_double_dual_bfs_forest(m); + for (int h : halfedges_from_face) { + if (h < 0) continue; + for (int h_rel : { h, m.opp[h], m.R[h], m.opp[m.R[h]] }) { + is_period_jump_fixed[h_rel] = true; + } + } + for (int h = 0; h < num_halfedges; ++h) { + if ((m.opp[m.R[h]] == h) && (m.type[h] == 1)) { + is_period_jump_fixed[h] = true; + is_period_jump_fixed[m.opp[h]] = true; + } + } + + // DualTree dual_tree(m, std::vector(num_halfedges, 0.0)); + // for (int h = 0; h < num_halfedges; ++h) { + // if ((m.type[h] == 2) || (m.type[m.opp[h]] == 2)) continue; + + // if (dual_tree.is_edge_in_tree(he2e[h])) { + // is_period_jump_fixed[h] = true; + // } + // } + // for (int h = 0; h < num_halfedges; ++h) { + // if (m.opp[m.R[h]] == h) { + // is_period_jump_fixed[h] = true; + // is_period_jump_fixed[m.opp[h]] = true; + // break; + // } + // } + } + + // set the period jump (necessary for jumps between fixed faces) + period_jump.setZero(num_halfedges); + for (int hij = 0; hij < num_halfedges; ++hij) { + int hji = m.opp[hij]; + if (hij < hji) continue; // only process each edge once + if ((m.type[hij] == 2) && 
(m.type[m.opp[hij]] == 2)) continue; + + int fi = m.f[hij]; + int fj = m.f[hji]; + period_jump[hij] = (int)(round((theta[fi] - theta[fj] - kappa[hij])/period_value[hij])); + period_jump[hji] = -period_jump[hij]; + period_jump[m.R[hij]] = -period_jump[hij]; + period_jump[m.opp[m.R[hij]]] = period_jump[hij]; + } + + // TODO: Handle multiple fixed faces and boundary constraints +} + +std::tuple, std::vector> build_variable_index( + const std::vector& is_fixed) +{ + int num_var = is_fixed.size(); + + // Allocate maps from variables to all values to variable and the left inverse + std::vector all2var(num_var, -1); + std::vector var2all; + var2all.reserve(num_var); + + // Build maps + for (int i = 0; i < num_var; ++i) { + if (!is_fixed[i]) { + all2var[i] = var2all.size(); + var2all.push_back(i); + } + } + + return std::make_tuple(var2all, all2var); +} + +void IntrinsicNRosyField::initialize_mixed_integer_system(const Mesh& m) +{ + int num_faces = m.n_faces(); + int num_halfedges = m.n_halfedges(); + + // Count and tag the variables + face_var_id = std::vector(num_faces, -1); + halfedge_var_id = std::vector(num_halfedges, -1); + int count = 0; + for (int fi = 0; fi < num_faces; ++fi) { + if (!is_face_fixed[fi]) { + face_var_id[fi] = count; + count++; + } + } + int num_face_var = count; + for (int hij = 0; hij < num_halfedges; ++hij) { + if (hij < m.opp[hij]) continue; // only process each edge once + if (!is_period_jump_fixed[hij]) { + halfedge_var_id[hij] = count; + count++; + } + } + int num_var = count; + int num_edge_var = num_var - num_face_var; + + b.setZero(num_var); + std::vector> T; + T.reserve(3 * 4 * num_edge_var); + + for (int hij = 0; hij < num_halfedges; ++hij) { + if (hij < m.opp[hij]) continue; // only process each edge once + int hji = m.opp[hij]; + int f0 = m.f[hij]; + int f1 = m.f[hji]; + int f0_var = face_var_id[f0]; + int f1_var = face_var_id[f1]; + int e_var = halfedge_var_id[hij]; + int row; + + // partial with respect to f0 + if 
(!is_face_fixed[f0]) { + row = f0_var; + T.emplace_back(row, f0_var, 2); + + if (is_face_fixed[f1]) { + b(row) += 2 * theta[f1]; + } else { + T.emplace_back(row, f1_var, -2); + } + + if (is_period_jump_fixed[hij]) { + b(row) += -2. * period_value[hij] * period_jump[hij]; + } else { + T.emplace_back(row, e_var, 2. * period_value[hij]); + } + + b(row) += -2 * kappa[hij]; + } + // partial with respect to f1 + if (!is_face_fixed[f1]) { + row = f1_var; + T.emplace_back(row, f1_var, 2); + + if (is_face_fixed[f0]) { + b(row) += 2 * theta[f0]; + } else { + T.emplace_back(row, f0_var, -2); + } + + if (is_period_jump_fixed[hij]) { + b(row) += 2. * period_value[hij] * period_jump[hij]; + } else { + T.emplace_back(row, e_var, -2. * period_value[hij]); + } + + b(row) += 2 * kappa[hij]; + } + // partial with respect to eij + if (!is_period_jump_fixed[hij]) { + row = e_var; + T.emplace_back(row, e_var, 2. * period_value[hij] * period_value[hij]); + + if (is_face_fixed[f0]) { + b(row) += -2. * period_value[hij] * theta[f0]; + } else { + T.emplace_back(row, f0_var, 2. * period_value[hij]); + } + + if (is_face_fixed[f1]) { + b(row) += 2. * period_value[hij] * theta[f1]; + } else { + T.emplace_back(row, f1_var, -2. * period_value[hij]); + } + + b(row) += -2. 
* period_value[hij] * kappa[hij]; + } + } + + A.resize(num_var, num_var); + A.setFromTriplets(T.begin(), T.end()); + spdlog::debug("Cross field system vector has norm {}", b.norm()); + spdlog::debug("Cross field system matrix has norm {}", A.norm()); + + // TODO: Soft constraints +} + +void IntrinsicNRosyField::initialize_double_mixed_integer_system(const Mesh& m) +{ + int num_faces = m.n_faces(); + int num_halfedges = m.n_halfedges(); + + // Count and tag the variables + face_var_id = std::vector(num_faces, -1); + halfedge_var_id = std::vector(num_halfedges, -1); + int count = 0; + for (int fi = 0; fi < num_faces; ++fi) { + if ((m.type[m.h[fi]] == 1) && (!is_face_fixed[fi])) { + face_var_id[fi] = count; + count++; + } + } + int num_face_var = count; + for (int hij = 0; hij < num_halfedges; ++hij) { + int hji = m.opp[hij]; + if (is_period_jump_fixed[hij]) continue; + if ((m.type[hij] == 2) && (m.type[hji] == 2)) + continue; // don't process interior reflection + if (hij < hji) continue; // unique index check + + halfedge_var_id[hij] = count; + count++; + } + int num_var = count; + int num_edge_var = num_var - num_face_var; + + b.setZero(num_var); + std::vector> T; + T.reserve(3 * 4 * num_edge_var); + + for (int hij = 0; hij < num_halfedges; ++hij) { + if (hij < m.opp[hij]) continue; // only process each edge once + if ((m.type[hij] == 2) && (m.type[m.opp[hij]] == 2)) continue; + int hji = m.opp[hij]; + int f0 = m.f[hij]; + int f1 = m.f[hji]; + int f0_var = face_var_id[f0]; + int f1_var = face_var_id[f1]; + int e_var = halfedge_var_id[hij]; + int row; + + bool use_boundary_energy = true; + bool is_boundary = (m.type[hij] == 2) || (m.type[m.opp[hij]] == 2); + if ((use_boundary_energy) && (is_boundary)) { + if (!is_face_fixed[f0]) { + row = f0_var; + T.emplace_back(f0_var, f0_var, 4.); + + if (is_period_jump_fixed[hij]) { + b(row) += -2. * period_value[hij] * period_jump[hij]; + } else { + T.emplace_back(row, e_var, 2. 
* period_value[hij]); + } + } + + if (!is_face_fixed[f1]) { + row = f1_var; + T.emplace_back(f1_var, f1_var, 4.); + + if (is_period_jump_fixed[hij]) { + b(row) += 2. * period_value[hij] * period_jump[hij]; + } else { + T.emplace_back(row, e_var, -2. * period_value[hij]); + } + } + + if (!is_period_jump_fixed[hij]) { + row = e_var; + T.emplace_back(row, e_var, 2. * period_value[hij] * period_value[hij]); + + if (!is_face_fixed[f0]) { + T.emplace_back(row, f0_var, 4. * period_value[hij]); + } + + if (!is_face_fixed[f1]) { + T.emplace_back(row, f1_var, -4. * period_value[hij]); + } + } + continue; + } + + // partial with respect to f0 + if (!is_face_fixed[f0]) { + row = f0_var; + T.emplace_back(row, f0_var, 4.); + + if (is_face_fixed[f1]) { + b(row) += 4. * theta[f1]; + } else { + b(row) += 4. * theta[f1]; + T.emplace_back(row, f1_var, -4.); + } + + if (is_period_jump_fixed[hij]) { + b(row) += -4. * period_value[hij] * period_jump[hij]; + } else { + T.emplace_back(row, e_var, 4. * period_value[hij]); + } + + b(row) += -4. * kappa[hij]; + } + // partial with respect to f1 + if (!is_face_fixed[f1]) { + row = f1_var; + T.emplace_back(row, f1_var, 4); + + if (is_face_fixed[f0]) { + b(row) += 4. * theta[f0]; + } else { + b(row) += 4. * theta[f0]; + T.emplace_back(row, f0_var, -4.); + } + + if (is_period_jump_fixed[hij]) { + b(row) += 4. * period_value[hij] * period_jump[hij]; + } else { + T.emplace_back(row, e_var, -4. * period_value[hij]); + } + + b(row) += 4. * kappa[hij]; + } + // partial with respect to eij + if (!is_period_jump_fixed[hij]) { + row = e_var; + T.emplace_back(row, e_var, 4. * period_value[hij] * period_value[hij]); + + if (is_face_fixed[f0]) { + b(row) += -4. * period_value[hij] * theta[f0]; + } else { + b(row) += -4. * period_value[hij] * theta[f0]; + T.emplace_back(row, f0_var, 4. * period_value[hij]); + } + + if (is_face_fixed[f1]) { + b(row) += 4. * period_value[hij] * theta[f1]; + } else { + b(row) += 4. 
* period_value[hij] * theta[f1]; + T.emplace_back(row, f1_var, -4. * period_value[hij]); + } + + b(row) += -4. * period_value[hij] * kappa[hij]; + } + } + + A.resize(num_var, num_var); + A.setFromTriplets(T.begin(), T.end()); + + // TODO: Soft constraints + if (!constrain_bd) return; + + // List boundary halfedges in the original mesh + std::vector bd_halfedges = {}; + for (int hij = 0; hij < num_halfedges; ++hij) { + if ((m.type[hij] == 1) && (m.type[m.opp[hij]] == 2)) { + bd_halfedges.push_back(hij); + } + } + + // Build constraint + int num_vertices = m.n_vertices(); + int num_bd_vertices = bd_halfedges.size(); + int num_int_vertices = num_vertices - num_bd_vertices; + int euler_char = m.n_vertices() - m.n_edges() + m.n_faces(); + Scalar target_curvature = 2. * M_PI * euler_char; + VectorX cone_angles = compute_cone_angles(m, he2angle); + VectorX target_indices(num_bd_vertices); + bool use_corners = true; + Scalar boundary_curvature = 0.; + for (int i = 0; i < num_bd_vertices; ++i) { + int hij = bd_halfedges[i]; + int vi = m.v_rep[m.to[m.opp[hij]]]; + + // Initialize constant to 2 pi + target_indices(i) = 2. * M_PI; + if (use_corners) + { + target_indices(i) = M_PI * max(round(cone_angles[vi]/ M_PI), 1.); + spdlog::trace("Setting cone constraint {} to {}", vi, target_indices(i)); + } + boundary_curvature += ((2. 
* M_PI) - target_indices(i)); + } + //C.setZero(num_bd_vertices, num_var + 1); + + for (int i = 0; i < num_bd_vertices; ++i) { + int h_var = bd_halfedges[i]; + h_var = m.opp[m.n[m.n[h_var]]]; + + // TODO Generalize to case where bd not fixed + while (m.type[h_var] != 2) { + if (!is_period_jump_fixed[h_var]) + { + int row; + int sign; + if (h_var < m.opp[h_var]) + { + row = halfedge_var_id[m.opp[h_var]]; + sign = -100000.; + } + else { + row = halfedge_var_id[h_var]; + sign = 100000.; + } + int hij = bd_halfedges[i]; + + if (!is_period_jump_fixed[hij]) { + if (hij < m.opp[hij]) + { + int e_var = halfedge_var_id[m.opp[hij]]; + T.emplace_back(row, e_var, -sign * period_value[hij]); + } else { + int e_var = halfedge_var_id[hij]; + T.emplace_back(row, e_var, sign * period_value[hij]); + } + } else { + b(row) -= sign * period_value[hij] * period_jump[hij]; + } + b(row) += sign * kappa[hij]; + b(row) -= sign * 2. * he2angle[m.n[hij]]; + + hij = m.opp[m.n[m.n[hij]]]; + + // Circulate ccw until other boundary edge reached + while (m.type[hij] != 2) { + int hji = m.opp[hij]; + + // add period jump constraint term for given edge + if (!is_period_jump_fixed[hij]) { + if (hij < hji) + { + int e_var = halfedge_var_id[hji]; + T.emplace_back(row, e_var, -sign * 2. * period_value[hij]); + } else { + int e_var = halfedge_var_id[hij]; + T.emplace_back(row, e_var, sign * 2. * period_value[hij]); + } + } else { + b(row) -= sign * 2. * period_value[hij] * period_jump[hij]; + } + + // add original index term for given edge + b(row) += sign * 2. * kappa[hij]; + b(row) -= sign * 2. 
* he2angle[m.n[hij]]; + + // circulate halfedge + hij = m.opp[m.n[m.n[hij]]]; + } + + // Add constraints for last boundary edge + if (!is_period_jump_fixed[hij]) { + if (hij < m.opp[hij]) + { + int e_var = halfedge_var_id[m.opp[hij]]; + T.emplace_back(row, e_var, -sign * period_value[hij]); + } else { + int e_var = halfedge_var_id[hij]; + T.emplace_back(row, e_var, sign * period_value[hij]); + } + } else { + b(row) -= sign * period_value[hij] * period_jump[hij]; + } + b(row) += sign * kappa[hij]; + b(row) += sign * target_indices(i); + } + + h_var = m.opp[m.n[m.n[h_var]]]; + } + } + + spdlog::info("Adding constraints"); + + + bool correct_curvature = true; + if (correct_curvature) + { + Scalar curvature_defect = target_curvature - boundary_curvature; + spdlog::info("Correcting curvature defect {} for {} interior vertices", curvature_defect, num_int_vertices); + while (curvature_defect < (-M_PI * num_int_vertices)) + //while (curvature_defect < 0) + { + // TODO Be more clever + Scalar min_defect = 3. 
* M_PI; + int min_index = -1; + for (int i = 0; i < num_bd_vertices; ++i) { + if (min_defect > C(i, num_var)) + { + min_defect = C(i, num_var); + min_index = i; + } + } + + + C(min_index, num_var) += M_PI; + curvature_defect += M_PI; + } + while (curvature_defect > (M_PI * num_int_vertices)) + //while (curvature_defect > 0) + { + // TODO Be more clever + Scalar max_defect = -1.; + int max_index = -1; + for (int i = 0; i < num_bd_vertices; ++i) { + if (max_defect < C(i, num_var)) + { + max_defect = C(i, num_var); + max_index = i; + } + } + + C(max_index, num_var) -= M_PI; + curvature_defect -= M_PI; + + } + spdlog::info("Curvature defect {}", curvature_defect); + } + + for (int i = 0; i < num_bd_vertices; ++i) { + int hij = bd_halfedges[i]; + + // Add constraint term for first face + // TODO Check if theta necessary + //int f0 = m.f[hij]; + //int f0_var = face_var_id[f0]; + //if (!is_face_fixed[f0]) { + // C(i, f0_var) = 1; + //} else { + // C(i, num_var + 1) -= theta[f0]; + //} + if (!is_period_jump_fixed[hij]) { + if (hij < m.opp[hij]) + { + int e_var = halfedge_var_id[m.opp[hij]]; + C(i, e_var) -= period_value[hij]; + } else { + int e_var = halfedge_var_id[hij]; + C(i, e_var) += period_value[hij]; + } + } else { + C(i, num_var) -= period_value[hij] * period_jump[hij]; + } + C(i, num_var) += kappa[hij]; + C(i, num_var) -= 2. * he2angle[m.n[hij]]; + + hij = m.opp[m.n[m.n[hij]]]; + + // Circulate ccw until other boundary edge reached + while (m.type[hij] != 2) { + int hji = m.opp[hij]; + + // add period jump constraint term for given edge + if (!is_period_jump_fixed[hij]) { + if (hij < hji) + { + int e_var = halfedge_var_id[hji]; + C(i, e_var) -= 2. * period_value[hij]; + } else { + int e_var = halfedge_var_id[hij]; + C(i, e_var) += 2. * period_value[hij]; + } + } else { + C(i, num_var) -= 2. * period_value[hij] * period_jump[hij]; + } + + // add original index term for given edge + C(i, num_var) += 2. * kappa[hij]; + C(i, num_var) -= 2. 
* he2angle[m.n[hij]]; + + // circulate halfedge + hij = m.opp[m.n[m.n[hij]]]; + } + + // Add constraints for last boundary edge + if (!is_period_jump_fixed[hij]) { + if (hij < m.opp[hij]) + { + int e_var = halfedge_var_id[m.opp[hij]]; + C(i, e_var) -= period_value[hij]; + } else { + int e_var = halfedge_var_id[hij]; + C(i, e_var) += period_value[hij]; + } + } else { + C(i, num_var) -= period_value[hij] * period_jump[hij]; + } + C(i, num_var) += kappa[hij]; + + spdlog::trace("Constraint {} is {}", i, C(i, num_var)); + + } + + // Scale C by pi/2 so it's integer multipled + C /= (M_PI / 2.); + + // TODO: Soft constraints +} + +void IntrinsicNRosyField::solve(const Mesh& m) +{ + int n = A.rows(); + int c = C.rows(); + + gmm::col_matrix> gmm_A(n, n); + std::vector gmm_b(n); + gmm::row_matrix> gmm_C(c, n+1); + std::vector var_edges; + std::vector x(n); + + // Copy A + for (int k = 0; k < A.outerSize(); ++k) { + for (MatrixX::InnerIterator it(A, k); it; ++it) { + gmm_A(it.row(), it.col()) += (double)(it.value()); + } + } + + // Copy b + for (int i = 0; i < n; ++i) { + gmm_b[i] = (double)(b[i]); + } + + // Set variables to round + var_edges.clear(); + for (int var_id : halfedge_var_id) { + if (var_id != -1) { + var_edges.push_back(var_id); + } + } + + // Empty constraints + for (int i = 0; i < C.rows(); ++i) { + for (int j = 0; j < C.cols(); ++j) { + gmm_C(i, j) += (double)(C(i, j)); + } + } + + // Solve system + COMISO::ConstrainedSolver cs; + cs.solve(gmm_C, gmm_A, x, gmm_b, var_edges, 0.0, false, true); + + // Copy the face angles + int num_faces = m.n_faces(); + for (int fi = 0; fi < num_faces; ++fi) { + if (face_var_id[fi] != -1) { + theta[fi] += x[face_var_id[fi]]; + theta[m.f[m.R[m.h[fi]]]] -= x[face_var_id[fi]]; + } + } + + // Copy the period jumps (and add sign) + int num_halfedges = m.n_halfedges(); + for (int hij = 0; hij < num_halfedges; ++hij) { + if (halfedge_var_id[hij] != -1) { + //if ((m.type[hij] == 2) || (m.type[m.opp[hij]] == 2)) continue; + int hji = 
m.opp[hij]; + period_jump[hij] = (int)std::round(x[halfedge_var_id[hij]]); + period_jump[hji] = -period_jump[hij]; + + if (m.type[hij] > 0) + { + period_jump[m.R[hij]] = -period_jump[hij]; + period_jump[m.opp[m.R[hij]]] = period_jump[hij]; + } + } + } +} + +std::vector generate_cones_from_rotation_form_FIXME( + const Mesh& m, + const VectorX& rotation_form) +{ + // Compute the corner angles + VectorX he2angle, he2cot; + Optimization::corner_angles(m, he2angle, he2cot); + + // Compute cones from the rotation form as holonomy - rotation around each vertex + // Per-halfedge iteration is used for faster computation + int num_vertices = m.n_ind_vertices(); + std::vector Th_hat(num_vertices, 0.); + for (int h = 0; h < m.n_halfedges(); h++) { + // Add angle to vertex opposite the halfedge + Th_hat[m.v_rep[m.to[m.n[h]]]] += he2angle[h]; + + // Add rotation to the vertex at the tip of the halfedge + // NOTE: By signing convention, this is the negative of the rotation ccw around + // the vertex + Th_hat[m.v_rep[m.to[h]]] += rotation_form[h]; + } + + return Th_hat; +} + +VectorX IntrinsicNRosyField::compute_rotation_form(const Mesh& m) +{ + int num_halfedges = m.n_halfedges(); + VectorX rotation_form(num_halfedges); + for (int hij = 0; hij < num_halfedges; ++hij) { + if (hij < m.opp[hij]) continue; + int hji = m.opp[hij]; + int f0 = m.f[hij]; + int f1 = m.f[hji]; + rotation_form[hij] = + theta[f0] - theta[f1] + kappa[hij] + period_value[hij] * period_jump[hij]; + rotation_form[hji] = -rotation_form[hij]; + } + + std::vector Th_hat = generate_cones_from_rotation_form_FIXME(m, rotation_form); + + int double_genus = 2 - (m.n_vertices() - m.n_edges() + m.n_faces()); + Scalar targetsum = M_PI * (2 * m.n_vertices() - 2 * (2 - double_genus)); + Scalar th_hat_sum = 0.0; + for(auto t: Th_hat) + { + th_hat_sum += t; + } + spdlog::info("Gauss-Bonnet error before cone removal: {}", th_hat_sum - targetsum); + + int num_vertices = Th_hat.size(); + std::queue cones = {}; + for (int vi = 0; 
vi < num_vertices; ++vi) + { + if (Th_hat[vi] < min_angle - 1e-6) + { + cones.push(vi); + } + } + while (!cones.empty()) + { + int vi = cones.front(); + cones.pop(); + if (Th_hat[vi] > min_angle + 1e-6) continue; + spdlog::info("Fixing {} cone at {} in rotation form", Th_hat[vi], vi); + + // find largest cone angle near the cone vertex + int h_opt = -1; + Scalar max_angle = -1.; + int h_start = m.out[vi]; + int hij = h_start; + int vj; + do { + vj = m.v_rep[m.to[hij]]; + if (Th_hat[vj] > max_angle) + { + h_opt = hij; + max_angle = Th_hat[vj]; + spdlog::info("Max angle {} at {}", max_angle, vj); + } + + hij = m.opp[m.n[m.n[hij]]]; + } while (hij != h_start); + + // push curvature to adjacent cone if candidate found + if (h_opt != -1) + { + vj = m.v_rep[m.to[h_opt]]; + spdlog::info("Decreasing cone {} at {}", Th_hat[vj], vj); + + // modify the rotation form and resulting cone angles + rotation_form[h_opt] -= (M_PI / 2.); + rotation_form[m.opp[h_opt]] += M_PI / 2.; + if (m.type[h_opt] != 0) { + rotation_form[m.R[h_opt]] += M_PI / 2.; + rotation_form[m.opp[m.R[h_opt]]] -= M_PI / 2.; + } + Th_hat[vi] += M_PI / 2.; + Th_hat[vj] -= M_PI / 2.; + + // check if candidate vertex is now a cone + if (Th_hat[vj] < min_angle - 1e-6) + { + cones.push(vj); + } + } + + // check if vertex vi is still a cone + if (Th_hat[vi] < min_angle - 1e-6) + { + cones.push(vi); + } + } + + th_hat_sum = 0.0; + for(auto t: Th_hat) + { + th_hat_sum += t; + } + spdlog::info("Gauss-Bonnet error after cone removal: {}", th_hat_sum - targetsum); + + assert(is_valid_one_form(m, rotation_form)); + return rotation_form; +} + +VectorX IntrinsicNRosyField::run(const Mesh& m) +{ + // Initialize mixed integer system + if (m.type[0] == 0) { + initialize_local_frames(m); + initialize_period_jump(m); + initialize_mixed_integer_system(m); + } else { + initialize_double_local_frames(m); + initialize_double_period_jump(m); + initialize_double_mixed_integer_system(m); + } + + // Solve + solve(m); + return 
compute_rotation_form(m); +} + +VectorX IntrinsicNRosyField::run_with_viewer( + const Mesh& m, + const std::vector& vtx_reindex, + const Eigen::MatrixXd& V) +{ + // Initialize mixed integer system + if (m.type[0] == 0) { + initialize_local_frames(m); + initialize_period_jump(m); + initialize_mixed_integer_system(m); + } else { + initialize_double_local_frames(m); + initialize_double_period_jump(m); + initialize_double_mixed_integer_system(m); + } + + // Solve + solve(m); + + // Initialize viewer + auto [V_double, F_mesh, F_halfedge] = generate_doubled_mesh(V, m, vtx_reindex); + view_dual_graph(V, m, vtx_reindex, is_period_jump_fixed); + VectorX kappa_mesh = generate_FV_halfedge_data(F_halfedge, kappa); + VectorX period_jump_scaled(period_jump.size()); + for (int i = 0; i < period_jump.size(); ++i) + { + period_jump_scaled[i] = period_value[i] * period_jump[i]; + } + VectorX period_jump_mesh = generate_FV_halfedge_data(F_halfedge, period_jump_scaled); + VectorX period_value_mesh = generate_FV_halfedge_data(F_halfedge, period_value); +#ifdef ENABLE_VISUALIZATION +polyscope::init(); +std::string mesh_handle = "intrinsic_field_mesh"; +polyscope::registerSurfaceMesh(mesh_handle, V_double, F_mesh); +polyscope::getSurfaceMesh(mesh_handle) + ->setBackFacePolicy(polyscope::BackFacePolicy::Cull); +polyscope::getSurfaceMesh(mesh_handle) + ->addHalfedgeScalarQuantity( + "kappa", + convert_scalar_to_double_vector(kappa)) + ->setColorMap("coolwarm") + ->setEnabled(false); +polyscope::getSurfaceMesh(mesh_handle) + ->addHalfedgeScalarQuantity( + "period jump", + convert_scalar_to_double_vector(period_jump_mesh)) + ->setColorMap("coolwarm") + ->setEnabled(false); +polyscope::getSurfaceMesh(mesh_handle) + ->addHalfedgeScalarQuantity( + "period value", + convert_scalar_to_double_vector(period_value_mesh)) + ->setColorMap("coolwarm") + ->setEnabled(false); +polyscope::getSurfaceMesh(mesh_handle) + ->addFaceScalarQuantity( + "theta", + convert_scalar_to_double_vector(theta)) + 
->setColorMap("coolwarm") + ->setEnabled(true); +polyscope::show(); +#endif + + return compute_rotation_form(m); +} + +} // namespace Holonomy +} // namespace Penner diff --git a/src/holonomy/core/quality.cpp b/src/holonomy/core/quality.cpp new file mode 100644 index 0000000..8c4a79c --- /dev/null +++ b/src/holonomy/core/quality.cpp @@ -0,0 +1,63 @@ +#include "holonomy/core/quality.h" + +namespace Penner { +namespace Holonomy { + +Scalar compute_triangle_quality(Scalar lij, Scalar ljk, Scalar lki) +{ + Scalar numer = 2 * lij * ljk * lki; + Scalar denom = ((-lij + ljk + lki) * (lij - ljk + lki) * (lij + ljk - lki)); + return (!float_equal(denom, 0)) ? numer / denom : 1e10; +} + +// Helper function to compute face quality for a triangle mesh face +Scalar compute_face_quality( + const DifferentiableConeMetric& cone_metric, + const VectorX& metric_coords, + int f) +{ + int hij = cone_metric.h[f]; + int hjk = cone_metric.n[hij]; + int hki = cone_metric.n[hjk]; + + // Get edge log lengths and average + Scalar llij = metric_coords[cone_metric.he2e[hij]]; + Scalar lljk = metric_coords[cone_metric.he2e[hjk]]; + Scalar llki = metric_coords[cone_metric.he2e[hki]]; + Scalar llijk = (llij + lljk + llki) / 3.0; + + // Compute lengths scaled by average (triangle quality is scale invariant) + Scalar lij = exp((llij - llijk) / 2.0); + Scalar ljk = exp((lljk - llijk) / 2.0); + Scalar lki = exp((llki - llijk) / 2.0); + + return compute_triangle_quality(lij, ljk, lki); +} + +VectorX compute_mesh_quality(const DifferentiableConeMetric& cone_metric) +{ + // Get metric coordinates + VectorX metric_coords = cone_metric.get_metric_coordinates(); + + // Compute per face quality + int num_faces = cone_metric.n_faces(); + VectorX mesh_quality(num_faces); + for (int f = 0; f < num_faces; ++f) { + mesh_quality[f] = compute_face_quality(cone_metric, metric_coords, f); + } + + return mesh_quality; +} + +Scalar compute_min_angle(const DifferentiableConeMetric& cone_metric) +{ + // Get angles + 
VectorX angles, cotangents; + cone_metric.get_corner_angles(angles, cotangents); + + // Compute per face quality + return angles.minCoeff(); +} + +} // namespace Holonomy +} // namespace Penner \ No newline at end of file diff --git a/src/holonomy/core/viewer.cpp b/src/holonomy/core/viewer.cpp new file mode 100644 index 0000000..7bc6c00 --- /dev/null +++ b/src/holonomy/core/viewer.cpp @@ -0,0 +1,890 @@ + +#include "holonomy/core/viewer.h" + +#include "util/vf_mesh.h" + +#include +#include +#include +#include +#include +#include + +#include + +#include "util/vector.h" +#include "util/vf_mesh.h" +#include "holonomy/holonomy/constraint.h" + +#ifdef ENABLE_VISUALIZATION +#include "polyscope/curve_network.h" +#include "polyscope/point_cloud.h" +#include "polyscope/surface_mesh.h" +#endif + +namespace Penner { +namespace Holonomy { + +#ifdef ENABLE_VISUALIZATION +glm::vec3 BEIGE(0.867, 0.765, 0.647); +glm::vec3 BLACK_BROWN(0.125, 0.118, 0.125); +glm::vec3 TAN(0.878, 0.663, 0.427); +glm::vec3 MUSTARD(0.890, 0.706, 0.282); +glm::vec3 FOREST_GREEN(0.227, 0.420, 0.208); +glm::vec3 TEAL(0., 0.375, 0.5); +glm::vec3 DARK_TEAL(0., 0.5*0.375, 0.5*0.5); +#endif + +std::tuple generate_mesh_faces( + const Mesh& m, + const std::vector& vtx_reindex) +{ + int num_faces = m.n_faces(); + Eigen::MatrixXi F(num_faces, 3); + Eigen::MatrixXi F_halfedge(num_faces, 3); + + for (int fijk = 0; fijk < num_faces; ++fijk) { + // Get halfedges of face + int hij = m.h[fijk]; + int hjk = m.n[hij]; + int hki = m.n[hjk]; + + // Get vertices of face + int vj = vtx_reindex[m.v_rep[m.to[hij]]]; + int vk = vtx_reindex[m.v_rep[m.to[hjk]]]; + int vi = vtx_reindex[m.v_rep[m.to[hki]]]; + + // Write face with halfedge and opposite vertex data + F_halfedge(fijk, 0) = hij; + F_halfedge(fijk, 1) = hjk; + F_halfedge(fijk, 2) = hki; + F(fijk, 0) = vi; + F(fijk, 1) = vj; + F(fijk, 2) = vk; + } + + return std::make_tuple(F, F_halfedge); +} + +std::tuple generate_doubled_mesh( + const Eigen::MatrixXd& V, + const Mesh& 
m, + const std::vector& vtx_reindex) +{ + int num_vertices = m.n_vertices(); + int num_faces = m.n_faces(); + + // Copy and double vertices + Eigen::MatrixXd V_double(num_vertices, 3); + for (int vi = 0; vi < num_vertices; ++vi) { + V_double.row(vi) = V.row(vtx_reindex[m.v_rep[vi]]); + } + + // Generate doubled faces and halfedge map + Eigen::MatrixXi F(num_faces, 3); + Eigen::MatrixXi F_halfedge(num_faces, 3); + for (int fijk = 0; fijk < num_faces; ++fijk) { + // Get halfedges of face + int hij = m.h[fijk]; + int hjk = m.n[hij]; + int hki = m.n[hjk]; + + // Get vertices of face + int vj = m.to[hij]; + int vk = m.to[hjk]; + int vi = m.to[hki]; + + + // Write face with halfedge and opposite vertex data + F_halfedge(fijk, 0) = hij; + F_halfedge(fijk, 1) = hjk; + F_halfedge(fijk, 2) = hki; + F(fijk, 0) = vi; + F(fijk, 1) = vj; + F(fijk, 2) = vk; + } + + // Inflate vertices so they can be distinguished + bool do_inflate_mesh = false; + if (do_inflate_mesh) { + V_double = inflate_mesh(V_double, F, 0.001); + } + + return std::make_tuple(V_double, F, F_halfedge); +} + +VectorX generate_FV_halfedge_data(const Eigen::MatrixXi& F_halfedge, const VectorX& halfedge_data) +{ + int num_faces = F_halfedge.rows(); + int num_halfedges = halfedge_data.size(); + VectorX FV_halfedge_data(num_halfedges); + for (int f = 0; f < num_faces; ++f) { + for (int i : {0, 1, 2}) { + int h = F_halfedge(f, i); + FV_halfedge_data(3 * f + i) = halfedge_data(h); + } + } + + return FV_halfedge_data; +} + +Eigen::Vector3d generate_dual_vertex( + const Eigen::MatrixXd& V, + const Mesh& m, + const std::vector& vtx_reindex, + int f) +{ + // generate face vertices + int hij = m.h[f]; + int hjk = m.n[hij]; + int hki = m.n[hjk]; + int vi = m.v_rep[m.to[hki]]; + int vj = m.v_rep[m.to[hij]]; + int vk = m.v_rep[m.to[hjk]]; + + // get face corner positions + Eigen::Vector3d Vi = V.row(vtx_reindex[vi]); + Eigen::Vector3d Vj = V.row(vtx_reindex[vj]); + Eigen::Vector3d Vk = V.row(vtx_reindex[vk]); + + // compute 
midpoint + Eigen::Vector3d midpoint = (Vi + Vj + Vk) / 3.; + + return midpoint; +} + +Eigen::Vector3d generate_dual_edge_midpoint( + const Eigen::MatrixXd& V, + const Mesh& m, + const std::vector& vtx_reindex, + int hij) +{ + // generate edge vertices + int hji = m.opp[hij]; + int vi = m.v_rep[m.to[hji]]; + int vj = m.v_rep[m.to[hij]]; + + // get edge endpoint positions + Eigen::Vector3d Vi = V.row(vtx_reindex[vi]); + Eigen::Vector3d Vj = V.row(vtx_reindex[vj]); + + // compute midpoint + Eigen::Vector3d midpoint = (Vi + Vj) / 2.; + + return midpoint; +} + +void view_dual_graph( + const Eigen::MatrixXd& V, + const Mesh& m, + const std::vector& vtx_reindex, + const std::vector is_edge) +{ + int num_faces = m.n_faces(); + int num_halfedges = m.n_halfedges(); + + // Get dual vertices (i.e., faces) in the dual graph + std::vector is_dual_vertex(num_faces, false); + for (int hij = 0; hij < num_halfedges; ++hij) { + if (!is_edge[hij]) continue; // skip edges not in graph + is_dual_vertex[m.f[hij]] = true; + } + std::vector dual_vertices, dual_halfedges; + convert_boolean_array_to_index_vector(is_dual_vertex, dual_vertices); + convert_boolean_array_to_index_vector(is_edge, dual_halfedges); + + // Add all dual face vertices at endpoints of edges + int num_dual_vertices = dual_vertices.size(); + int num_dual_halfedges = dual_halfedges.size(); + std::vector dual_vertex_map(num_faces, -1); + Eigen::MatrixXd dual_vertex_positions(num_dual_vertices, 3); + Eigen::MatrixXd dual_node_positions(num_dual_vertices + num_dual_halfedges, 3); + for (int i = 0; i < num_dual_vertices; ++i) { + int f = dual_vertices[i]; + dual_vertex_map[f] = i; + dual_vertex_positions.row(i) = generate_dual_vertex(V, m, vtx_reindex, f); + dual_node_positions.row(i) = dual_vertex_positions.row(i); + } + + // Generate dual graph network + Eigen::MatrixXi edges(num_dual_halfedges, 2); + for (int i = 0; i < num_dual_halfedges; ++i) { + int hij = dual_halfedges[i]; + int f = m.f[hij]; + edges(i, 0) = 
dual_vertex_map[f]; + edges(i, 1) = num_dual_vertices + i; + dual_node_positions.row(num_dual_vertices + i) = + generate_dual_edge_midpoint(V, m, vtx_reindex, hij); + } + +#ifdef ENABLE_VISUALIZATION + polyscope::init(); + polyscope::registerPointCloud("dual vertices", dual_vertex_positions)->setPointRadius(0.00025); + polyscope::registerCurveNetwork("dual tree", dual_node_positions, edges)->setRadius(0.00015); +#endif +} + +void view_primal_graph( + const Eigen::MatrixXd& V, + const Mesh& m, + const std::vector& vtx_reindex, + const std::vector is_edge) +{ + int num_vertices = m.n_ind_vertices(); + int num_halfedges = m.n_halfedges(); + + // Get vertices in the graph + std::vector is_vertex(num_vertices, false); + for (int hij = 0; hij < num_halfedges; ++hij) { + if (!is_edge[hij]) continue; // skip edges not in graph + is_vertex[vtx_reindex[m.v_rep[m.to[hij]]]] = true; + } + std::vector graph_vertices, graph_halfedges; + convert_boolean_array_to_index_vector(is_vertex, graph_vertices); + convert_boolean_array_to_index_vector(is_edge, graph_halfedges); + + // Add all vertices at endpoints of edges + int num_graph_vertices = graph_vertices.size(); + int num_graph_halfedges = graph_halfedges.size(); + std::vector graph_vertex_map(num_vertices, -1); + Eigen::MatrixXd graph_vertex_positions(num_graph_vertices, 3); + for (int i = 0; i < num_graph_vertices; ++i) { + int v = graph_vertices[i]; + graph_vertex_map[v] = i; + graph_vertex_positions.row(i) = V.row(v); + } + + // Generate graph network + Eigen::MatrixXi edges(num_graph_halfedges, 2); + for (int i = 0; i < num_graph_halfedges; ++i) { + int hij = graph_halfedges[i]; + int hji = m.opp[hij]; + int vi = vtx_reindex[m.v_rep[m.to[hji]]]; + int vj = vtx_reindex[m.v_rep[m.to[hij]]]; + edges(i, 0) = graph_vertex_map[vi]; + edges(i, 1) = graph_vertex_map[vj]; + } + +#ifdef ENABLE_VISUALIZATION + polyscope::init(); + polyscope::registerPointCloud("graph vertices", graph_vertex_positions) + ->setPointRadius(0.00025); + 
polyscope::registerCurveNetwork("graph tree", graph_vertex_positions, edges) + ->setRadius(0.00015); +#endif +} + +std::tuple generate_cone_vertices( + const Eigen::MatrixXd& V, + const std::vector& Th_hat) +{ + int num_vertices = Th_hat.size(); + std::vector cone_indices; + cone_indices.reserve(num_vertices); + for (int vi = 0; vi < num_vertices; ++vi) { + if (float_equal(Th_hat[vi], 2 * M_PI)) continue; + cone_indices.push_back(vi); + } + + int num_cones = cone_indices.size(); + Eigen::MatrixXd cone_positions(num_cones, 3); + Eigen::VectorXd cone_values(num_cones); + for (int i = 0; i < num_cones; ++i) { + int vi = cone_indices[i]; + cone_positions.row(i) = V.row(vi); + cone_values[i] = (double)(Th_hat[vi]) - (2 * M_PI); + } + + return std::make_tuple(cone_positions, cone_values); +} + +std::tuple generate_cone_vertices( + const Eigen::MatrixXd& V, + const std::vector& vtx_reindex, + const Mesh& m) +{ + bool is_closed = (m.type[0] == 0); + int num_vertices = m.n_ind_vertices(); + std::vector cone_indices; + cone_indices.reserve(num_vertices); + for (int vi = 0; vi < num_vertices; ++vi) { + if ((is_closed) && (float_equal(m.Th_hat[vi], 2 * M_PI))) continue; + if ((!is_closed) && (float_equal(m.Th_hat[vi], 4 * M_PI))) continue; + cone_indices.push_back(vi); + } + + int num_cones = cone_indices.size(); + Eigen::MatrixXd cone_positions(num_cones, 3); + Eigen::VectorXd cone_values(num_cones); + for (int i = 0; i < num_cones; ++i) { + int vi = cone_indices[i]; + cone_positions.row(i) = V.row(vtx_reindex[vi]); + cone_values[i] = (double)(m.Th_hat[vi]) - (2 * M_PI); + } + + return std::make_tuple(cone_positions, cone_values); +} + +std::tuple generate_closed_cone_vertices( + const Eigen::MatrixXd& V, + const std::vector& Th_hat) +{ + // get cone indices + int num_vertices = V.rows(); + std::vector cone_indices; + cone_indices.reserve(num_vertices); + for (int vi = 0; vi < num_vertices; ++vi) { + if (float_equal(Th_hat[vi], 2 * M_PI)) continue; + 
cone_indices.push_back(vi); + } + + // build cone positions and values + int num_cones = cone_indices.size(); + Eigen::MatrixXd cone_positions(num_cones, 3); + Eigen::VectorXd cone_values(num_cones); + for (int i = 0; i < num_cones; ++i) { + int vi = cone_indices[i]; + cone_positions.row(i) = V.row(vi); + cone_values[i] = (double)(Th_hat[vi]) - (2 * M_PI); + } + + return std::make_tuple(cone_positions, cone_values); +} + +Eigen::MatrixXd generate_subset_vertices( + const Eigen::MatrixXd& V, + const std::vector& vertex_indices) +{ + int num_vertices = vertex_indices.size(); + Eigen::MatrixXd vertex_positions(num_vertices, 3); + for (int i = 0; i < num_vertices; ++i) { + int vi = vertex_indices[i]; + vertex_positions.row(i) = V.row(vi); + } + + return vertex_positions; +} + +Eigen::MatrixXd rotate_frame_field( + const Eigen::MatrixXd& V, + const Eigen::MatrixXi& F, + const Eigen::MatrixXd& frame_field) +{ + Eigen::MatrixXd B1, B2, B3; + igl::local_basis(V, F, B1, B2, B3); + return igl::rotate_vectors(frame_field, Eigen::VectorXd::Constant(1, M_PI / 2), B1, B2); +} + +void view_frame_field( + const Eigen::MatrixXd& V, + const Eigen::MatrixXi& F, + const Eigen::MatrixXd& frame_field, + const std::vector& Th_hat, + std::string mesh_handle) +{ + spdlog::info("Viewing mesh {}", mesh_handle); + int num_vertices = V.rows(); + int num_faces = F.rows(); + int num_field = frame_field.rows(); + int num_cones = Th_hat.size(); + + // Check size consistency + if (num_field != num_faces) return; + if (num_cones != num_vertices) return; + + // Generate cones + auto [cone_positions, cone_values] = generate_cone_vertices(V, Th_hat); + + // Generate rotated cross field vectors from reference + std::array cross_field; + cross_field[0] = frame_field; + for (int i : {1, 2, 3}) { + cross_field[i] = rotate_frame_field(V, F, cross_field[i - 1]); + } + +#ifdef ENABLE_VISUALIZATION + polyscope::init(); + if (mesh_handle == "") { + mesh_handle = "frame_field_mesh"; + 
polyscope::registerSurfaceMesh(mesh_handle, V, F); + polyscope::getSurfaceMesh(mesh_handle)->setSurfaceColor(MUSTARD); + } + for (int i : {0, 1, 2, 3}) { + polyscope::getSurfaceMesh(mesh_handle) + ->addFaceVectorQuantity("field_" + std::to_string(i), cross_field[i]) + ->setVectorColor((i == 0) ? FOREST_GREEN : BLACK_BROWN) + ->setVectorRadius(0.0005) + ->setVectorLengthScale(0.005) + ->setEnabled(true); + } + polyscope::registerPointCloud("cross_field_cones", cone_positions); + polyscope::getPointCloud("cross_field_cones") + ->addScalarQuantity("index", cone_values) + ->setColorMap("coolwarm") + ->setMapRange({-M_PI, M_PI}) + ->setEnabled(true); + + + polyscope::show(); +#else + spdlog::info("Viewer disabled for mesh (|V|={}, |F|={})", num_vertices, num_faces); +#endif +} + +void view_rotation_form( + const Mesh& m, + const std::vector& vtx_reindex, + const Eigen::MatrixXd& V, + const VectorX& rotation_form, + const std::vector& Th_hat, + std::string mesh_handle, + bool show) +{ + int num_vertices = V.rows(); + if (show) { + spdlog::info("Viewing mesh {} with {} vertices", mesh_handle, num_vertices); + } + auto [V_double, F_mesh, F_halfedge] = generate_doubled_mesh(V, m, vtx_reindex); + VectorX rotation_form_mesh = generate_FV_halfedge_data(F_halfedge, rotation_form); + + // Generate cones + spdlog::info("{} vertices", Th_hat.size()); + auto [cone_positions, cone_values] = generate_cone_vertices(V, vtx_reindex, m); + +#ifdef ENABLE_VISUALIZATION + polyscope::init(); + if (mesh_handle == "") { + mesh_handle = "rotation_form_mesh"; + } + polyscope::registerSurfaceMesh(mesh_handle, V_double, F_mesh); + polyscope::getSurfaceMesh(mesh_handle) + ->addHalfedgeScalarQuantity( + "rotation_form", + convert_scalar_to_double_vector(rotation_form_mesh)) + ->setColorMap("coolwarm") + ->setEnabled(true); + polyscope::registerPointCloud(mesh_handle + "_cones", cone_positions); + polyscope::getPointCloud(mesh_handle + "_cones") + ->addScalarQuantity("index", cone_values) + 
->setColorMap("coolwarm") + ->setMapRange({-M_PI, M_PI}) + ->setEnabled(true); + if (show) polyscope::show(); +#endif +} + +void view_mesh_quality( + const Eigen::MatrixXd& V, + const Eigen::MatrixXi& F, + std::string mesh_handle, + bool show) +{ + int num_vertices = V.rows(); + if (show) { + spdlog::info("Viewing mesh {} with {} vertices", mesh_handle, num_vertices); + } + + Eigen::VectorXd double_area; + igl::doublearea(V, F, double_area); + + +#ifdef ENABLE_VISUALIZATION + polyscope::init(); + if (mesh_handle == "") { + mesh_handle = "quality_analysis_mesh"; + polyscope::registerSurfaceMesh(mesh_handle, V, F); + polyscope::getSurfaceMesh(mesh_handle)->setSurfaceColor(MUSTARD); + } + polyscope::getSurfaceMesh(mesh_handle)->addFaceScalarQuantity("double_area", double_area); + if (show) polyscope::show(); +#endif +} + +void view_mesh_topology( + const Eigen::MatrixXd& V, + const Eigen::MatrixXi& F, + std::string mesh_handle, + bool show) +{ + // Get components and boundary + Eigen::MatrixXi bd; + Eigen::VectorXi components; + igl::boundary_facets(F, bd); + igl::facet_components(F, components); + + if (mesh_handle == "") { + mesh_handle = "topology_analysis_mesh"; + } + +#ifdef ENABLE_VISUALIZATION + polyscope::init(); + if (mesh_handle == "") { + polyscope::registerSurfaceMesh(mesh_handle, V, F); + polyscope::getSurfaceMesh(mesh_handle)->setSurfaceColor(MUSTARD); + } + polyscope::getSurfaceMesh(mesh_handle)->addFaceScalarQuantity("component", components); + if (show) polyscope::show(); +#else + if (show) { + int num_vertices = V.rows(); + int num_faces = F.rows(); + spdlog::info("Viewer disabled for mesh (|V|={}, |F|={})", num_vertices, num_faces); + } +#endif +} + +Scalar uv_length_squared(const Eigen::Vector2d& uv_0, const Eigen::Vector2d& uv_1) +{ + Eigen::Vector2d difference_vector = uv_1 - uv_0; + Scalar length_sq = difference_vector.dot(difference_vector); + return length_sq; +} + +Scalar uv_length(const Eigen::Vector2d& uv_0, const Eigen::Vector2d& uv_1) +{ 
+ return sqrt(uv_length_squared(uv_0, uv_1)); +} + +Scalar uv_cos_angle(const Eigen::Vector2d& uv_0, const Eigen::Vector2d& uv_1) +{ + Scalar dot_01 = uv_0.dot(uv_1); + Scalar norm_0 = sqrt(uv_0.dot(uv_0)); + Scalar norm_1 = sqrt(uv_1.dot(uv_1)); + Scalar norm = norm_0 * norm_1; + if (norm == 0.) return 0.; + return (dot_01 / norm); +} + +std::tuple compute_seamless_error( + const Eigen::MatrixXi& F, + const Eigen::MatrixXd& uv, + const Eigen::MatrixXi& F_uv) +{ + // Get the edge topology for the original uncut mesh + Eigen::MatrixXi uE, EF, EI; + Eigen::VectorXi EMAP; + igl::edge_flaps(F, uE, EMAP, EF, EI); + + // Iterate over edges to check the length inconsistencies + VectorX uv_length_error(F.size()); + VectorX uv_angle_error(F.size()); + VectorX uv_angle(F.size()); + for (Eigen::Index e = 0; e < EF.rows(); ++e) { + // Get face corners corresponding to the current edge + int f0 = EF(e, 0); + int f1 = EF(e, 1); + + // Check first face (if not boundary) + if (f0 < 0) continue; + int i0 = EI(e, 0); // corner vertex face index + int v0n = F_uv(f0, (i0 + 1) % 3); // next vertex + int v0p = F_uv(f0, (i0 + 2) % 3); // previous vertex + + // Check second face (if not boundary) + if (f1 < 0) continue; + int i1 = EI(e, 1); // corner vertex face index + int v1n = F_uv(f1, (i1 + 1) % 3); // next vertex + int v1p = F_uv(f1, (i1 + 2) % 3); // next vertex + + // Compute the length of each halfedge corresponding to the corner in the cut mesh + Eigen::Vector2d uv_00 = uv.row(v0n); + Eigen::Vector2d uv_01 = uv.row(v0p); + Eigen::Vector2d uv_10 = uv.row(v1n); + Eigen::Vector2d uv_11 = uv.row(v1p); + Scalar l0 = uv_length(uv_00, uv_01); + Scalar l1 = uv_length(uv_10, uv_11); + Scalar cos_angle = uv_cos_angle(uv_01 - uv_00, uv_11 - uv_10); + Scalar length_error = abs(l0 - l1); + Scalar angle_error = min(abs(cos_angle), abs(abs(cos_angle) - 1)); + + // set length error for the given edge + i0 = (i0 + 1) % 3; + i1 = (i1 + 1) % 3; + uv_length_error(3 * f0 + i0) = length_error; + 
uv_length_error(3 * f1 + i1) = length_error; + uv_angle_error(3 * f0 + i0) = angle_error; + uv_angle_error(3 * f1 + i1) = angle_error; + uv_angle(3 * f0 + i0) = cos_angle; + uv_angle(3 * f1 + i1) = cos_angle; + } + + return std::make_tuple(uv_length_error, uv_angle_error, uv_angle); +} + +// TODO MAke separate layout method + +void view_quad_mesh( + const Eigen::MatrixXd& V, + const Eigen::MatrixXi& F, + std::string mesh_handle, + bool show) +{ + if (mesh_handle == "") { + mesh_handle = "quad mesh"; + } + +#ifdef ENABLE_VISUALIZATION + polyscope::init(); + + // Add cut mesh with + polyscope::registerSurfaceMesh(mesh_handle, V, F); + + if (show) polyscope::show(); +#else + if (show) { + int num_vertices = V.rows(); + int num_faces = F.rows(); + spdlog::info("Viewer disabled for mesh (|V|={}, |F|={})", num_vertices, num_faces); + } +#endif +} + +void view_parameterization( + const Eigen::MatrixXd& V, + const Eigen::MatrixXi& F, + const Eigen::MatrixXd& uv, + const Eigen::MatrixXi& FT, + std::string mesh_handle, + bool show) +{ + if (mesh_handle == "") { + mesh_handle = "cut_mesh"; + } + + // Cut mesh along seams + Eigen::MatrixXd V_cut; + cut_mesh_along_parametrization_seams(V, F, uv, FT, V_cut); + auto [uv_length_error, uv_angle_error, uv_angle] = compute_seamless_error(F, uv, FT); + spdlog::info("Max uv length error: {}", uv_length_error.maxCoeff()); + spdlog::info("Max uv angle error: {}", uv_angle_error.maxCoeff()); + +#ifdef ENABLE_VISUALIZATION + polyscope::init(); + + // Add cut mesh with + polyscope::registerSurfaceMesh(mesh_handle, V_cut, FT); + polyscope::getSurfaceMesh(mesh_handle) + ->addVertexParameterizationQuantity("uv", uv) + ->setStyle(polyscope::ParamVizStyle::GRID) + ->setGridColors(std::make_pair(DARK_TEAL, TEAL)) + ->setEnabled(true); + polyscope::getSurfaceMesh(mesh_handle) + ->addHalfedgeScalarQuantity( + "uv length error", + convert_scalar_to_double_vector(uv_length_error)); + polyscope::getSurfaceMesh(mesh_handle) + 
->addHalfedgeScalarQuantity( + "uv angle error", + convert_scalar_to_double_vector(uv_angle_error)); + polyscope::getSurfaceMesh(mesh_handle) + ->addHalfedgeScalarQuantity( + "uv angle", + convert_scalar_to_double_vector(uv_angle)); + + if (show) polyscope::show(); +#else + if (show) { + int num_vertices = V.rows(); + int num_faces = F.rows(); + spdlog::info("Viewer disabled for mesh (|V|={}, |F|={})", num_vertices, num_faces); + } +#endif +} + +void view_triangulation( + const Eigen::MatrixXd& V, + const Eigen::MatrixXi& F, + const std::vector& fn_to_f, + std::string mesh_handle, + bool show) +{ + if (mesh_handle == "") { + mesh_handle = "triangulation_mesh"; + } + spdlog::info("Viewing triangulation for map with {} faces", fn_to_f.size()); + +#ifdef ENABLE_VISUALIZATION + polyscope::init(); + + // add mesh with permuted face map + polyscope::registerSurfaceMesh(mesh_handle, V, F); + polyscope::getSurfaceMesh(mesh_handle) + ->addFaceScalarQuantity("face_map", fn_to_f) + ->setColorMap("spectral") + ->setEnabled(true); + + if (show) polyscope::show(); +#else + if (show) { + int num_vertices = V.rows(); + int num_faces = F.rows(); + spdlog::info("Viewer disabled for mesh (|V|={}, |F|={})", num_vertices, num_faces); + } +#endif +} + +void view_layout( + const Eigen::MatrixXd& uv, + const Eigen::MatrixXi& FT, + std::string mesh_handle, + bool show) +{ + if (mesh_handle == "") { + mesh_handle = "layout"; + } + +#ifdef ENABLE_VISUALIZATION + polyscope::init(); + // Add layout + polyscope::registerSurfaceMesh2D(mesh_handle, uv, FT)->setEnabled(true); + polyscope::getSurfaceMesh(mesh_handle) + ->addVertexParameterizationQuantity("uv", uv) + ->setEnabled(true); + polyscope::getSurfaceMesh(mesh_handle)->setEdgeWidth(1.); + + if (show) polyscope::show(); +#else + if (show) { + int num_vertices = uv.rows(); + int num_faces = FT.rows(); + spdlog::info("Viewer disabled for mesh (|V|={}, |F|={})", num_vertices, num_faces); + } +#endif +} + +void view_constraint_error( + const 
MarkedPennerConeMetric& marked_metric, + const std::vector& vtx_reindex, + const Eigen::MatrixXd& V, + std::string mesh_handle, + bool show) +{ + if (mesh_handle == "") { + mesh_handle = "constraint error"; + } + + int num_vertices = V.rows(); + if (show) { + spdlog::info("Viewing {} mesh with {} vertices", mesh_handle, num_vertices); + } + auto [V_double, F_mesh, F_halfedge] = generate_doubled_mesh(V, marked_metric, vtx_reindex); + + // Make mesh into discrete metric + spdlog::debug("Making metric discrete"); + MarkedPennerConeMetric marked_metric_copy = marked_metric; + marked_metric_copy.make_discrete_metric(); + + // Generate corner angles + spdlog::debug("Computing corner angles"); + VectorX he2angle; + VectorX cotangents; + marked_metric_copy.get_corner_angles(he2angle, cotangents); + + // Generate cones and cone errors + spdlog::debug("Computing cones and errors"); + VectorX vertex_constraint = compute_vertex_constraint(marked_metric_copy, he2angle); + auto [cone_positions, cone_values] = generate_cone_vertices(V, vtx_reindex, marked_metric); + VectorX cone_error = vector_compose(vertex_constraint, marked_metric.v_rep); + +#ifdef ENABLE_VISUALIZATION + spdlog::debug("Initializing mesh"); + polyscope::init(); + polyscope::registerSurfaceMesh(mesh_handle, V_double, F_mesh); + polyscope::getSurfaceMesh(mesh_handle) + ->addVertexScalarQuantity( + "cone error", + convert_scalar_to_double_vector(cone_error)) + ->setColorMap("coolwarm") + ->setEnabled(true); + polyscope::registerPointCloud(mesh_handle + "_cones", cone_positions); + polyscope::getPointCloud(mesh_handle + "_cones") + ->addScalarQuantity("index", cone_values) + ->setColorMap("coolwarm") + ->setMapRange({-M_PI, M_PI}) + ->setEnabled(true); + if (show) polyscope::show(); +#endif +} + +void view_vertex_function( + const Mesh& m, + const std::vector& vtx_reindex, + const Eigen::MatrixXd& V, + const VectorX& vertex_function, + std::string mesh_handle, + bool show) +{ + if (mesh_handle == "") { + 
mesh_handle = "vertex function"; + } + + int num_vertices = V.rows(); + if (show) { + spdlog::info("Viewing {} mesh with {} vertices", mesh_handle, num_vertices); + } + auto [V_double, F_mesh, F_halfedge] = generate_doubled_mesh(V, m, vtx_reindex); + if (num_vertices != vertex_function.size()) return; + +#ifdef ENABLE_VISUALIZATION + spdlog::debug("Initializing mesh"); + polyscope::init(); + polyscope::registerSurfaceMesh(mesh_handle, V_double, F_mesh); + polyscope::getSurfaceMesh(mesh_handle) + ->addVertexScalarQuantity( + "function value", + convert_scalar_to_double_vector(vertex_function)) + ->setColorMap("coolwarm") + ->setEnabled(true); + if (show) polyscope::show(); +#endif +} + +void view_vertex_function( + const Mesh& m, + const std::vector& vtx_reindex, + const Eigen::MatrixXd& V, + const std::vector& vertex_function, + std::string mesh_handle, + bool show) +{ + VectorX vertex_function_eig; + convert_std_to_eigen_vector(vertex_function, vertex_function_eig); + view_vertex_function(m, vtx_reindex, V, vertex_function_eig, mesh_handle, show); +} + +void view_independent_vertex_function( + const Mesh& m, + const std::vector& vtx_reindex, + const Eigen::MatrixXd& V, + const VectorX& vertex_function, + std::string mesh_handle, + bool show) +{ + if (mesh_handle == "") { + mesh_handle = "independent vertex function mesh"; + } + + int num_vertices = V.rows(); + if (show) { + spdlog::info("Viewing {} mesh with {} vertices", mesh_handle, num_vertices); + } + auto [V_double, F_mesh, F_halfedge] = generate_doubled_mesh(V, m, vtx_reindex); + + VectorX double_vertex_constraint = vector_compose(vertex_function, m.v_rep); + +#ifdef ENABLE_VISUALIZATION + spdlog::debug("Initializing mesh"); + polyscope::init(); + polyscope::registerSurfaceMesh(mesh_handle, V_double, F_mesh); + polyscope::getSurfaceMesh(mesh_handle) + ->addVertexScalarQuantity( + "function value", + convert_scalar_to_double_vector(double_vertex_constraint)) + ->setColorMap("coolwarm") + ->setEnabled(true); + 
if (show) polyscope::show(); +#endif +} + +} // namespace Holonomy +} // namespace Penner diff --git a/src/holonomy/holonomy/cones.cpp b/src/holonomy/holonomy/cones.cpp new file mode 100644 index 0000000..d945be7 --- /dev/null +++ b/src/holonomy/holonomy/cones.cpp @@ -0,0 +1,437 @@ +#include "holonomy/holonomy/cones.h" + +#include "util/boundary.h" +#include "holonomy/core/forms.h" +#include "holonomy/holonomy/holonomy.h" + +#include "optimization/core/constraint.h" + +#include + +namespace Penner { +namespace Holonomy { + +// Check cones computed from rotation form match more direct vertex iteration computation +bool validate_cones_from_rotation_form( + const Mesh& m, + const VectorX& rotation_form, + const std::vector& Th_hat) +{ + // Compute the corner angles + VectorX he2angle, he2cot; + Optimization::corner_angles(m, he2angle, he2cot); + + // Get boundary vertices if symmetric mesh with boundary + int num_vertices = m.n_vertices(); + bool is_symmetric = (m.type[0] != 0); + std::vector is_boundary_vertex(num_vertices, false); + if (is_symmetric) { + std::vector boundary_vertices = find_boundary_vertices(m); + convert_index_vector_to_boolean_array( + boundary_vertices, + num_vertices, + is_boundary_vertex); + } + + // Compare cones with direct per-vertex computation + for (int vi = 0; vi < num_vertices; ++vi) { + DualLoopList dual_loop(build_counterclockwise_vertex_dual_segment_sequence(m, vi)); + Scalar rotation = compute_dual_loop_rotation(m, rotation_form, dual_loop); + Scalar holonomy = compute_dual_loop_holonomy(m, he2angle, dual_loop); + + // Special treatment for vertices in interior of doubled mesh + if ((is_symmetric) && (!is_boundary_vertex[vi])) { + if (!float_equal(Th_hat[m.v_rep[vi]] / 2., holonomy - rotation, 1e-3)) { + spdlog::warn( + "Inconsistent interior cones {} and {}", + Th_hat[m.v_rep[vi]] / 2., + holonomy - rotation); + return false; + } + } + // General case + else { + if (!float_equal(Th_hat[m.v_rep[vi]], holonomy - rotation, 1e-3)) { + 
spdlog::warn( + "Inconsistent cones {} and {}", + Th_hat[m.v_rep[vi]], + holonomy - rotation); + return false; + } + } + } + + return true; +} + +std::vector generate_cones_from_rotation_form( + const Mesh& m, + const VectorX& rotation_form) +{ + assert(is_valid_one_form(m, rotation_form)); + // Compute the corner angles + VectorX he2angle, he2cot; + Optimization::corner_angles(m, he2angle, he2cot); + + // Compute cones from the rotation form as holonomy - rotation around each vertex + // Per-halfedge iteration is used for faster computation + int num_vertices = m.n_ind_vertices(); + std::vector Th_hat(num_vertices, 0.); + for (int h = 0; h < m.n_halfedges(); h++) { + // Add angle to vertex opposite the halfedge + Th_hat[m.v_rep[m.to[m.n[h]]]] += he2angle[h]; + + // Add rotation to the vertex at the tip of the halfedge + // NOTE: By signing convention, this is the negative of the rotation ccw around + // the vertex + Th_hat[m.v_rep[m.to[h]]] += rotation_form[h]; + } + assert(validate_cones_from_rotation_form(m, rotation_form, Th_hat)); + + return Th_hat; +} + +std::vector generate_cones_from_rotation_form( + const Mesh& m, + const std::vector& vtx_reindex, + const VectorX& rotation_form, + bool has_boundary) +{ + std::vector Th_hat_mesh = generate_cones_from_rotation_form(m, rotation_form); + + // Compute cones from the rotation form + int num_vertices = m.n_ind_vertices(); + std::vector Th_hat(num_vertices); + for (int vi = 0; vi < num_vertices; ++vi) { + if (has_boundary) { + Th_hat[vtx_reindex[vi]] = Th_hat_mesh[vi] / 2.; + } else { + Th_hat[vtx_reindex[vi]] = Th_hat_mesh[vi]; + } + } + + return Th_hat; +} + +bool contains_small_cones(const std::vector& Th_hat, int min_cone_index) +{ + int num_vertices = Th_hat.size(); + for (int vi = 0; vi < num_vertices; ++vi) { + // Check for cones below threshold + if (Th_hat[vi] < (min_cone_index * M_PI / 2.) 
- 1e-3) { + spdlog::warn("{} cone found at {}", Th_hat[vi], vi); + return true; + } + } + + return false; +} + +bool contains_zero_cones(const std::vector& Th_hat) +{ + return contains_small_cones(Th_hat, 0.); +} + +// Count negative and positive cones +std::pair count_cones(const Mesh& m) +{ + const auto& Th_hat = m.Th_hat; + + // Get boundary vertices + int num_vertices = m.n_vertices(); + bool is_symmetric = (m.type[0] != 0); + std::vector is_boundary_vertex(num_vertices, false); + if (is_symmetric) { + std::vector boundary_vertices = find_boundary_vertices(m); + convert_index_vector_to_boolean_array( + boundary_vertices, + num_vertices, + is_boundary_vertex); + } + + // Check for cones + int num_ind_vertices = Th_hat.size(); + int num_neg_cones = 0; + int num_pos_cones = 0; + std::vector is_seen(num_ind_vertices, false); + for (int vi = 0; vi < num_vertices; ++vi) { + if (is_seen[m.v_rep[vi]]) continue; + + // Get flat curvature + Scalar flat_angle = 2. * M_PI; + if ((is_symmetric) && (!is_boundary_vertex[vi])) { + flat_angle = 4. 
* M_PI; + } + + // Count negative and positive curvature cones + if (Th_hat[m.v_rep[vi]] > flat_angle + 1e-3) { + spdlog::trace("{} cone found", Th_hat[vi]); + num_neg_cones++; + } + if (Th_hat[m.v_rep[vi]] < flat_angle - 1e-3) { + spdlog::trace("{} cone found", Th_hat[vi]); + num_pos_cones++; + } + + // Mark vertex as seen + is_seen[m.v_rep[vi]] = true; + } + + return std::make_pair(num_neg_cones, num_pos_cones); +} + +// Get the total curvature of the mesh from the cones +Scalar compute_total_curvature(const Mesh& m) +{ + const auto& Th_hat = m.Th_hat; + + // Get boundary vertices + int num_vertices = m.n_vertices(); + bool is_symmetric = (m.type[0] != 0); + std::vector is_boundary_vertex(num_vertices, false); + if (is_symmetric) { + std::vector boundary_vertices = find_boundary_vertices(m); + convert_index_vector_to_boolean_array( + boundary_vertices, + num_vertices, + is_boundary_vertex); + } + + // Incrementally compute total curvature + int num_ind_vertices = Th_hat.size(); + Scalar total_curvature = 0.0; + std::vector is_seen(num_ind_vertices, false); + for (int vi = 0; vi < num_vertices; ++vi) { + if (is_seen[m.v_rep[vi]]) continue; + + // Get flat curvature + Scalar flat_angle = 2. * M_PI; + if ((is_symmetric) && (!is_boundary_vertex[vi])) { + flat_angle = 4. * M_PI; + } + + // Total curvature is the deviation from 2 pi + total_curvature += (Th_hat[m.v_rep[vi]] - flat_angle); + + // Mark vertex as seen + is_seen[m.v_rep[vi]] = true; + } + + return total_curvature; +} + + +bool is_trivial_torus(const Mesh& m) +{ + auto [num_neg_cones, num_pos_cones] = count_cones(m); + return ((num_neg_cones == 0) && (num_pos_cones == 0)); +} + +bool is_torus_with_cone_pair(const Mesh& m) +{ + // Get the cone counts + auto [num_neg_cones, num_pos_cones] = count_cones(m); + + // Compute genus of the surface from the total curvature + Scalar total_curvature = compute_total_curvature(m); + int genus = (int)(round(1 + total_curvature / (4. 
* M_PI))); + spdlog::info("Total curvature is {}", total_curvature); + spdlog::info("genus is {}", genus); + + // Check for tori with a pair of cones + return ((genus == 1) && (num_neg_cones == 1) && (num_pos_cones == 1)); +} + +bool validate_cones(const Mesh& m) +{ + if (contains_zero_cones(m.Th_hat)) return false; + if (is_torus_with_cone_pair(m)) return false; + + return true; +} + +// Helper to fix small cones +void remove_minimum_cone(Mesh& m) +{ + bool is_symmetric = (m.type[0] != 0); + Scalar angle_delta = (is_symmetric) ? M_PI : (M_PI / 2.); + + // Add pi/2 to the minimum cone + auto min_cone = std::min_element(m.Th_hat.begin(), m.Th_hat.end()); + *min_cone += angle_delta; + + // Subtract pi/2 from the maximum cone + auto max_cone = std::max_element(m.Th_hat.begin(), m.Th_hat.end()); + *max_cone -= angle_delta; +} + +bool is_interior(const Mesh& m, int vi) +{ + int h_start = m.out[vi]; + int hij = h_start; + do { + int hji = m.opp[hij]; + if ((m.type[hij] == 1) && (m.type[hji] == 2)) return false; + if ((m.type[hij] == 2) && (m.type[hji] == 1)) return false; + + hij = m.n[m.opp[hij]]; + } while (hij != h_start); + + return true; +} + +int get_flat_vertex(const Mesh& m, bool only_interior) +{ + int num_halfedges = m.n_halfedges(); + std::mt19937 rng(0); + std::uniform_int_distribution<> dist(0, num_halfedges - 1); + + // Find a flat cone in the interior of the mesh + bool is_symmetric = (m.type[0] != 0); + Scalar flat_angle = (is_symmetric) ? 4. * M_PI : 2. * M_PI; + while (true) { + int h = dist(rng); + int vi = m.v_rep[m.to[h]]; + if ((is_interior(m, m.to[h])) && (float_equal(m.Th_hat[vi], flat_angle))) { + return vi; + } + if (only_interior) continue; + + if ((m.type[h] == 1) && (m.R[m.opp[h]] == h) && float_equal(m.Th_hat[vi], 2. * M_PI)) { + return vi; + } + } + + return -1; +} + +void add_random_cone_pair(Mesh& m, bool only_interior) +{ + bool is_symmetric = (m.type[0] != 0); + Scalar angle_delta = (is_symmetric) ? 
M_PI : (M_PI / 2.); + + // Add 5 cone + int vi = get_flat_vertex(m, only_interior); + spdlog::debug("Adding positive cone at {}", vi); + m.Th_hat[vi] += angle_delta; + + // Add 3 cone + int vj = get_flat_vertex(m, only_interior); + spdlog::debug("Adding negative cone at {}", vj); + m.Th_hat[vj] -= angle_delta; +} + +std::tuple get_constraint_outliers( + MarkedPennerConeMetric& marked_metric, + bool use_interior_vertices, + bool use_flat_vertices) +{ + bool is_symmetric = (marked_metric.type[0] != 0); + int num_vertices = marked_metric.n_vertices(); + int num_ind_vertices = marked_metric.n_ind_vertices(); + std::vector bd_vertices = find_boundary_vertices(marked_metric); + std::vector is_bd_vertex(num_ind_vertices, false); + for (int vi : bd_vertices) { + is_bd_vertex[marked_metric.v_rep[vi]] = true; + } + + // get constraint errors + VectorX constraint; + MatrixX J_constraint; + bool need_jacobian = false; + bool only_free_vertices = false; + marked_metric.constraint(constraint, J_constraint, need_jacobian, only_free_vertices); + + // get cone indices with minimum and maximum defect + int i = 0; + int j = 0; + Scalar flat_angle = (is_symmetric) ? 4. * M_PI : 2. 
* M_PI; + for (int k = 0; k < num_vertices; ++k) { + int vi = marked_metric.v_rep[i]; + int vj = marked_metric.v_rep[j]; + int vk = marked_metric.v_rep[k]; + + // only add (optionally) interior cone pairs at flat vertices + if ((use_interior_vertices) && (is_symmetric) && (is_bd_vertex[vk])) continue; + if ((use_flat_vertices) && (!float_equal(marked_metric.Th_hat[vk], flat_angle))) continue; + + if (constraint[vk] < constraint[vi]) i = k; + if (constraint[vk] > constraint[vj]) j = k; + } + + return std::make_tuple(i, j); +} + +std::tuple add_optimal_cone_pair(MarkedPennerConeMetric& marked_metric) +{ + auto [i, j] = get_constraint_outliers(marked_metric, true, true); + spdlog::debug("Adding positive cone at {}", i); + spdlog::debug("Adding negative cone at {}", j); + bool is_symmetric = (marked_metric.type[0] != 0); + Scalar angle_delta = (is_symmetric) ? M_PI : (M_PI / 2.); + marked_metric.Th_hat[marked_metric.v_rep[i]] += angle_delta; + marked_metric.Th_hat[marked_metric.v_rep[j]] -= angle_delta; + + return std::make_tuple(i, j); +} + +void fix_cones(Mesh& m, int min_cone_index) +{ + // Remove any zero cones + while (contains_small_cones(m.Th_hat, min_cone_index)) { + remove_minimum_cone(m); + } + + // Add another cone pair to torus with a cone pair + if (is_torus_with_cone_pair(m)) { + add_random_cone_pair(m); + } +} + + +// TODO May be worth supporting +void remove_trivial_boundaries( + const Mesh& m, + const std::vector& vtx_reindex, + std::vector& Th_hat) +{ + std::vector boundary_components = find_boundary_components(m); + for (int h_start : boundary_components) { + spdlog::info("Checking for trivial loop at {}", h_start); + bool is_trivial = true; + int h = h_start; + do { + // Circulate to next boundary edge + while (m.type[h] != 2) { + h = m.opp[m.n[h]]; + } + h = m.opp[h]; + + int vi = vtx_reindex[m.v_rep[m.to[h]]]; + if (!float_equal(Th_hat[vi], M_PI)) { + is_trivial = false; + break; + } + } while (h != h_start); + + if (is_trivial) { + 
spdlog::info("Adjusting trivial loop at {}", h); + int vi = vtx_reindex[m.v_rep[m.to[h]]]; + int vj = vtx_reindex[m.v_rep[m.to[m.opp[h]]]]; + Th_hat[vi] += M_PI / 2.; + Th_hat[vj] -= M_PI / 2.; + } + } +} + +void make_interior_free(Mesh& m) +{ + m.fixed_dof = std::vector(m.n_ind_vertices(), true); + auto bd_vertices = find_boundary_vertices(m); + for (int vi : bd_vertices) { + m.fixed_dof[m.v_rep[vi]] = false; + } +} + +} // namespace Holonomy +} // namespace Penner \ No newline at end of file diff --git a/src/holonomy/holonomy/constraint.cpp b/src/holonomy/holonomy/constraint.cpp new file mode 100644 index 0000000..1e97192 --- /dev/null +++ b/src/holonomy/holonomy/constraint.cpp @@ -0,0 +1,393 @@ +#include "holonomy/holonomy/constraint.h" + +#include "holonomy/holonomy/holonomy.h" + +#include "optimization/core/constraint.h" + +namespace Penner { +namespace Holonomy { + +VectorX Theta(const Mesh& m, const VectorX& alpha) +{ + // Sum up angles around vertices + VectorX t(m.n_ind_vertices()); + t.setZero(); + for (int h = 0; h < m.n_halfedges(); h++) { + t[m.v_rep[m.to[m.n[h]]]] += alpha[h]; + } + SPDLOG_DEBUG("Cone angles with mean {} and norm {}", t.mean(), t.norm()); + return t; +} + +VectorX Kappa( + const Mesh& m, + const std::vector>& homology_basis_loops, + const VectorX& alpha) +{ + int n_s = homology_basis_loops.size(); + VectorX k(n_s); + for (int i = 0; i < n_s; ++i) { + k[i] = compute_dual_loop_holonomy(m, alpha, *homology_basis_loops[i]); + } + + return k; +} + +// Compute dual loop holonomy for a similarity metric with given dual loops +VectorX Kappa(const MarkedPennerConeMetric& marked_metric, const VectorX& alpha) +{ + const auto& homology_basis_loops = marked_metric.get_homology_basis_loops(); + return Kappa(marked_metric, homology_basis_loops, alpha); +} + +// Add vertex angle constraints +// TODO Allow for additional fixed degrees of freedom +void add_vertex_constraints( + const MarkedPennerConeMetric& marked_metric, + const std::vector v_map, + 
const VectorX& angles, + VectorX& constraint, + int offset) +{ + int n_v = marked_metric.n_ind_vertices(); + VectorX t = Theta(marked_metric, angles); + for (int i = 0; i < n_v; i++) { + if (v_map[i] < 0) continue; + constraint[offset + v_map[i]] = marked_metric.Th_hat[i] - t(i); + } +} + +void add_basis_loop_constraints( + const MarkedPennerConeMetric& marked_metric, + const VectorX& angles, + VectorX& constraint, + int offset) +{ + // Add holonomy constraints + // TODO Allow for additional fixed degrees of freedom + int n_s = marked_metric.n_homology_basis_loops(); + VectorX k = Kappa(marked_metric, angles); + for (int i = 0; i < n_s; i++) { + constraint[offset + i] = marked_metric.kappa_hat[i] - k(i); + } +} + +std::vector compute_dependent_edges(const MarkedPennerConeMetric& marked_metric) +{ + // find boundary edges + int num_halfedges = marked_metric.n_halfedges(); + int num_edges = marked_metric.n_edges(); + std::vector is_boundary_edge(num_edges, false); + for (int h = 0; h < num_halfedges; ++h) { + if (marked_metric.opp[marked_metric.R[h]] == h) { + is_boundary_edge[marked_metric.he2e[h]] = true; + } + } + + // make list of dependent edges (using lowest index halfedge) + std::vector dependent_edges = {}; + for (int h = 0; h < num_halfedges; ++h) { + if (is_boundary_edge[marked_metric.he2e[h]]) continue; + if (marked_metric.opp[h] < h) continue; + if (marked_metric.R[h] < h) continue; + if (marked_metric.opp[marked_metric.R[h]] < h) continue; + + dependent_edges.push_back(marked_metric.he2e[h]); + } + spdlog::debug("{}/{} dependent edges", dependent_edges.size(), num_edges); + + return dependent_edges; +} + +void add_symmetry_constraints( + const MarkedPennerConeMetric& marked_metric, + const std::vector& dependent_edges, + VectorX& constraint, + int offset) +{ + // add symmetry constraints + int num_dep_edges = dependent_edges.size(); + for (int i = 0; i < num_dep_edges; i++) { + int e = dependent_edges[i]; + int h = marked_metric.e2he[e]; + int Rh = 
marked_metric.R[h]; + constraint[offset + i] = marked_metric.original_coords[h] - marked_metric.original_coords[Rh]; + } +} + +VectorX compute_vertex_constraint( + const MarkedPennerConeMetric& marked_metric, + const VectorX& angles) +{ + // Use all vertices + std::vector v_map = marked_metric.v_rep; + int n_v = marked_metric.n_ind_vertices(); + + // Build the constraint + VectorX constraint = VectorX::Zero(n_v); + add_vertex_constraints(marked_metric, v_map, angles, constraint, 0); + + return constraint; +} + +VectorX compute_metric_constraint( + const MarkedPennerConeMetric& marked_metric, + const VectorX& angles, + bool only_free_vertices) +{ + int n_s = marked_metric.n_homology_basis_loops(); + + std::vector v_map; + int n_v; + if (only_free_vertices) { + Optimization::build_free_vertex_map(marked_metric, v_map, n_v); + } else { + v_map = marked_metric.v_rep; + n_v = marked_metric.n_ind_vertices(); + } + + // Build the constraint + VectorX constraint = VectorX::Zero(n_v + n_s); + add_vertex_constraints(marked_metric, v_map, angles, constraint, 0); + add_basis_loop_constraints(marked_metric, angles, constraint, n_v); + + return constraint; +} + +VectorX _compute_metric_constraint( + const MarkedPennerConeMetric& marked_metric, + const VectorX& angles) +{ + int n_s = marked_metric.n_homology_basis_loops(); + + std::vector v_map; + int n_v; + Optimization::build_free_vertex_map(marked_metric, v_map, n_v); + spdlog::debug("{} vertex constraints", n_v); + + // Build the constraint + std::vector dependent_edges = compute_dependent_edges(marked_metric); + VectorX constraint = VectorX::Zero(n_v + n_s + dependent_edges.size()); + add_vertex_constraints(marked_metric, v_map, angles, constraint, 0); + add_basis_loop_constraints(marked_metric, angles, constraint, n_v); + add_symmetry_constraints(marked_metric, dependent_edges, constraint, n_v + n_s); + + return constraint; +} + +// Compute the derivatives of metric corner angles with respect to halfedge lengths +MatrixX 
compute_metric_corner_angle_jacobian( + const MarkedPennerConeMetric& marked_metric, + const VectorX& cotangents) +{ + // Create list of triplets of Jacobian indices and values + int num_halfedges = marked_metric.n_halfedges(); + typedef Eigen::Triplet T; + std::vector tripletList; + tripletList.reserve(3 * num_halfedges); + for (int hjk = 0; hjk < num_halfedges; ++hjk) { + int hki = marked_metric.n[hjk]; + int hij = marked_metric.n[hki]; + + // Get cotangents (opposite halfedge) + Scalar cj = cotangents[hki]; + Scalar ck = cotangents[hij]; + + // Add entries + tripletList.push_back(T(hjk, hij, 0.5 * cj)); + tripletList.push_back(T(hjk, hjk, -0.5 * (cj + ck))); + tripletList.push_back(T(hjk, hki, 0.5 * ck)); + } + + // Build reduced coordinate Jacobian from triplet list + return marked_metric.change_metric_to_reduced_coordinates(tripletList, num_halfedges); +} + +// Compute the linear map from corner angles to vertex and dual loop holonomy constraints +MatrixX compute_holonomy_matrix( + const Mesh& m, + const std::vector& v_map, + const std::vector>& dual_loops, + int num_vertex_forms) +{ + int num_halfedges = m.n_halfedges(); + typedef Eigen::Triplet T; + std::vector tripletList; + tripletList.reserve(2 * num_halfedges); + + // Add vertex constraints + for (int h = 0; h < num_halfedges; ++h) { + int v = v_map[m.v_rep[m.to[m.n[h]]]]; + if (v < 0) continue; + tripletList.push_back(T(v, h, 1.0)); + } + + // Add dual loop holonomy constraints + int num_loops = dual_loops.size(); + for (int i = 0; i < num_loops; ++i) { + for (const auto& dual_segment : *dual_loops[i]) { + int h_start = dual_segment[0]; + int h_end = dual_segment[1]; + + // Negative angle if the segment subtends an angle to the right + if (h_end == m.n[h_start]) { + int h_opp = m.n[h_end]; // halfedge opposite subtended angle + tripletList.push_back(T(num_vertex_forms + i, h_opp, -1.0)); + } + // Positive angle if the segment subtends an angle to the left + else if (h_start == m.n[h_end]) { + int h_opp 
= m.n[h_start]; // halfedge opposite subtended angle + tripletList.push_back(T(num_vertex_forms + i, h_opp, 1.0)); + } + } + } + + // Create the matrix from the triplets + MatrixX holonomy_matrix; + holonomy_matrix.resize(num_vertex_forms + num_loops, num_halfedges); + holonomy_matrix.reserve(tripletList.size()); + holonomy_matrix.setFromTriplets(tripletList.begin(), tripletList.end()); + return holonomy_matrix; +} + +MatrixX compute_metric_constraint_jacobian( + const MarkedPennerConeMetric& marked_metric, + const VectorX& cotangents, + bool only_free_vertices) +{ + // Get vertex representation + std::vector v_map; + int num_vertex_forms; + if (only_free_vertices) { + Optimization::build_free_vertex_map(marked_metric, v_map, num_vertex_forms); + } else { + v_map = marked_metric.v_rep; + num_vertex_forms = marked_metric.n_ind_vertices(); + } + + // Get corner angle derivatives with respect to metric coordinates + MatrixX J_corner_angle_metric = compute_metric_corner_angle_jacobian(marked_metric, cotangents); + + // Get matrix summing up corner angles to form holonomy matrix + const auto& homology_basis_loops = marked_metric.get_homology_basis_loops(); + MatrixX holonomy_matrix = + compute_holonomy_matrix(marked_metric, v_map, homology_basis_loops, num_vertex_forms); + + // Build holonomy constraint jacobian + return holonomy_matrix * J_corner_angle_metric; +} + +MatrixX compute_symmetry_constraint_jacobian( + const MarkedPennerConeMetric& marked_metric, + const std::vector& dependent_edges) +{ + int num_dep_edges = dependent_edges.size(); + int num_edges = marked_metric.n_edges(); + typedef Eigen::Triplet T; + std::vector tripletList; + tripletList.reserve(2 * num_dep_edges); + + for (int i = 0; i < num_dep_edges; i++) { + int e = dependent_edges[i]; + int h = marked_metric.e2he[e]; + int Rh = marked_metric.R[h]; + int Re = marked_metric.he2e[Rh]; + tripletList.push_back(T(i, e, 1.)); + tripletList.push_back(T(i, Re, -1.)); + } + + // Create the matrix from the 
triplets + MatrixX symmetry_matrix; + symmetry_matrix.resize(num_dep_edges, num_edges); + symmetry_matrix.reserve(tripletList.size()); + symmetry_matrix.setFromTriplets(tripletList.begin(), tripletList.end()); + //spdlog::info("Matrix {}", symmetry_matrix); + return symmetry_matrix; +} + +MatrixX _compute_metric_constraint_jacobian( + const MarkedPennerConeMetric& marked_metric, + const VectorX& cotangents) +{ + // Get vertex representation + int num_vertex_forms; + std::vector v_map; + Optimization::build_free_vertex_map(marked_metric, v_map, num_vertex_forms); + + // Get corner angle derivatives with respect to metric coordinates + MatrixX J_corner_angle_metric = compute_metric_corner_angle_jacobian(marked_metric, cotangents); + + // Get matrix summing up corner angles to form holonomy matrix + const auto& homology_basis_loops = marked_metric.get_homology_basis_loops(); + MatrixX holonomy_matrix = + compute_holonomy_matrix(marked_metric, v_map, homology_basis_loops, num_vertex_forms); + + // Build holonomy constraint jacobian + MatrixX J_holonomy_constraint = holonomy_matrix * J_corner_angle_metric; + + // Build symmetry constraint jacobian + std::vector dependent_edges = compute_dependent_edges(marked_metric); + MatrixX J_symmetry_constraint = + compute_symmetry_constraint_jacobian(marked_metric, dependent_edges); + + // Assemble matrix from two components + int r0 = J_holonomy_constraint.rows(); + int r1 = J_symmetry_constraint.rows(); + int c = J_holonomy_constraint.cols(); + assert(c == J_symmetry_constraint.cols()); + MatrixX J_transpose(c, r0 + r1); + J_transpose.leftCols(r0) = J_holonomy_constraint.transpose(); + J_transpose.rightCols(r1) = J_symmetry_constraint.transpose(); + + return J_transpose.transpose(); +} + +// Helper function to compute metric constraint assuming a discrete metric +void metric_constraint_with_jacobian_helper( + const MarkedPennerConeMetric& marked_metric, + VectorX& constraint, + MatrixX& J_constraint, + bool need_jacobian, + bool 
only_free_vertices) +{ + // Get angles and cotangent of angles of faces opposite halfedges + VectorX he2angle; + VectorX cotangents; + marked_metric.get_corner_angles(he2angle, cotangents); + + // Compute constraint and (optionally) the Jacobian + constraint = compute_metric_constraint(marked_metric, he2angle, only_free_vertices); + if (need_jacobian) { + J_constraint = compute_metric_constraint_jacobian(marked_metric, cotangents, only_free_vertices); + } +} + +void compute_metric_constraint_with_jacobian( + const MarkedPennerConeMetric& marked_metric, + VectorX& constraint, + MatrixX& J_constraint, + bool need_jacobian, + bool only_free_vertices) +{ + // Ensure current cone metric coordinates are log lengths + if (marked_metric.is_discrete_metric()) { + metric_constraint_with_jacobian_helper( + marked_metric, + constraint, + J_constraint, + need_jacobian, + only_free_vertices); + } else { + MarkedPennerConeMetric marked_metric_copy = marked_metric; + marked_metric_copy.make_discrete_metric(); + metric_constraint_with_jacobian_helper( + marked_metric_copy, + constraint, + J_constraint, + need_jacobian, + only_free_vertices); + } +} + +} // namespace Holonomy +} // namespace Penner diff --git a/src/holonomy/holonomy/holonomy.cpp b/src/holonomy/holonomy/holonomy.cpp new file mode 100644 index 0000000..df8f5d8 --- /dev/null +++ b/src/holonomy/holonomy/holonomy.cpp @@ -0,0 +1,90 @@ +#include "holonomy/holonomy/holonomy.h" + +#include "holonomy/core/dual_loop.h" + +#include "optimization/core/area.h" + +#include + +namespace Penner { +namespace Holonomy { + +Scalar compute_dual_segment_holonomy( + const Mesh& m, + const VectorX& he2angle, + const DualSegment& dual_segment) +{ + assert(is_valid_dual_segment(m, dual_segment)); + int h_start = dual_segment[0]; + int h_end = dual_segment[1]; + + // Return negative angle if the segment subtends an angle to the right + if (h_end == m.n[h_start]) { + int h_opp = m.n[h_end]; // halfedge opposite subtended angle + return 
-he2angle[h_opp]; + } + // Return positive angle if the segment subtends an angle to the left + else if (h_start == m.n[h_end]) { + int h_opp = m.n[h_start]; // halfedge opposite subtended angle + return he2angle[h_opp]; + } + // Trivial dual segment + else if (h_start == h_end) { + throw std::runtime_error("Cannot compute holonomy for a trivial dual segment"); + return 0.0; + } + // Segment in non-triangular face + else { + throw std::runtime_error("Cannot compute holonomy for a nontriangular face"); + return 0.0; + } +} + +Scalar compute_dual_loop_holonomy( + const Mesh& m, + const VectorX& he2angle, + const DualLoop& dual_loop) +{ + // Sum up holonomy for all segments of the loop + Scalar holonomy = 0.0; + for (const auto& dual_segment : dual_loop) { + holonomy += compute_dual_segment_holonomy(m, he2angle, dual_segment); + } + + return holonomy; +} + +Scalar compute_dual_segment_rotation( + const Mesh& m, + const VectorX& rotation_form, + const DualSegment& dual_segment, + bool remove_boundary_rotation=false) +{ + int h_start = dual_segment[0]; + + // Skip boundary edges + if ((remove_boundary_rotation) && (m.type[h_start] != 0) && (m.opp[m.R[h_start]] == h_start)) + { + return 0.; + } + + // Return angle of first edge + return rotation_form(h_start); +} + +Scalar compute_dual_loop_rotation( + const Mesh& m, + const VectorX& rotation_form, + const DualLoop& dual_loop) +{ + // Sum up rotation for all segments of the loop + Scalar rotation = 0.0; + for (const auto& dual_segment : dual_loop) { + rotation += compute_dual_segment_rotation(m, rotation_form, dual_segment); + } + + return rotation; +} + +} // namespace Holonomy +} // namespace Penner \ No newline at end of file diff --git a/src/holonomy/holonomy/marked_penner_cone_metric.cpp b/src/holonomy/holonomy/marked_penner_cone_metric.cpp new file mode 100644 index 0000000..7d3d8a9 --- /dev/null +++ b/src/holonomy/holonomy/marked_penner_cone_metric.cpp @@ -0,0 +1,427 @@ +#include 
"holonomy/holonomy/marked_penner_cone_metric.h" + +#include "util/vector.h" +#include "holonomy/core/viewer.h" +#include "holonomy/holonomy/constraint.h" +#include "holonomy/holonomy/holonomy.h" +#include "holonomy/holonomy/newton.h" + +#include "optimization/core/constraint.h" +#include "optimization/core/projection.h" + +#include "conformal_ideal_delaunay/ConformalInterface.hh" + +#ifdef ENABLE_VISUALIZATION +#include "polyscope/surface_mesh.h" +#endif + +namespace Penner { +namespace Holonomy { + +bool is_reflection_structure_valid( + const std::vector& next, + const std::vector& prev, + const std::vector& opp, + const std::vector& R, + const std::vector& type) +{ + int num_halfedges = next.size(); + + // check reflection structure + for (int hij = 0; hij < num_halfedges; ++hij) { + // check R has order 2 + if (R[R[hij]] != hij) { + spdlog::warn("{} is not invariant under R-R", hij); + return false; + } + + // check R preserves opp + if (R[opp[hij]] != opp[R[hij]]) { + spdlog::warn("{} under opp is not preserved by R", hij); + return false; + } + + // check R inverts next + if (R[prev[hij]] != next[R[hij]]) { + spdlog::warn("{} under next is not inverted by R", hij); + return false; + } + + // check edge typing + if ((type[hij] == 1) && (type[R[hij]] != 2)) + { + spdlog::warn("{} type (1) is not changed by R", hij); + return false; + } + if ((type[hij] == 2) && (type[R[hij]] != 1)) + { + spdlog::warn("{} type (2) is not changed by R", hij); + return false; + } + if ((type[hij] == 3) && (type[R[hij]] != 3)) + { + spdlog::warn("{} type (3) is not fixed by R", hij); + return false; + } + if ((type[hij] == 4) && (type[R[hij]] != 4)) + { + spdlog::warn("{} type (4) is not fixed by R", hij); + return false; + } + } + + return true; +} + +bool is_valid_mesh(const Mesh& m) +{ + // build previous map + std::vector prev = invert_map(m.n); + + // check edge, face, and vertex conditions + if (!are_polygon_mesh_edges_valid(m.n, prev)) + { + spdlog::warn("Edges are invalid"); 
+ return false; + } + if (!are_polygon_mesh_faces_valid(m.n, m.f, m.h)) + { + spdlog::warn("Faces are invalid"); + return false; + } + if (!are_polygon_mesh_vertices_valid(m.opp, prev, m.to, m.out)) + { + spdlog::warn("Vertices are invalid"); + return false; + } + + // check reflection if doubled mesh + if ((m.type[0] != 0) && (!is_reflection_structure_valid(m.n, prev, m.opp, m.R, m.type))) + { + spdlog::warn("Symmetry structure is invalid"); + return false; + } + + return true; +} + +MarkedPennerConeMetric::MarkedPennerConeMetric( + const Mesh& m, + const VectorX& metric_coords, + const std::vector>& homology_basis_loops, + const std::vector& kappa) + : Optimization::PennerConeMetric(m, metric_coords) + , kappa_hat(kappa) + , m_dual_loop_manager(m.n_edges()) +{ + assert(is_valid_mesh(m)); + + int num_basis_loops = homology_basis_loops.size(); + m_homology_basis_loops.reserve(num_basis_loops); + for (int i = 0; i < num_basis_loops; ++i) { + m_homology_basis_loops.push_back(homology_basis_loops[i]->clone()); + m_dual_loop_manager.register_loop_edges(i, m, *homology_basis_loops[i]); + } + + // TODO + int num_halfedges = m.n_halfedges(); + original_coords.resize(num_halfedges); + for (int h = 0; h < num_halfedges; ++h) { + original_coords[h] = 2. 
* log(l[h]); + } +} + + +MarkedPennerConeMetric::MarkedPennerConeMetric(const MarkedPennerConeMetric& marked_metric) + : MarkedPennerConeMetric( + marked_metric, + marked_metric.get_metric_coordinates(), + marked_metric.get_homology_basis_loops(), + marked_metric.kappa_hat) +{ + assert(is_valid_mesh(marked_metric)); +} + +void MarkedPennerConeMetric::operator=(const MarkedPennerConeMetric& m) +{ + n = m.n; + to = m.to; + f = m.f; + h = m.h; + out = m.out; + opp = m.opp; + l = m.l; + type = m.type; + type_input = m.type_input; + R = m.R; + v_rep = m.v_rep; + Th_hat = m.Th_hat; + fixed_dof = m.fixed_dof; + pts = m.pts; + pt_in_f = m.pt_in_f; + + he2e = m.he2e; + e2he = m.e2he; + m_is_discrete_metric = m.m_is_discrete_metric; + m_flip_seq = m.m_flip_seq; + m_identification = m.m_identification; + m_embed = m.m_embed; + m_proj = m.m_proj; + m_projection = m.m_projection; + m_need_jacobian = m.m_need_jacobian; + m_transition_jacobian_lol = m.m_transition_jacobian_lol; + + kappa_hat = m.kappa_hat; + m_dual_loop_manager = m.m_dual_loop_manager; + int num_basis_loops = m.get_homology_basis_loops().size(); + m_homology_basis_loops.clear(); + m_homology_basis_loops.reserve(num_basis_loops); + for (int i = 0; i < num_basis_loops; ++i) { + m_homology_basis_loops.push_back(m.get_homology_basis_loops()[i]->clone()); + } +} + +void MarkedPennerConeMetric::reset_connectivity(const MarkedPennerConeMetric& m) +{ + // Halfedge arrays + int num_halfedges = n_halfedges(); + for (int h = 0; h < num_halfedges; ++h) { + n[h] = m.n[h]; + to[h] = m.to[h]; + f[h] = m.f[h]; + l[h] = m.l[h]; + type[h] = m.type[h]; + R[h] = m.R[h]; + + // opp, he2e, e2he do not change + } + + // Vertex arrays + int num_vertices = n_vertices(); + for (int v = 0; v < num_vertices; ++v) { + out[v] = m.out[v]; + + // v_rep, Th_hat, fixed_dof do not change + } + + // Face arrays + int num_faces = n_faces(); + for (int f = 0; f < num_faces; ++f) { + h[f] = m.h[f]; + } +} + +void 
MarkedPennerConeMetric::reset_markings(const MarkedPennerConeMetric& m) +{ + // Loop data + int num_basis_loops = n_homology_basis_loops(); + for (int i = 0; i < num_basis_loops; ++i) { + m_homology_basis_loops[i] = m.get_homology_basis_loops()[i]->clone(); + + // kappa_hat does not change + } +} + +void MarkedPennerConeMetric::reset_marked_metric(const MarkedPennerConeMetric& m) +{ + // Reset connectivity and markings + reset_connectivity(m); + reset_markings(m); + + // Clear flip data + m_is_discrete_metric = false; + m_flip_seq.clear(); + PennerConeMetric::reset(); +} + +void MarkedPennerConeMetric::change_metric( + const MarkedPennerConeMetric& m, + const VectorX& metric_coords, + bool need_jacobian, + bool do_repeat_flips) +{ + // Restore connectivity to that of m + reset_connectivity(m); + + // Change metric coordinates + PennerConeMetric::expand_metric_coordinates(metric_coords); + std::vector flip_seq = m_flip_seq; + spdlog::debug("Repeating {} flips", m_flip_seq.size()); + m_flip_seq.clear(); + m_is_discrete_metric = false; + m_need_jacobian = need_jacobian; + PennerConeMetric::reset(); + + // TODO + int num_halfedges = m.n_halfedges(); + original_coords.resize(num_halfedges); + for (int h = 0; h < num_halfedges; ++h) { + original_coords[h] = 2. 
* log(l[h]); + } + + // Flip back to current connectivity if flag set + if (do_repeat_flips) { + for (int h : flip_seq) { + PennerConeMetric::flip_ccw(h); + } + spdlog::debug("{} flips performed", m_flip_seq.size()); + } + // Reset markings if using original connectivity + else { + reset_markings(m); + } +} + +std::unique_ptr MarkedPennerConeMetric::set_metric_coordinates( + const VectorX& metric_coords) const +{ + return std::make_unique( + MarkedPennerConeMetric(*this, metric_coords, m_homology_basis_loops, kappa_hat)); +} + +bool MarkedPennerConeMetric::constraint( + VectorX& constraint, + MatrixX& J_constraint, + bool need_jacobian, + bool only_free_vertices) const +{ + compute_metric_constraint_with_jacobian( + *this, + constraint, + J_constraint, + need_jacobian, + only_free_vertices); + return true; +} + +Scalar MarkedPennerConeMetric::max_constraint_error() const +{ + VectorX cons; + MatrixX J_constraint; + bool need_jacobian = false; + bool only_free_vertices = true; + constraint(cons, J_constraint, need_jacobian, only_free_vertices); + return cons.cwiseAbs().maxCoeff(); +} + +VectorX MarkedPennerConeMetric::constraint(const VectorX& angles) +{ + return compute_metric_constraint(*this, angles); +} + +MatrixX MarkedPennerConeMetric::constraint_jacobian(const VectorX& cotangents) +{ + return compute_metric_constraint_jacobian(*this, cotangents); +} + +std::unique_ptr MarkedPennerConeMetric::project_to_constraint( + SolveStats& solve_stats, + std::shared_ptr proj_params) const +{ + // Copy parameters + NewtonParameters alg_params; + alg_params.max_itr = proj_params->max_itr; + alg_params.bound_norm_thres = proj_params->bound_norm_thres; + alg_params.error_eps = proj_params->error_eps; + alg_params.do_reduction = proj_params->do_reduction; + alg_params.output_dir = proj_params->output_dir; + + // Optimize metric angles + MatrixX identity = id_matrix(n_reduced_coordinates()); + NewtonLog log; + MarkedPennerConeMetric optimized_metric = + 
optimize_subspace_metric_angles_log(*this, identity, alg_params, log); + + // Return output + solve_stats.n_solves = log.num_iter; + return std::make_unique(optimized_metric); +} + +bool MarkedPennerConeMetric::flip_ccw(int _h, bool Ptolemy) +{ + // Flip the homology basis loops + bool do_bypass_manager = false; + if (do_bypass_manager) { + for (auto& homology_basis_loop : m_homology_basis_loops) { + homology_basis_loop->update_under_ccw_flip(*this, _h); + } + } else { + for (int loop_index : m_dual_loop_manager.get_edge_loops(he2e[_h])) { + // Update loop + m_homology_basis_loops[loop_index]->update_under_ccw_flip(*this, _h); + + // Add all adjacent edges (conservative guess at actual adjacent edges) + for (int adj_h : {n[_h], n[n[_h]], n[opp[_h]], n[n[opp[_h]]]}) { + m_dual_loop_manager.add_loop(he2e[adj_h], loop_index); + } + } + } + + // Perform the flip in the base class + bool success = PennerConeMetric::flip_ccw(_h, Ptolemy); + + return success; +} + +void MarkedPennerConeMetric::write_status_log(std::ostream& stream, bool write_header) +{ + if (write_header) { + stream << "num_flips,"; + stream << std::endl; + } + + stream << num_flips() << ","; + stream << std::endl; +} + +void view_homology_basis( + const MarkedPennerConeMetric& marked_metric, + const std::vector& vtx_reindex, + const Eigen::MatrixXd& V, + int num_homology_basis_loops, + std::string mesh_handle, + bool show) +{ + int num_vertices = V.rows(); + if (show) { + spdlog::info( + "Viewing {} loops on mesh {} with {} vertices", + num_homology_basis_loops, + mesh_handle, + num_vertices); + } + auto [F_mesh, F_halfedge] = generate_mesh_faces(marked_metric, vtx_reindex); + +#ifdef ENABLE_VISUALIZATION + if (num_homology_basis_loops < 0) { + num_homology_basis_loops = marked_metric.n_homology_basis_loops(); + } else { + num_homology_basis_loops = + std::min(marked_metric.n_homology_basis_loops(), num_homology_basis_loops); + } + polyscope::init(); + if (mesh_handle == "") { + mesh_handle = 
"homology_basis_mesh"; + polyscope::registerSurfaceMesh(mesh_handle, V, F_mesh); + polyscope::getSurfaceMesh(mesh_handle)->setSurfaceColor(MUSTARD); + } + for (int i = 0; i < num_homology_basis_loops; ++i) { + const auto& homology_basis_loops = marked_metric.get_homology_basis_loops(); + std::vector dual_loop_faces = + homology_basis_loops[i]->generate_face_sequence(marked_metric); + + int num_faces = marked_metric.n_faces(); + Eigen::VectorXd is_dual_loop_face; + is_dual_loop_face.setZero(num_faces); + for (const auto& dual_loop_face : dual_loop_faces) { + is_dual_loop_face(dual_loop_face) = 1.0; + } + polyscope::getSurfaceMesh(mesh_handle) + ->addFaceScalarQuantity("dual_loop_" + std::to_string(i), is_dual_loop_face); + } + if (show) polyscope::show(); +#endif +} + +} // namespace Holonomy +} // namespace Penner \ No newline at end of file diff --git a/src/holonomy/holonomy/newton.cpp b/src/holonomy/holonomy/newton.cpp new file mode 100644 index 0000000..6a7e1cb --- /dev/null +++ b/src/holonomy/holonomy/newton.cpp @@ -0,0 +1,742 @@ +#include "holonomy/holonomy/newton.h" + +#include "holonomy/holonomy/constraint.h" +#include "holonomy/holonomy/holonomy.h" +#include "holonomy/core/viewer.h" +#include "util/vector.h" + +#include +#include "optimization/metric_optimization/energies.h" +#include "optimization/metric_optimization/energy_functor.h" +#include "optimization/core/projection.h" +#include "optimization/core/shear.h" +#include "util/io.h" + +#ifdef USE_SUITESPARSE +#include +#include +#endif + +#ifdef ENABLE_VISUALIZATION +#include "polyscope/surface_mesh.h" +#endif + +namespace Penner { +namespace Holonomy { + +// Initialize logging level +void OptimizeHolonomyNewton::initialize_logging() { + switch (alg_params.log_level) { + case 6: spdlog::set_level(spdlog::level::trace); break; + case 5: spdlog::set_level(spdlog::level::debug); break; + case 4: spdlog::set_level(spdlog::level::info); break; + case 3: spdlog::set_level(spdlog::level::warn); break; + case 
2: spdlog::set_level(spdlog::level::err); break; + case 1: spdlog::set_level(spdlog::level::critical); break; + case 0: spdlog::set_level(spdlog::level::off); break; + } +} + +void OptimizeHolonomyNewton::initialize_metric_status_log(MarkedPennerConeMetric& marked_metric) +{ + // Open main logging file + std::string data_log_path = join_path(alg_params.output_dir, "metric_status_log.csv"); + spdlog::info("Writing data to {}", data_log_path); + metric_status_file = std::ofstream(data_log_path, std::ios::out | std::ios::trunc); + marked_metric.write_status_log(metric_status_file, true); +} + +// Open a per iteration data log and write a header +void OptimizeHolonomyNewton::initialize_data_log() +{ + // Do nothing if error logging disabled + if (!alg_params.error_log) return; + + // Generate data log path + std::filesystem::create_directory(alg_params.output_dir); + std::string data_log_path; + + // Open main logging file + data_log_path = join_path(alg_params.output_dir, "iteration_data_log.csv"); + spdlog::info("Writing data to {}", data_log_path); + log_file = std::ofstream(data_log_path, std::ios::out | std::ios::trunc); + log_file << "num_iter,"; + log_file << "max_error,"; + log_file << "step_size,"; + log_file << "rmsre,"; + log_file << "time,"; + log_file << "solve_time,"; + log_file << std::endl; +} + +// Write newton log iteration data to file +void OptimizeHolonomyNewton::write_data_log_entry() +{ + // Do nothing if error logging disabled + if (!alg_params.error_log) return; + + // Write iteration row + log_file << log.num_iter << ","; + log_file << std::fixed << std::setprecision(17) << log.max_error << ","; + log_file << std::fixed << std::setprecision(17) << log.step_size << ","; + log_file << std::fixed << std::setprecision(17) << log.rmsre << ","; + log_file << std::fixed << std::setprecision(17) << log.time << ","; + log_file << std::fixed << std::setprecision(17) << log.solve_time << ","; + log_file << std::endl; +} + +void 
OptimizeHolonomyNewton::initialize_timing_log() +{ + // Open timing logging file + std::string data_log_path = join_path(alg_params.output_dir, "iteration_timing_log.csv"); + spdlog::info("Writing timing data to {}", data_log_path); + timing_file = std::ofstream(data_log_path, std::ios::out | std::ios::trunc); + timing_file << "time,"; + timing_file << "solve_time,"; + timing_file << "constraint_time,"; + timing_file << "direction_time,"; + timing_file << "line_search_time,"; + timing_file << std::endl; +} + +// Write newton log iteration data to file +void OptimizeHolonomyNewton::write_timing_log_entry() +{ + // Do nothing if error logging disabled + if (!alg_params.error_log) return; + + // Write iteration row + timing_file << std::fixed << std::setprecision(8) << log.time << ","; + timing_file << std::fixed << std::setprecision(8) << log.solve_time << ","; + timing_file << std::fixed << std::setprecision(8) << log.constraint_time << ","; + timing_file << std::fixed << std::setprecision(8) << log.direction_time << ","; + timing_file << std::fixed << std::setprecision(8) << log.line_search_time << ","; + timing_file << std::endl; +} + + +void OptimizeHolonomyNewton::initialize_energy_log() +{ + std::string data_log_path = join_path(alg_params.output_dir, "iteration_energy_log.csv"); + spdlog::info("Writing energy data to {}", data_log_path); + energy_file = std::ofstream(data_log_path, std::ios::out | std::ios::trunc); + energy_file << "l2_energy,"; + energy_file << "rmsre,"; + energy_file << "rmse,"; + energy_file << "rrmse,"; + energy_file << std::endl; +} + +// Write newton log iteration data to file +void OptimizeHolonomyNewton::write_energy_log_entry() +{ + // Do nothing if error logging disabled + if (!alg_params.error_log) return; + + energy_file << std::fixed << std::setprecision(8) << log.l2_energy << ","; + energy_file << std::fixed << std::setprecision(8) << log.rmsre << ","; + energy_file << std::fixed << std::setprecision(8) << log.rmse << ","; + 
energy_file << std::fixed << std::setprecision(8) << log.rrmse << ","; + energy_file << std::endl; +} + +void OptimizeHolonomyNewton::initialize_stability_log() +{ + std::string data_log_path = join_path(alg_params.output_dir, "iteration_stability_log.csv"); + spdlog::info("Writing stability data to {}", data_log_path); + stability_file = std::ofstream(data_log_path, std::ios::out | std::ios::trunc); + stability_file << "max_error,"; + stability_file << "step_size,"; + stability_file << "num_flips,"; + stability_file << "min_corner_angle,"; + stability_file << "max_corner_angle,"; + stability_file << "direction_angle_change,"; + stability_file << "direction_norm,"; + stability_file << "direction_residual,"; + stability_file << std::endl; + +} + +// Write newton log iteration data to file +void OptimizeHolonomyNewton::write_stability_log_entry() +{ + // Do nothing if error logging disabled + if (!alg_params.error_log) return; + + // Write iteration row + stability_file << std::scientific << std::setprecision(8) << log.max_error << ","; + stability_file << std::scientific << std::setprecision(8) << log.step_size << ","; + stability_file << log.num_flips << ","; + stability_file << std::scientific << std::setprecision(8) << log.min_corner_angle << ","; + stability_file << std::scientific << std::setprecision(8) << log.max_corner_angle << ","; + stability_file << std::scientific << std::setprecision(8) << log.direction_angle_change << ","; + stability_file << std::scientific << std::setprecision(8) << log.direction_norm << ","; + stability_file << std::scientific << std::setprecision(8) << log.direction_residual << ","; + stability_file << std::endl; +} + +// Open all logs +void OptimizeHolonomyNewton::initialize_logs() +{ + initialize_data_log(); + initialize_timing_log(); + initialize_energy_log(); + initialize_stability_log(); +} + +void OptimizeHolonomyNewton::write_log_entries() +{ + write_data_log_entry(); + write_timing_log_entry(); + write_energy_log_entry(); + 
write_stability_log_entry(); +} + +// Close the error log file +void OptimizeHolonomyNewton::close_logs() +{ + // Do nothing if error logging disabled + if (!alg_params.error_log) return; + + log_file.close(); + timing_file.close(); + energy_file.close(); + stability_file.close(); +} + +// Prepare output directory for checkpoints +void OptimizeHolonomyNewton::initialize_checkpoints() +{ + // Do nothing if checkpointing disabled + if (alg_params.checkpoint_frequency <= 0) return; + + // Create output directory for checkpoints + checkpoint_dir = join_path(alg_params.output_dir, "checkpoint"); + std::filesystem::create_directory(checkpoint_dir); +} + +// Write metric and descent direction data to file +// WARNING: Assumes the written data is updated and consistent +void OptimizeHolonomyNewton::checkpoint_direction() +{ + // Do nothing if this is not a checkpointing iteration + if (alg_params.checkpoint_frequency <= 0) return; + if ((log.num_iter % alg_params.checkpoint_frequency) != 0) return; + std::string checkpoint_path; + std::string suffix = std::to_string(log.num_iter); + + // Write metric coordinates + checkpoint_path = join_path(checkpoint_dir, "metric_coords_" + suffix); + write_vector(reduced_metric_coords, checkpoint_path); + + // Write corner angles + checkpoint_path = join_path(checkpoint_dir, "angles_" + suffix); + write_vector(alpha, checkpoint_path); + + // Write constraint vector + checkpoint_path = join_path(checkpoint_dir, "constraint_" + suffix); + write_vector(constraint, checkpoint_path); + + // Write descent direction + checkpoint_path = join_path(checkpoint_dir, "direction_" + suffix); + write_vector(descent_direction, checkpoint_path); + + // Write Jacobian + checkpoint_path = join_path(checkpoint_dir, "jacobian_" + suffix); + write_sparse_matrix(J, checkpoint_path); +} + +void OptimizeHolonomyNewton::checkpoint_metric(const MarkedPennerConeMetric& marked_metric) { + // Do nothing if this is not a checkpointing iteration + if 
(alg_params.checkpoint_frequency <= 0) return; + if ((log.num_iter % alg_params.checkpoint_frequency) != 0) return; + std::string checkpoint_path; + std::string suffix = std::to_string(log.num_iter); + + // Write best fit scale factors + int num_halfedges = marked_metric.n_halfedges(); + VectorX scale_factors = Optimization::best_fit_conformal(marked_metric, VectorX::Zero(num_halfedges)); + checkpoint_path = join_path(checkpoint_dir, "scale_factors_" + suffix); + write_vector(scale_factors, checkpoint_path); + + // Write edge shears + int num_edges = marked_metric.n_edges(); + MatrixX shear_dual_matrix; + std::vector edges; + arange(num_edges, edges); + Optimization::compute_shear_dual_matrix(marked_metric, edges, shear_dual_matrix); + VectorX metric_coords = marked_metric.get_metric_coordinates(); + VectorX shears = shear_dual_matrix.transpose() * metric_coords; + checkpoint_path = join_path(checkpoint_dir, "shears_" + suffix); + write_vector(shears, checkpoint_path); + + // Write dual loop face sequences + for (int i = 0; i < marked_metric.n_homology_basis_loops(); ++i) { + std::string checkpoint_file = "dual_loop_" + std::to_string(i) + "_" + suffix; + checkpoint_path = join_path(checkpoint_dir, checkpoint_file); + write_vector( + marked_metric.get_homology_basis_loops()[i]->generate_face_sequence(marked_metric), + checkpoint_path); + } +} + +// Update the holonomy and length error log data +void OptimizeHolonomyNewton::update_log_error(const MarkedPennerConeMetric& marked_metric) +{ + // Get edge lengths + int num_edges = reduced_metric_coords.size(); + VectorX l_init(num_edges); + VectorX l(num_edges); + for (int E = 0; E < num_edges; ++E) { + l_init[E] = exp(reduced_metric_init[E] / 2.0); + l[E] = exp(reduced_metric_coords[E] / 2.0); + } + + // Update holonomy error + log.max_error = constraint.cwiseAbs().maxCoeff(); + + // Update metric error + log.l2_energy = l2_energy->EnergyFunctor::energy(marked_metric); + log.rmse = 
Optimization::root_mean_square_error(l, l_init); + log.rrmse = Optimization::relative_root_mean_square_error(l, l_init); + log.rmsre = Optimization::root_mean_square_relative_error(l, l_init); + + // Update corner angle measurements + log.min_corner_angle = alpha.minCoeff(); + log.max_corner_angle = alpha.maxCoeff(); + + // Update changes in angle for the gradient and direction + auto cos_angle = [](const VectorX& v, const VectorX& w) + { + return acos(v.dot(w) / (v.norm() * w.norm())); + }; + if (log.num_iter > 1) + { + log.direction_angle_change = cos_angle(descent_direction, prev_descent_direction); + } +} + +void OptimizeHolonomyNewton::solve_linear_system(const MatrixX& metric_basis_matrix) +{ + // Make matrix for optimization + + // TODO: Split into individual methods + // Generally unstable and slow dense matrix approach + // WARNING: Only use for debugging + spdlog::debug("Using {} solver", alg_params.solver); + if (alg_params.solver == "dense_qr") { +#ifndef MULTIPRECISION + Eigen::MatrixXd A = J * metric_basis_matrix; + double t_solve_start = timer.getElapsedTime(); + Eigen::ColPivHouseholderQR solver(A); + VectorX rhs = solver.solve(constraint); + log.solve_time = timer.getElapsedTime() - t_solve_start; + + descent_direction = -metric_basis_matrix * rhs; +#else + spdlog::error("Dense QR solver not supported for multiprecision"); +#endif + } + // QR based minimum norm computation: more numerically stable than direct pseudo-inverse + else if (alg_params.solver == "qr") { +#ifdef USE_SUITESPARSE + typedef int32_t Int; + typedef Eigen::SparseMatrix PinvMatrix; + + // Make cholmod views of the linear system + PinvMatrix A = J * metric_basis_matrix; + A.makeCompressed(); + PinvMatrix g = constraint.sparseView(); + cholmod_sparse M = + Eigen::viewAsCholmod(Eigen::Ref(A)); + cholmod_sparse b = + Eigen::viewAsCholmod(Eigen::Ref(g)); + + // Run SuiteSparse QR method + double t_solve_start = timer.getElapsedTime(); + int ordering = 7; + Scalar pivotThreshold = -2; 
+ cholmod_common m_cc; // Workspace and parameters + cholmod_start(&m_cc); + cholmod_sparse* cholmod_descent_direction = + SuiteSparseQR_min2norm(ordering, pivotThreshold, &M, &b, &m_cc); + log.solve_time = timer.getElapsedTime() - t_solve_start; + + // Copy descent direction to Eigen vector + descent_direction = + -metric_basis_matrix * + Eigen::viewAsEigen(*cholmod_descent_direction); +#else + spdlog::error("QR with SuiteSparse not available. Set USE_SUITESPARSE to use."); +#endif + } else if (alg_params.solver == "cholmod") { +#ifdef USE_SUITESPARSE + // Build pseudo-inverse + MatrixX A = J * metric_basis_matrix; + MatrixX AAt = A * A.transpose(); + AAt.makeCompressed(); + + // Solve for descent direction (timer consistent with conformal) + double t_solve_start = timer.getElapsedTime(); + Eigen::CholmodSupernodalLLT solver; + solver.compute(AAt); + VectorX rhs = solver.solve(constraint); + log.solve_time = timer.getElapsedTime() - t_solve_start; + if (solver.info() != Eigen::Success) spdlog::error("Solve failed"); + + descent_direction = -metric_basis_matrix * (A.transpose() * rhs); +#else + spdlog::error("Cholmod with SuiteSparse not available. 
Set USE_SUITESPARSE to use."); +#endif + } else { + // Build pseudo-inverse + MatrixX A = J * metric_basis_matrix; + MatrixX AAt = A * A.transpose(); + + // Solve for descent direction (timer consistent with conformal) + double t_solve_start = timer.getElapsedTime(); + Eigen::SimplicialLDLT solver; + solver.compute(AAt); + VectorX rhs = solver.solve(constraint); + log.solve_time = timer.getElapsedTime() - t_solve_start; + + descent_direction = -metric_basis_matrix * (A.transpose() * rhs); + } +} + +// Determine initial lambda for next line search based on method parameters +void OptimizeHolonomyNewton::update_lambda() +{ + if (alg_params.reset_lambda) { + lambda = alg_params.lambda0; + } else { + lambda = std::min(1, 2 * lambda); // adaptive step length + } +} + +// Update the corner angles and constraint given the marked metric +void OptimizeHolonomyNewton::update_holonomy_constraint(MarkedPennerConeMetric& marked_metric) +{ + // TODO Make method + // Check current metric coordinates for validity + if (vector_contains_nan(reduced_metric_coords)) { + spdlog::error("Coordinates contain NaN"); + } + assert(!vector_contains_nan(reduced_metric_coords)); + SPDLOG_DEBUG( + "Coordinates in range [{}, {}]", + reduced_metric_coords.minCoeff(), + reduced_metric_coords.maxCoeff()); + SPDLOG_DEBUG("Coordinates have average {}", reduced_metric_coords.mean()); + + // Get corner angles and metric constraints + marked_metric.make_discrete_metric(); + log.num_flips = marked_metric.num_flips(); + marked_metric.get_corner_angles(alpha, cot_alpha); + constraint = marked_metric.constraint(alpha); + + // TODO Make method + // Check angles for validity + assert(!vector_contains_nan(alpha)); + SPDLOG_DEBUG("Angles in range [{}, {}]", alpha.minCoeff(), alpha.maxCoeff()); + SPDLOG_DEBUG("Angles have average {}", alpha.mean()); +} + + +// Update the corner angles, constraint, constraint jacobian, and descent direction given the +// marked metric +void 
OptimizeHolonomyNewton::update_descent_direction( + MarkedPennerConeMetric& marked_metric, + const MatrixX& metric_basis_matrix) +{ + double t_start; + prev_descent_direction = descent_direction; + + // Compute corner angles and the constraint with its jacobian + // TODO Add safety checks + t_start = timer.getElapsedTime(); + marked_metric.make_discrete_metric(); + marked_metric.get_corner_angles(alpha, cot_alpha); + constraint = marked_metric.constraint(alpha); + J = marked_metric.constraint_jacobian(cot_alpha); + log.constraint_time = timer.getElapsedTime() - t_start; + marked_metric.write_status_log(metric_status_file); + + // Compute Newton descent direction from the constraint and jacobian + t_start = timer.getElapsedTime(); + + SPDLOG_DEBUG("Jacobian with maximum value {}", J.coeffs().maxCoeff()); + SPDLOG_DEBUG("Jacobian with mean {}", J.coeffs().mean()); + SPDLOG_DEBUG("Metric basis with average {}", metric_basis_matrix.coeffs().mean()); + SPDLOG_DEBUG("Constraint with average {}", constraint.mean()); + + solve_linear_system(metric_basis_matrix); + + SPDLOG_TRACE("Descent direction found with norm {}", descent_direction.norm()); + SPDLOG_TRACE("Descent direction error is {}", (J * descent_direction + constraint).norm()); + SPDLOG_TRACE("Projected constraint is {}", constraint.dot(J * descent_direction)); + log.direction_time = timer.getElapsedTime() - t_start; + log.direction_norm = descent_direction.norm(); + log.direction_residual = (J * descent_direction + constraint).norm(); +} + +// Perform a backtracking line search along the current descent direction from the current +// metric coordinates using the initial metric connectivity +void OptimizeHolonomyNewton::perform_line_search( + const MarkedPennerConeMetric& initial_marked_metric, + MarkedPennerConeMetric& marked_metric) +{ + double t_start = timer.getElapsedTime(); + + // Get starting metric coordinates + VectorX reduced_metric_start = reduced_metric_coords; + + // Get the constraint norm and its 
dot product with the jacobian-projected descent direction + // Note: the product of the jacobian and descent direction should be the negative + // constraint, but this may fail due to numerical instability or regularization + Scalar l2_c0_sq = constraint.squaredNorm(); + Scalar proj_g0 = constraint.dot(J * descent_direction); + spdlog::debug("Initial squared error norm is {}", l2_c0_sq); + spdlog::debug("Initial projected constraint is {}", proj_g0); + + // Reduce descent direction range to avoid nans/infs + if (alg_params.do_reduction) { + while (lambda * (descent_direction.maxCoeff() - descent_direction.minCoeff()) > 2.5) { + lambda /= 2; + spdlog::debug("Reducing lambda to {} for stability", lambda); + } + } + + // Make initial line step with updated constraint + reduced_metric_coords = reduced_metric_start + lambda * descent_direction; + marked_metric.change_metric(initial_marked_metric, reduced_metric_coords, false, false); + update_holonomy_constraint(marked_metric); + + // Line search until the constraint norm decreases and the projected constraint is + // nonpositive We also allow the norm bound to be dropped or made approximate with some + // relative term alpha + spdlog::debug("Starting line search"); + Scalar l2_c_sq = constraint.squaredNorm(); + Scalar proj_cons = constraint.dot(J * descent_direction); + Scalar gamma = 1.; // require ratio of current to initial constraint norm to be below alpha + bool bound_norm = true; + while ((bound_norm && (l2_c_sq > (gamma * l2_c0_sq))) || (proj_cons > 0)) { + // Backtrack one step + lambda /= 2; + + // If lambda low enough, stop bounding the norm + if (lambda <= alg_params.bound_norm_thres) { + bound_norm = false; + spdlog::debug("Dropping norm bound."); + } + + // Change metric and update constraint + reduced_metric_coords = reduced_metric_start + lambda * descent_direction; + marked_metric.change_metric(initial_marked_metric, reduced_metric_coords, false, false); + update_holonomy_constraint(marked_metric); + + 
// Update squared constraint norm and projected constraint + l2_c_sq = constraint.squaredNorm(); + proj_cons = constraint.dot(J * descent_direction); + spdlog::debug("Squared error norm is {}", l2_c_sq); + spdlog::debug("Projected constraint is {}", proj_cons); + + // Check if lambda is below the termination threshold + if (lambda < alg_params.min_lambda) break; + } + + // Make final line step in original connectivity + spdlog::debug("Updating metric"); + reduced_metric_coords = reduced_metric_start + lambda * descent_direction; + marked_metric.change_metric(initial_marked_metric, reduced_metric_coords, true, false); + + log.line_search_time = timer.getElapsedTime() - t_start; +} + +// Determine if the optimization has converged or maximum time/iteration is reached +bool OptimizeHolonomyNewton::is_converged() +{ + if (constraint.cwiseAbs().maxCoeff() < alg_params.error_eps) { + spdlog::info("Stopping optimization as max error {} reached", alg_params.error_eps); + return true; + } + if (lambda < alg_params.min_lambda) { + spdlog::info("Stopping optimization as step size {} too small", lambda); + return true; + } + if (log.num_iter >= alg_params.max_itr) { + spdlog::trace( + "Stopping optimization as reached maximum iteration {}", + alg_params.max_itr); + return true; + } + if (timer.getElapsedTime() >= alg_params.max_time) { + spdlog::trace("Stopping optimization as reached maximum time {}", alg_params.max_time); + return true; + } + + return false; +} + +// Main method to run the optimization with a given metric, metric basis, and parameters +// Each run completely resets the state of the optimization +MarkedPennerConeMetric OptimizeHolonomyNewton::run( + const MarkedPennerConeMetric& initial_marked_metric, + const MatrixX& metric_basis_matrix, + const NewtonParameters& input_alg_params) +{ + // Initialize logging methods + timer.start(); + alg_params = input_alg_params; + lambda = alg_params.lambda0; + l2_energy = std::make_unique( + 
Optimization::LogLengthEnergy(initial_marked_metric)); + initialize_logging(); + initialize_logs(); + initialize_checkpoints(); + checkpoint_metric(initial_marked_metric); + + // Get initial metric + std::unique_ptr marked_metric = initial_marked_metric.clone_marked_metric(); + reduced_metric_init = marked_metric->get_reduced_metric_coordinates(); + reduced_metric_coords = reduced_metric_init; + initialize_metric_status_log(*marked_metric); + + // Get initial constraint + update_holonomy_constraint(*marked_metric); + + // Get before-first-iteration information + update_log_error(*marked_metric); + write_log_entries(); + spdlog::info("itr(0) lm({}) max_error({}))", lambda, log.max_error); + + int itr = 0; + while (true) { + // Check termination conditions + if (is_converged()) break; + + // Increment iteration + itr++; + log.num_iter = itr; + + // Compute Newton descent direction + update_descent_direction(*marked_metric, metric_basis_matrix); + + // Checkpoint current state + // WARNING: Must be done after updating descent direction and before line search + checkpoint_direction(); + + // Search for updated metric, constraint, and angles + perform_line_search(initial_marked_metric, *marked_metric); + + // Log current step + log.step_size = lambda; + log.time = timer.getElapsedTime(); + update_log_error(*marked_metric); + write_log_entries(); + checkpoint_metric(*marked_metric); + spdlog::info("itr({}) lm({}) max_error({}))", itr, lambda, log.max_error); + + // Update lambda + update_lambda(); + } + + // Close logging + close_logs(); + metric_status_file.close(); + + // Change metric to final values and restore the original connectivity + marked_metric->change_metric(initial_marked_metric, reduced_metric_coords, true, false); + + return *marked_metric; +} + + +MarkedPennerConeMetric optimize_metric_angles( + const MarkedPennerConeMetric& initial_marked_metric, + const NewtonParameters& alg_params) +{ + // Optimize metric with full metric space (basis is identity) + 
MatrixX identity = id_matrix(initial_marked_metric.n_reduced_coordinates()); + OptimizeHolonomyNewton solver; + return solver.run(initial_marked_metric, identity, alg_params); +} + +MarkedPennerConeMetric optimize_subspace_metric_angles( + const MarkedPennerConeMetric& initial_marked_metric, + const MatrixX& metric_basis_matrix, + const NewtonParameters& alg_params) +{ + OptimizeHolonomyNewton solver; + return solver.run(initial_marked_metric, metric_basis_matrix, alg_params); +} + +MarkedPennerConeMetric optimize_subspace_metric_angles_log( + const MarkedPennerConeMetric& initial_marked_metric, + const MatrixX& metric_basis_matrix, + const NewtonParameters& alg_params, + NewtonLog& log) +{ + OptimizeHolonomyNewton solver; + MarkedPennerConeMetric marked_metric = + solver.run(initial_marked_metric, metric_basis_matrix, alg_params); + log = solver.get_log(); + return marked_metric; +} + +void view_optimization_state( + const MarkedPennerConeMetric& init_marked_metric, + const MarkedPennerConeMetric& marked_metric, + const std::vector& vtx_reindex, + const Eigen::MatrixXd& V, + std::string mesh_handle, + bool show) +{ + int num_ind_vertices = V.rows(); + if (show) { + spdlog::info("Viewing mesh {} with {} vertices", mesh_handle, num_ind_vertices); + } + auto [V_double, F_mesh, F_halfedge] = generate_doubled_mesh(V, marked_metric, vtx_reindex); + + // get constraint errors + VectorX constraint; + MatrixX J_constraint; + bool need_jacobian = false; + bool only_free_vertices = false; + marked_metric.constraint(constraint, J_constraint, need_jacobian, only_free_vertices); + VectorX scale_coords = best_fit_conformal(init_marked_metric, marked_metric.get_metric_coordinates()); + + // extend vertex angles to full angle map + int num_vertices = marked_metric.n_vertices(); + VectorX angle_constraint(num_vertices); + VectorX scale_distortion(num_vertices); + for (int vi = 0; vi < num_vertices; ++vi) + { + angle_constraint[vi] = constraint[marked_metric.v_rep[vi]]; + 
scale_distortion[vi] = scale_coords[marked_metric.v_rep[vi]]; + } + +#ifdef ENABLE_VISUALIZATION + polyscope::init(); + if (mesh_handle == "") { + mesh_handle = "optimization state"; + } + polyscope::registerSurfaceMesh(mesh_handle, V_double, F_mesh); + polyscope::getSurfaceMesh(mesh_handle) + ->addVertexScalarQuantity( + "angle error", + convert_scalar_to_double_vector(angle_constraint)) + ->setColorMap("coolwarm") + ->setEnabled(true); + polyscope::getSurfaceMesh(mesh_handle) + ->addVertexScalarQuantity( + "scale distortion", + convert_scalar_to_double_vector(scale_distortion)) + ->setColorMap("coolwarm") + ->setEnabled(true); + if (show) polyscope::show(); +#endif +} + +} // namespace Holonomy +} // namespace Penner \ No newline at end of file diff --git a/src/holonomy/holonomy/rotation_form.cpp b/src/holonomy/holonomy/rotation_form.cpp new file mode 100644 index 0000000..c8b06c9 --- /dev/null +++ b/src/holonomy/holonomy/rotation_form.cpp @@ -0,0 +1,157 @@ +#include "holonomy/holonomy/rotation_form.h" + +#include "holonomy/core/field.h" +#include "holonomy/core/intrinsic_field.h" +#include "holonomy/core/forms.h" + +#include +#include + +namespace Penner { +namespace Holonomy { + +// Dot product of vectors in R3 +template +double dot_prod(const VectorType& v1, const VectorType& v2) +{ + return v1[0] * v2[0] + v1[1] * v2[1] + v1[2] * v2[2]; +} + +// Cross product of vectors in R3 +template +VectorType cross_prod(const VectorType& v1, const VectorType& v2) +{ + return VectorType( + v1[1] * v2[2] - v1[2] * v2[1], + v1[2] * v2[0] - v1[0] * v2[2], + v1[0] * v2[1] - v1[1] * v2[0]); +} + +// Angle between v1 and v2 around an axis defined by normal +template +double signed_angle(const VectorType& v1, const VectorType& v2, const VectorType& normal) +{ + double s = dot_prod(normal, cross_prod(v1, v2)); + double c = dot_prod(v1, v2); + const double angle = (s == 0 && c == 0) ? 
0.0 : atan2(s, c); + return angle; +} + +// Priority function for halfedges +// Assumes that vertex indices are unique +bool has_priority(const Mesh& m, const std::vector& vtx_reindex, int h) +{ + assert(m.to[h] != m.to[m.opp[h]]); + return h < m.opp[h]; + return (vtx_reindex[m.to[h]] < vtx_reindex[m.to[m.opp[h]]]); +} + +// Get vertex in the embedded mesh +int get_projected_vertex(const Mesh& m, int h) +{ + return m.v_rep[m.to[h]]; + // TODO + // if (m.type[h] < 2) return m.to[h]; + // else return m.to[m.opp[m.R[h]]]; +} + +// Get face in the embedded mesh +int get_projected_face(const Mesh& m, int h) +{ + if (m.type[h] < 2) + return m.f[h]; + else + return m.f[m.R[h]]; +} + +// Measure the intrinsic angle between frame field vectors in two faces across an edge +Scalar compute_cross_field_edge_angle( + const Mesh& m, + const std::vector& vtx_reindex, + const Eigen::MatrixXd& V, + const Eigen::MatrixXd& R, + const Eigen::MatrixXd& N, + int h) +{ + // Use consistent halfedge for stability + // if (m.type[h] > 1) { FIXME + // return -compute_cross_field_edge_angle(m, vtx_reindex, V, R, N, m.R[h]); + //} + if (!has_priority(m, vtx_reindex, h)) { + return -compute_cross_field_edge_angle(m, vtx_reindex, V, R, N, m.opp[h]); + } + + // Get halfedge direction + int hij = h; + int hji = m.opp[hij]; + int vi = vtx_reindex[get_projected_vertex(m, hji)]; + int vj = vtx_reindex[get_projected_vertex(m, hij)]; + Eigen::Vector3d h_direction = V.row(vj) - V.row(vi); + + // Get frame field directions + int f0 = get_projected_face(m, hij); + int f1 = get_projected_face(m, hji); + Eigen::Vector3d R0 = R.row(f0); + Eigen::Vector3d R1 = R.row(f1); + + // Get signed face normals + // In doubled meshes, the normal is inverted + // double s0 = (m.type[hij] < 2) ? 1.0 : -1.0; + // double s1 = (m.type[hji] < 2) ? 
1.0 : -1.0; + double s0 = 1.0; + double s1 = 1.0; + Eigen::Vector3d N0 = s0 * N.row(f0); + Eigen::Vector3d N1 = s1 * N.row(f1); + + // Get angle of rotation across the edge + Scalar d0 = signed_angle(h_direction, R0, N0); + Scalar d1 = signed_angle(h_direction, R1, N1); + Scalar alpha = (2 * M_PI) + (M_PI / 4) + d0 - d1; + + return pos_fmod(double(alpha), M_PI / 2.0) - (M_PI / 4); +} + +VectorX generate_rotation_form_from_cross_field( + const Mesh& m, + const std::vector& vtx_reindex, + const Eigen::MatrixXd& V, + const Eigen::MatrixXi& F, + const Eigen::MatrixXd& frame_field) +{ + // Compute face normals + Eigen::MatrixXd N; + igl::per_face_normals(V, F, N); + + // Compute rotation form from frame field + int num_halfedges = m.n_halfedges(); + VectorX rotation_form(num_halfedges); + for (int h = 0; h < num_halfedges; ++h) { + rotation_form[h] = compute_cross_field_edge_angle(m, vtx_reindex, V, frame_field, N, h); + } + + assert(is_valid_one_form(m, rotation_form)); + return rotation_form; +} + +VectorX generate_intrinsic_rotation_form(const Mesh& m, const FieldParameters& field_params) +{ + IntrinsicNRosyField field_generator; + field_generator.min_angle = field_params.min_angle; + + return field_generator.run(m); +} + +VectorX generate_intrinsic_rotation_form( + const Mesh& m, + const std::vector& vtx_reindex, + const Eigen::MatrixXd& V, + const FieldParameters& field_params) +{ + IntrinsicNRosyField field_generator; + field_generator.min_angle = field_params.min_angle; + + return field_generator.run_with_viewer(m, vtx_reindex, V); +} + +} // namespace Holonomy +} // namespace Penner \ No newline at end of file diff --git a/src/holonomy/interface.cpp b/src/holonomy/interface.cpp new file mode 100644 index 0000000..1af78e0 --- /dev/null +++ b/src/holonomy/interface.cpp @@ -0,0 +1,619 @@ +#include "holonomy/interface.h" + +#include "util/boundary.h" +#include "holonomy/core/boundary_basis.h" +#include "holonomy/core/homology_basis.h" +#include 
"holonomy/core/intrinsic_field.h" +#include "holonomy/core/quality.h" +#include "holonomy/holonomy/cones.h" +#include "holonomy/holonomy/rotation_form.h" +#include "holonomy/holonomy/holonomy.h" +#include "holonomy/similarity/energy.h" + +#include "optimization/core/cone_metric.h" +#include "optimization/core/constraint.h" +#include "optimization/parameterization/interpolation.h" +#include "util/io.h" +#include "util/vector.h" +#include "optimization/parameterization/refinement.h" + + +#include + +#include "conformal_ideal_delaunay/ConformalIdealDelaunayMapping.hh" +#include "conformal_ideal_delaunay/ConformalInterface.hh" + +#include "geometrycentral/surface/integer_coordinates_intrinsic_triangulation.h" +#include "geometrycentral/surface/intrinsic_triangulation.h" +#include "geometrycentral/surface/manifold_surface_mesh.h" +#include "geometrycentral/surface/surface_mesh.h" +#include "geometrycentral/surface/surface_mesh_factories.h" + +namespace Penner { +namespace Holonomy { + +std::vector extend_vtx_reindex( + const Mesh& m, + const std::vector& vtx_reindex +) { + return vector_compose(vtx_reindex, m.v_rep); +} + +std::tuple> generate_marked_metric( + const Eigen::MatrixXd& V, + const Eigen::MatrixXi& F, + const Eigen::MatrixXd& uv, + const Eigen::MatrixXi& F_uv, + const std::vector& Th_hat, + const VectorX& rotation_form, + std::vector free_cones, + MarkedMetricParameters marked_metric_params) +{ + // Convert VF mesh to halfedge + bool fix_boundary = false; + std::vector vtx_reindex, indep_vtx, dep_vtx, v_rep, bnd_loops; + Mesh m = FV_to_double( + V, + F, + uv, + F_uv, + Th_hat, + vtx_reindex, + indep_vtx, + dep_vtx, + v_rep, + bnd_loops, + free_cones, + fix_boundary); + + // Check for invalid cones + if (!validate_cones(m)) { + spdlog::info("Fixing invalid cones"); + fix_cones(m); + } + + // Use halfedge mesh method + MarkedPennerConeMetric marked_metric = + generate_marked_metric_from_mesh(m, rotation_form, marked_metric_params); + if 
(marked_metric_params.remove_symmetry) + { + vtx_reindex = extend_vtx_reindex(m, vtx_reindex); + } + + return std::make_tuple(marked_metric, vtx_reindex); +} + +VectorX generate_penner_coordinates(const Mesh& m) { + // Make copy of mesh delaunay + Mesh m_copy = m; + VectorX scale_factors; + scale_factors.setZero(m.n_ind_vertices()); + bool use_ptolemy_flip = false; + DelaunayStats del_stats; + SolveStats solve_stats; + ConformalIdealDelaunay::MakeDelaunay( + m_copy, + scale_factors, + del_stats, + solve_stats, + use_ptolemy_flip); + + // Get flip sequence + const auto& flip_sequence = del_stats.flip_seq; + for (auto iter = flip_sequence.rbegin(); iter != flip_sequence.rend(); + ++iter) { + int flip_index = *iter; + if (flip_index < 0) { + flip_index = -flip_index - 1; + } + m_copy.flip_ccw(flip_index); + m_copy.flip_ccw(flip_index); + m_copy.flip_ccw(flip_index); + } + + // Get metric coordinates from copy + int num_halfedges = m.n_halfedges(); + VectorX metric_coords(num_halfedges); + for (int h = 0; h < num_halfedges; ++h) { + metric_coords[h] = 2.0 * log(m_copy.l[h]); + } + + return metric_coords; +} + +void generate_basis_loops( + const Mesh& m, + std::vector>& basis_loops, + MarkedMetricParameters marked_metric_params) +{ + // (optionally) generate dual loops on the surface + // If the mesh is a trivial torus, don't add constraints + int num_basis_loops = 0; + if (!(marked_metric_params.remove_loop_constraints) && (!is_trivial_torus(m))) { + spdlog::info("Adding holonomy constraints"); + HomologyBasisGenerator holonomy_basis_generator(m, 0, marked_metric_params.weighting); + BoundaryBasisGenerator boundary_basis_generator(m); + int num_homology_basis_loops = holonomy_basis_generator.n_homology_basis_loops(); + int num_basis_boundaries = boundary_basis_generator.n_basis_boundaries(); + spdlog::info( + "Adding {} homology and {} boundary constraints", + num_homology_basis_loops, + num_basis_boundaries); + + // Optionally remove some basis loops + if 
(marked_metric_params.max_loop_constraints >= 0) { + num_homology_basis_loops = + std::min(num_homology_basis_loops, marked_metric_params.max_loop_constraints); + } + + // Optionally remove some boundary loops + if (marked_metric_params.max_boundary_constraints >= 0) { + num_basis_boundaries = + std::min(num_basis_boundaries, marked_metric_params.max_boundary_constraints); + } + + // Initialize basis list loop and lambda to add loops + basis_loops.reserve(num_basis_loops); + bool use_connectivity = true; + auto add_basis_loop = [&](const std::vector& basis_loop) { + // increment count + num_basis_loops++; + + // Use custom data structure for dual loop tracking + if (use_connectivity) { + basis_loops.push_back(std::make_unique( + DualLoopConnectivity(build_dual_path_from_face_sequence(m, basis_loop)))); + } + // Use simpler list representation + else { + basis_loops.push_back(std::make_unique( + DualLoopList(build_dual_path_from_face_sequence(m, basis_loop)))); + } + }; + + // Add homology basis loops + for (int i = 0; i < num_homology_basis_loops; ++i) { + add_basis_loop(holonomy_basis_generator.construct_homology_basis_loop(i)); + } + + // Add boundary basis loops + for (int i = 0; i < num_basis_boundaries; ++i) { + // TODO Think if need + //add_basis_loop(boundary_basis_generator.construct_boundary_basis_loop(i)); + + add_basis_loop(boundary_basis_generator.construct_boundary_path_basis_loop(i)); + } + } +} + +MarkedPennerConeMetric generate_marked_metric_from_mesh( + const Mesh& _m, + const VectorX& rotation_form, + MarkedMetricParameters marked_metric_params) +{ + // Optionally remove symmetry structure + // TODO: Need to remake cone angles with half values + Mesh m = _m; + int num_halfedges = m.n_halfedges(); + + // Build initial metric and target metric from edge lengths + VectorX scale_factors; + scale_factors.setZero(m.n_ind_vertices()); + bool is_hyperbolic = false; + Optimization::InterpolationMesh interpolation_mesh(m, scale_factors, is_hyperbolic); + + 
// Get initial log length coordinates + VectorX log_length_coords = interpolation_mesh.get_halfedge_metric_coordinates(); + Optimization::DiscreteMetric discrete_metric(m, log_length_coords); + + // compute basis loops + std::vector> basis_loops; + generate_basis_loops(m, basis_loops, marked_metric_params); + + // Compute the corner angles + VectorX he2angle, he2cot; + Optimization::corner_angles(discrete_metric, he2angle, he2cot); + + // Compute rotation angles along dual loops if loop constraints are needed + int num_basis_loops = basis_loops.size(); + std::vector kappa(num_basis_loops); + for (int i = 0; i < num_basis_loops; ++i) { + // Compute field rotation and metric holonomy + Scalar rotation = compute_dual_loop_rotation(m, rotation_form, *basis_loops[i]); + Scalar holonomy = compute_dual_loop_holonomy(m, he2angle, *basis_loops[i]); + + // Constraint is the difference of the holonomy and rotation + kappa[i] = holonomy - rotation; + spdlog::debug("Holonomy constraint {} is {}", i, kappa[i]); + } + + // optionally make interior vertices free + if (marked_metric_params.free_interior) + { + m.fixed_dof = std::vector(m.n_ind_vertices(), true); + auto bd_vertices = find_boundary_vertices(m); + for (int vi : bd_vertices) + { + m.fixed_dof[m.v_rep[vi]] = false; + } + + // handle trivial interior case + int num_bd_vertices = bd_vertices.size(); + if (num_bd_vertices == m.n_ind_vertices()) + { + m.fixed_dof[0] = true; + } + } + + // optionally remove symmetry + if (marked_metric_params.remove_symmetry) { + m.Th_hat = std::vector(m.n_vertices(), 0.); + m.fixed_dof = std::vector(m.n_vertices(), false); + arange(m.n_vertices(), m.v_rep); + for (int hij = 0; hij < num_halfedges; ++hij) { + m.type[hij] = 0; + //m.R[hij] = 0; + + // split interior cones + m.Th_hat[m.v_rep[m.to[hij]]] = _m.Th_hat[_m.v_rep[_m.to[hij]]] / 2.; + if (_m.type[hij] == 2) + { + m.fixed_dof[m.v_rep[m.to[hij]]] = true; + } else { + m.fixed_dof[m.v_rep[m.to[hij]]] = _m.fixed_dof[_m.v_rep[_m.to[hij]]]; 
+ } + } + + std::vector bd_vertices = find_boundary_vertices(_m); + for (int vi : bd_vertices) + { + m.Th_hat[m.v_rep[vi]] = _m.Th_hat[_m.v_rep[vi]]; + m.fixed_dof[m.v_rep[vi]] = _m.fixed_dof[_m.v_rep[vi]]; + } + } + + + // Build initial metric coordinates + VectorX metric_coords; + if (marked_metric_params.use_initial_zero) { + metric_coords = VectorX::Zero(num_halfedges); + } else if (marked_metric_params.use_log_length) { + metric_coords = log_length_coords; + } else { + metric_coords = generate_penner_coordinates(m); + } + + + return MarkedPennerConeMetric(m, metric_coords, basis_loops, kappa); +} + +std::tuple, std::vector> +generate_mesh( + const Eigen::MatrixXd& V, + const Eigen::MatrixXi& F, + const Eigen::MatrixXd& uv, + const Eigen::MatrixXi& F_uv, + const std::vector& Th_hat, + std::vector free_cones) +{ + // Convert VF mesh to halfedge + bool fix_boundary = false; + std::vector vtx_reindex, indep_vtx, dep_vtx, v_rep, bnd_loops; + Mesh m = FV_to_double( + V, + F, + uv, + F_uv, + Th_hat, + vtx_reindex, + indep_vtx, + dep_vtx, + v_rep, + bnd_loops, + free_cones, + fix_boundary); + return std::make_tuple(m, vtx_reindex); +} + +std::tuple, VectorX, std::vector> +infer_marked_metric( + const Eigen::MatrixXd& V, + const Eigen::MatrixXi& F, + bool use_intrinsic, + MarkedMetricParameters marked_metric_params) +{ + auto [frame_field, field_Th_hat] = generate_cross_field(V, F); + + // Convert VF mesh to halfedge + std::vector vtx_reindex_mesh, indep_vtx, dep_vtx, v_rep, bnd_loops; + std::vector free_cones(0); + bool fix_boundary = false; + Mesh m = FV_to_double( + V, + F, + V, + F, + field_Th_hat, + vtx_reindex_mesh, + indep_vtx, + dep_vtx, + v_rep, + bnd_loops, + free_cones, + fix_boundary); + + // Generate rotation form and cones + VectorX rotation_form; + if (use_intrinsic) { + FieldParameters field_params; + rotation_form = generate_intrinsic_rotation_form(m, field_params); + } else { + rotation_form = generate_rotation_form_from_cross_field(m, 
vtx_reindex_mesh, V, F, frame_field); + } + bool has_boundary = bnd_loops.size() >= 1; + std::vector Th_hat = + generate_cones_from_rotation_form(m, vtx_reindex_mesh, rotation_form, has_boundary); + + // Generate marked mesh + auto [marked_metric, vtx_reindex] = + generate_marked_metric(V, F, V, F, Th_hat, rotation_form, free_cones, marked_metric_params); + if (marked_metric_params.remove_symmetry) + { + vtx_reindex = extend_vtx_reindex(m, vtx_reindex); + } + + return std::make_tuple(marked_metric, vtx_reindex, rotation_form, Th_hat); +} + +std::tuple> generate_intrinsic_rotation_form( + const Eigen::MatrixXd& V, + const Eigen::MatrixXi& F, + const FieldParameters& field_params) +{ + // generate halfedge mesh + std::vector vtx_reindex; + std::vector free_cones(0); + std::vector Th_hat = std::vector(V.rows(), 2 * M_PI); + bool fix_boundary = false; + bool use_discrete_metric = true; + std::unique_ptr cone_metric = + Optimization::generate_initial_mesh( + V, + F, + V, + F, + Th_hat, + vtx_reindex, + free_cones, + fix_boundary, + use_discrete_metric); + + // compute rotation form + VectorX rotation_form = generate_intrinsic_rotation_form(*cone_metric, vtx_reindex, V, field_params); + + // generate cones from the rotation form + bool has_bd = (cone_metric->type[0] != 0); + Th_hat = generate_cones_from_rotation_form( + *cone_metric, + vtx_reindex, + rotation_form, + has_bd); + + return std::make_tuple(rotation_form, Th_hat); +} + +std::tuple> generate_refined_marked_metric( + const Eigen::MatrixXd& V, + const Eigen::MatrixXi& F, + double min_angle, + MarkedMetricParameters marked_metric_params) +{ + using namespace geometrycentral; + using namespace geometrycentral::surface; + + // Get input geometry + std::unique_ptr mesh; + std::unique_ptr geometry; + std::tie(mesh, geometry) = makeManifoldSurfaceMeshAndGeometry(V, F); + + // Flip edges to get the intrinsic Delaunay triangulation + std::unique_ptr intTri( + new IntegerCoordinatesIntrinsicTriangulation(*mesh, 
*geometry)); + + // Make the mesh delaunay and refine + intTri->flipToDelaunay(); + intTri->delaunayRefine(min_angle); + intTri->intrinsicMesh->compress(); + + // Build NOB representation with lengths + int num_halfedges = intTri->intrinsicMesh->nHalfedges(); + HalfedgeData he_indices = intTri->intrinsicMesh->getHalfedgeIndices(); + std::vector next_he(num_halfedges, -1); + std::vector opp(num_halfedges, -1); + std::vector bnd_loops = {}; + std::vector l(num_halfedges, 0.0); + intTri->requireEdgeLengths(); + for (Halfedge he : intTri->intrinsicMesh->halfedges()) { + size_t he_index = he_indices[he]; + next_he[he_index] = he_indices[he.next()]; + opp[he_index] = he_indices[he.twin()]; + l[he_index] = intTri->edgeLengths[he.edge()]; + } + + // Build the connectivity arrays from the NOB arrays + Connectivity C; + NOB_to_connectivity(next_he, opp, bnd_loops, C); + + // Create trivial reflection information + std::vector type(num_halfedges, 0); + std::vector R(num_halfedges, 0); + + // Create a halfedge structure for the mesh + int num_vertices = C.out.size(); + Mesh m; + m.n = C.n; + m.to = C.to; + m.f = C.f; + m.h = C.h; + m.out = C.out; + m.opp = C.opp; + m.type = type; + m.type_input = type; + m.R = R; + m.l = l; + m.Th_hat = std::vector(num_vertices, 2 * M_PI); + m.v_rep = range(0, num_vertices); + m.fixed_dof = std::vector(num_vertices, false); + m.fixed_dof[0] = true; + + // Get rotation form and corresponding cones + FieldParameters field_params; + VectorX rotation_form = generate_intrinsic_rotation_form(m, field_params); + std::vector Th_hat = generate_cones_from_rotation_form(m, rotation_form); + m.Th_hat = Th_hat; + + // Check for invalid cones + if (!validate_cones(m)) { + spdlog::info("Fixing invalid cones"); + fix_cones(m); + } + + // Set cones and check Guass Bonnet + GaussBonnetCheck(m); + + // Get initial marked mesh for optimization + auto marked_metric = generate_marked_metric_from_mesh(m, rotation_form, marked_metric_params); + + return 
std::make_tuple(marked_metric, rotation_form, Th_hat); +} + +std::tuple> generate_similarity_metric( + const Eigen::MatrixXd& V, + const Eigen::MatrixXi& F, + const Eigen::MatrixXd& uv, + const Eigen::MatrixXi& F_uv, + const std::vector& Th_hat, + const VectorX& rotation_form, + std::vector free_cones, + MarkedMetricParameters marked_metric_params) +{ + // Convert VF mesh to halfedge + bool fix_boundary = false; + std::vector vtx_reindex, indep_vtx, dep_vtx, v_rep, bnd_loops; + Mesh m = FV_to_double( + V, + F, + uv, + F_uv, + Th_hat, + vtx_reindex, + indep_vtx, + dep_vtx, + v_rep, + bnd_loops, + free_cones, + fix_boundary); + + // Use halfedge method with appropriate parameters + SimilarityPennerConeMetric similarity_metric = + generate_similarity_metric_from_mesh(m, rotation_form, marked_metric_params); + + return std::make_tuple(similarity_metric, vtx_reindex); +} + +SimilarityPennerConeMetric generate_similarity_metric_from_mesh( + const Mesh& m, + const VectorX& rotation_form, + MarkedMetricParameters marked_metric_params) +{ + // Generate the base underlying marked metric + MarkedPennerConeMetric marked_metric = + generate_marked_metric_from_mesh(m, rotation_form, marked_metric_params); + + // Use initial zero harmonic form coordinates (corresponding to a metric) + VectorX harmonic_form_coords = VectorX::Zero(marked_metric.n_homology_basis_loops()); + + return SimilarityPennerConeMetric( + m, + marked_metric.get_reduced_metric_coordinates(), + marked_metric.get_homology_basis_loops(), + marked_metric.kappa_hat, + harmonic_form_coords); +} + +void regularize_metric(MarkedPennerConeMetric& marked_metric, double max_triangle_quality) +{ + // Get initial mesh quality + VectorX reduced_metric_coords = marked_metric.get_reduced_metric_coordinates(); + VectorX mesh_quality = compute_mesh_quality(marked_metric); + spdlog::info("Initial quality is {}", mesh_quality.maxCoeff()); + + // Get average + int num_edges = marked_metric.n_edges(); + Scalar average_initial_coord 
= reduced_metric_coords.mean(); + spdlog::info("Average metric coordinate is {}", average_initial_coord); + + // Regularize + bool changed = false; + while (mesh_quality.maxCoeff() > max_triangle_quality) { + reduced_metric_coords = 0.9 * reduced_metric_coords; + marked_metric.change_metric(marked_metric, reduced_metric_coords); + mesh_quality = compute_mesh_quality(marked_metric); + spdlog::info("Quality is {}", mesh_quality.maxCoeff()); + changed = true; + } + + // Make sure average is unchanged if regularized + if (changed) { + Scalar difference = average_initial_coord - reduced_metric_coords.mean(); + reduced_metric_coords += VectorX::Constant(num_edges, difference); + marked_metric.change_metric(marked_metric, reduced_metric_coords); + mesh_quality = compute_mesh_quality(marked_metric); + spdlog::info("Final quality is {}", mesh_quality.maxCoeff()); + spdlog::info("Final average is {}", reduced_metric_coords.mean()); + } +} + +void optimize_triangle_quality(MarkedPennerConeMetric& marked_metric, double max_triangle_quality) +{ + std::vector flip_seq = {}; + marked_metric.make_discrete_metric(); + flip_seq = marked_metric.get_flip_sequence(); + VectorX mesh_quality = compute_mesh_quality(marked_metric); + for (auto iter = flip_seq.rbegin(); iter != flip_seq.rend(); ++iter) { + int h = *iter; + spdlog::trace("Flipping {} cw", h); + marked_metric.flip_ccw(h, true); + marked_metric.flip_ccw(h, true); + marked_metric.flip_ccw(h, true); + } + spdlog::info("Initial quality is {}", mesh_quality.maxCoeff()); + + // Regularize until quality is sufficiently low + while (mesh_quality.maxCoeff() > max_triangle_quality) { + marked_metric.make_discrete_metric(); + flip_seq = marked_metric.get_flip_sequence(); + mesh_quality = compute_mesh_quality(marked_metric); + spdlog::info("New quality is {}", mesh_quality.maxCoeff()); + + LogTriangleQualityEnergy energy(marked_metric); + // FIXME TriangleQualityEnergy energy(marked_metric); + VectorX gradient = 
energy.EnergyFunctor::gradient(marked_metric); + spdlog::info("Gradient in range [{}, {}]", gradient.minCoeff(), gradient.maxCoeff()); + gradient /= (gradient.norm() + 1e-10); + VectorX reduced_metric_coords = marked_metric.get_reduced_metric_coordinates(); + marked_metric.change_metric(marked_metric, reduced_metric_coords - gradient, true, false); + + // Undo any flips to make Delaunay + for (auto iter = flip_seq.rbegin(); iter != flip_seq.rend(); ++iter) { + int h = *iter; + spdlog::trace("Flipping {} cw", h); + marked_metric.flip_ccw(h, true); + marked_metric.flip_ccw(h, true); + marked_metric.flip_ccw(h, true); + } + } +} + +} // namespace Holonomy +} // namespace Penner \ No newline at end of file diff --git a/src/holonomy/pybind.cpp b/src/holonomy/pybind.cpp new file mode 100644 index 0000000..972cf4b --- /dev/null +++ b/src/holonomy/pybind.cpp @@ -0,0 +1,128 @@ +#include +#include +#include +#include + +#include "holonomy/interface.h" +#include "holonomy/core/common.h" +#include "holonomy/core/quality.h" +#include "holonomy/holonomy/cones.h" +#include "holonomy/holonomy/marked_penner_cone_metric.h" +#include "holonomy/holonomy/newton.h" +#include "holonomy/holonomy/rotation_form.h" +#include "holonomy/similarity/conformal.h" +#include "holonomy/similarity/energy.h" +#include "holonomy/similarity/layout.h" +#include "holonomy/similarity/similarity_penner_cone_metric.h" +#include "util/boundary.h" + +namespace Penner { +namespace Holonomy { + +#ifdef PYBIND +#ifndef MULTIPRECISION + +// wrap as Python module +PYBIND11_MODULE(holonomy_py, m) +{ + m.doc() = "pybind for optimization module"; + spdlog::set_level(spdlog::level::info); + pybind11::call_guard + default_call_guard; + + pybind11::class_ homotopy_basis_generator(m, "HomotopyBasisGenerator"); + pybind11::enum_(homotopy_basis_generator, "Weighting") + .value("minimal_homotopy", HomotopyBasisGenerator::Weighting::minimal_homotopy) + .value("maximal_homotopy", 
HomotopyBasisGenerator::Weighting::maximal_homotopy) + .value("dual_min_primal_max", HomotopyBasisGenerator::Weighting::dual_min_primal_max) + .value("primal_min_dual_max", HomotopyBasisGenerator::Weighting::primal_min_dual_max) + .export_values(); + + pybind11::class_>(m, "NewtonParameters") + .def(pybind11::init<>()) + .def_readwrite("output_dir", &NewtonParameters::output_dir) + .def_readwrite("error_log", &NewtonParameters::error_log) + .def_readwrite("reset_lambda", &NewtonParameters::reset_lambda) + .def_readwrite("do_reduction", &NewtonParameters::do_reduction) + .def_readwrite("lambda0", &NewtonParameters::lambda0) + .def_readwrite("error_eps", &NewtonParameters::error_eps) + .def_readwrite("max_itr", &NewtonParameters::max_itr) + .def_readwrite("max_time", &NewtonParameters::max_time) + .def_readwrite("min_lambda", &NewtonParameters::min_lambda) + .def_readwrite("solver", &NewtonParameters::solver) + .def_readwrite("log_level", &NewtonParameters::log_level); + + pybind11::class_>( + m, + "MarkedMetricParameters") + .def(pybind11::init<>()) + .def_readwrite("use_initial_zero", &MarkedMetricParameters::use_initial_zero) + .def_readwrite("remove_loop_constraints", &MarkedMetricParameters::remove_loop_constraints) + .def_readwrite("free_interior", &MarkedMetricParameters::free_interior) + .def_readwrite("weighting", &MarkedMetricParameters::weighting); + + pybind11::class_>(m, "FieldParameters") + .def(pybind11::init<>()) + .def_readwrite("min_angle", &FieldParameters::min_angle); + + pybind11::class_(m, "MarkedPennerConeMetric") + .def_readwrite("kappa_hat", &MarkedPennerConeMetric::kappa_hat) + .def("flip_ccw", &MarkedPennerConeMetric::flip_ccw) + .def("undo_flips", &MarkedPennerConeMetric::undo_flips) + .def("clone_cone_metric", &MarkedPennerConeMetric::clone_cone_metric) + .def("make_discrete_metric", &MarkedPennerConeMetric::make_discrete_metric) + .def("get_flip_sequence", &MarkedPennerConeMetric::get_flip_sequence) + .def("max_constraint_error", 
&MarkedPennerConeMetric::max_constraint_error) + .def("n_vertices", &MarkedPennerConeMetric::n_vertices) + .def("n_edges", &MarkedPennerConeMetric::n_edges) + .def("n_faces", &MarkedPennerConeMetric::n_faces) + .def("n_homology_basis_loops", &MarkedPennerConeMetric::n_homology_basis_loops); + + pybind11::class_(m, "DualLoop"); + + pybind11::class_(m, "CoordinateEnergy") + .def(pybind11::init>()); + pybind11::class_(m, "IntegratedEnergy") + .def(pybind11::init()); + + m.def("compute_mesh_quality", &compute_mesh_quality, default_call_guard); + m.def("compute_min_angle", &compute_min_angle, default_call_guard); + m.def("fix_cones", &fix_cones, default_call_guard); + m.def("add_optimal_cone_pair", &add_optimal_cone_pair, default_call_guard); + m.def( + "find_boundary_vertices", + pybind11::overload_cast&, const std::vector&>( + &find_boundary_vertices), + default_call_guard); + + m.def("generate_mesh", &generate_mesh, default_call_guard); + m.def("generate_marked_metric", &generate_marked_metric, default_call_guard); + m.def("generate_refined_marked_metric", &generate_refined_marked_metric, default_call_guard); + m.def("generate_similarity_metric", &generate_similarity_metric, default_call_guard); + m.def( + "compute_conformal_similarity_metric", + &compute_conformal_similarity_metric, + default_call_guard); + + m.def("optimize_subspace_metric_angles", &optimize_subspace_metric_angles, default_call_guard); + m.def("optimize_metric_angles", &optimize_metric_angles, default_call_guard); + m.def( + "generate_intrinsic_rotation_form", + pybind11:: + overload_cast( + &generate_intrinsic_rotation_form), + default_call_guard); + + m.def("make_interior_free", &make_interior_free, default_call_guard); + + m.def( + "generate_VF_mesh_from_similarity_metric", + &generate_VF_mesh_from_similarity_metric, + default_call_guard); +} + +#endif +#endif + +} // namespace Holonomy +} // namespace Penner diff --git a/src/holonomy/similarity/conformal.cpp 
b/src/holonomy/similarity/conformal.cpp new file mode 100644 index 0000000..83f79c5 --- /dev/null +++ b/src/holonomy/similarity/conformal.cpp @@ -0,0 +1,235 @@ +#include "holonomy/similarity/conformal.h" + +#include +#include "holonomy/similarity/constraint.h" +#include "holonomy/holonomy/holonomy.h" + +namespace Penner { +namespace Holonomy { + +// Compute the descent direction for a similarity metric with given angles and angle cotangents +VectorX compute_descent_direction( + const SimilarityPennerConeMetric& similarity_metric, + const VectorX& alpha, + const VectorX& cot_alpha) +{ + // Build constraint system for holonomy constraints in terms of the scaling one form + // Also includes closed one form constraints + Eigen::SparseMatrix J = compute_similarity_constraint_jacobian(similarity_metric, cot_alpha); + VectorX constraint = compute_similarity_constraint(similarity_metric, alpha); + + // Solve for descent direction (in one form edge coordinates) + VectorX descent_direction = solve_linear_system(J, constraint); + + // TODO Determine method to solve for newton decrement and add steepest descent + // weighting if necessary + + return descent_direction; +} + +// Make a line step with direction lambda along the metric's one form descent direction +void line_step_one_form(SimilarityPennerConeMetric& similarity_metric, Scalar lambda) +{ + // Update the metric + VectorX xi0 = similarity_metric.get_one_form(); + VectorX d = similarity_metric.get_one_form_direction(); + VectorX xi = xi0 + lambda * d; + similarity_metric.set_one_form(xi); + + // Make sure the metric remains Delaunay + similarity_metric.make_discrete_metric(); +} + +// Compute the gradient of the closed one form constraint energy given the angles of the metric +void compute_gradient( + const SimilarityPennerConeMetric& similarity_metric, + const VectorX& alpha, + VectorX& gradient) +{ + int n_v = similarity_metric.n_vertices(); + int n_s = similarity_metric.n_homology_basis_loops(); + + // Extract the 
gradient from the constraint vector + VectorX constraint = compute_similarity_constraint(similarity_metric, alpha); + gradient = constraint.topRows(n_v - 1 + n_s); +} + +// Compute the gradient of the closed one form constraint energy +void compute_gradient(const SimilarityPennerConeMetric& similarity_metric, VectorX& gradient) +{ + // Compute the angles and cotangents of the scaled metric + VectorX alpha, cot_alpha; + similarity_corner_angles(similarity_metric, alpha, cot_alpha); + + // Compute the gradient using the computed angles + compute_gradient(similarity_metric, alpha, gradient); +} + +// Compute the Newton decrement in terms of the reduced vertex and dual loop variables +Scalar compute_newton_decrement( + const SimilarityPennerConeMetric& similarity_metric, + const VectorX& gradient, + const VectorX& descent_direction) +{ + // Convert descent direction to signed halfedge coordinates + VectorX d(similarity_metric.n_halfedges()); + for (int h = 0; h < similarity_metric.n_halfedges(); h++) { + d[h] = similarity_metric.sign(h) * descent_direction[similarity_metric.he2e[h]]; + } + assert(is_closed_one_form(similarity_metric, d)); + + // Get reduced descent direction coefficients + VectorX y = similarity_metric.reduce_one_form(d); + + // Get newton decrement + return gradient.dot(y); +} + +// Perform backtracking line search along the given descent direction, starting from +// step size lambda, and update the metric and its gradient +void line_search( + SimilarityPennerConeMetric& similarity_metric, + VectorX& gradient, + const VectorX& descent_direction, + Scalar& lambda, + bool& bound_norm, + const AlgorithmParameters& alg_params, + const LineSearchParameters& ls_params) +{ + // Convert descent direction to signed halfedge coordinates + VectorX d(similarity_metric.n_halfedges()); + for (int h = 0; h < similarity_metric.n_halfedges(); h++) { + d[h] = similarity_metric.sign(h) * descent_direction[similarity_metric.he2e[h]]; + } + 
similarity_metric.set_one_form_direction(d); + + // Get reduced descent direction coefficients + VectorX y = similarity_metric.reduce_one_form(d); + + // Line step reduction to avoid nans/infs + if (ls_params.do_reduction) { + while (lambda * (d.maxCoeff() - d.minCoeff()) > 2.5) { + lambda /= 2; + spdlog::info("Reducing lambda to {}", lambda); + } + } + + // Get initial gradient before the line step and its norm + compute_gradient(similarity_metric, gradient); + Scalar l2_g0_sq = gradient.squaredNorm(); + + // Initial line search + line_step_one_form(similarity_metric, lambda); + compute_gradient(similarity_metric, gradient); + Scalar l2_g_sq = gradient.squaredNorm(); // Squared norm of the gradient + Scalar proj_grad = y.dot(gradient); // Projected gradient onto descent direction + + // Backtrack until the gradient norm decreases and the projected gradient is negative + while ((proj_grad > 0) || (l2_g_sq > l2_g0_sq && bound_norm)) { + // Backtrack one step + lambda /= 2; + line_step_one_form(similarity_metric, -lambda); // Backtrack by halved lambda + compute_gradient(similarity_metric, gradient); + + // TODO Line search condition to ensure quadratic convergence + + // Update squared gradient norm and projected gradient + l2_g_sq = gradient.squaredNorm(); + proj_grad = gradient.dot(y); + + // Check if gradient norm is below the threshold to drop the bound + if ((bound_norm) && (lambda <= ls_params.bound_norm_thres)) { + bound_norm = false; + spdlog::debug("Dropping norm bound."); + } + + // Check if lambda is below the termination threshold + if (lambda < alg_params.min_lambda) break; + } + spdlog::debug("Used lambda {} ", lambda); + return; +} + +void compute_conformal_similarity_metric( + SimilarityPennerConeMetric& similarity_metric, + const AlgorithmParameters& alg_params, + const LineSearchParameters& ls_params) +{ + Scalar lambda = ls_params.lambda0; + bool bound_norm = + (ls_params.lambda0 > ls_params.bound_norm_thres); // prevents the grad norm from 
increasing + if (bound_norm) spdlog::debug("Using norm bound."); + + // Get initial angles + similarity_metric.make_discrete_metric(); + VectorX alpha, cot_alpha, gradient; + similarity_corner_angles(similarity_metric, alpha, cot_alpha); + compute_gradient(similarity_metric, alpha, gradient); + + // Iterate until the gradient has sup norm below a threshold + spdlog::info("itr(0) lm({}) max_error({}))", lambda, gradient.cwiseAbs().maxCoeff()); + int itr = 0; + while (gradient.cwiseAbs().maxCoeff() >= alg_params.error_eps) { + itr++; + + // Compute gradient and descent direction from Hessian (with efficient solver) + // Warning: need to have updated angles + VectorX descent_direction = compute_descent_direction(similarity_metric, alpha, cot_alpha); + + // Terminate if newton decrement sufficiently smalll + Scalar newton_decr = + compute_newton_decrement(similarity_metric, gradient, descent_direction); + + // Alternative termination conditons to error threshold + if (lambda < alg_params.min_lambda) { + spdlog::info("Stopping projection as step size {} too small", lambda); + break; + } + if (itr >= alg_params.max_itr) { + spdlog::info("Stopping projection as reached maximum iteration {}", alg_params.max_itr); + break; + } + if (newton_decr > alg_params.newton_decr_thres) { + spdlog::info("Stopping projection as newton decrement {} large enough", newton_decr); + break; + } + + // Determine initial lambda for line search based on method parameters + if (ls_params.reset_lambda) { + lambda = ls_params.lambda0; + } else { + lambda = std::min(1, 2 * lambda); // adaptive step length + } + + // reset lambda when it goes above norm bound threshold + if ((lambda > ls_params.bound_norm_thres) && (!bound_norm)) { + bound_norm = true; + lambda = ls_params.lambda0; + spdlog::debug("Using norm bound."); + } + + // Search for updated metric, gradient, and angles + line_search( + similarity_metric, + gradient, + descent_direction, + lambda, + bound_norm, + alg_params, + ls_params); + + 
// Update current angles + similarity_corner_angles(similarity_metric, alpha, cot_alpha); + + // Display current iteration information + spdlog::info( + "itr({}) lm({}) newton_decr({}) max_error({}))", + itr, + lambda, + newton_decr, + gradient.cwiseAbs().maxCoeff()); + } +} + +} // namespace Holonomy +} // namespace Penner \ No newline at end of file diff --git a/src/holonomy/similarity/constraint.cpp b/src/holonomy/similarity/constraint.cpp new file mode 100644 index 0000000..f6c9b07 --- /dev/null +++ b/src/holonomy/similarity/constraint.cpp @@ -0,0 +1,242 @@ +#include "holonomy/similarity/constraint.h" + +#include "optimization/core/constraint.h" +#include "holonomy/holonomy/holonomy.h" +#include "holonomy/holonomy/constraint.h" + +namespace Penner { +namespace Holonomy { + +VectorX compute_similarity_constraint( + const SimilarityPennerConeMetric& similarity_metric, + const VectorX& angles) +{ + VectorX constraint; + int n_f = similarity_metric.n_faces(); + int n_s = similarity_metric.n_homology_basis_loops(); + + std::vector v_map; + int num_vertex_forms; + Optimization::build_free_vertex_map(similarity_metric, v_map, num_vertex_forms); + + // Initialize the constraint + constraint.setZero(num_vertex_forms + n_s + n_f - 1); + constraint.head(num_vertex_forms + n_s) = compute_metric_constraint(similarity_metric, angles); + + return constraint; +} + +// The Jacobian of the constraints +MatrixX compute_one_form_constraint_jacobian( + const SimilarityPennerConeMetric& similarity_metric, + const VectorX& cotangents) +{ + int n_f = similarity_metric.n_faces(); + int n_e = similarity_metric.n_edges(); + int n_h = similarity_metric.n_halfedges(); + int n_s = similarity_metric.n_homology_basis_loops(); + + typedef Eigen::Triplet Trip; + std::vector trips(0); + trips.reserve(3 * n_h + 3 * n_f); + + // Get free vertex map + std::vector v_rep; + int num_vertex_forms; + Optimization::build_free_vertex_rep(similarity_metric, v_rep, num_vertex_forms); + + // Add entries for 
vertex angles constraints + for (int h = 0; h < n_h; h++) { + int v0 = similarity_metric.v0(h); + if (v_rep[v0] >= 0) { + trips.push_back(Trip( + v_rep[v0], + similarity_metric.he2e[h], + similarity_metric.sign(h) * 0.5 * cotangents[h])); + } + + int v1 = similarity_metric.v1(h); + if (v_rep[v1] >= 0) { + trips.push_back(Trip( + v_rep[v1], + similarity_metric.he2e[h], + -similarity_metric.sign(h) * 0.5 * cotangents[h])); + } + } + + // Add entries for holonomy constraints + const auto& homology_basis_loops = similarity_metric.get_homology_basis_loops(); + for (int s = 0; s < n_s; ++s) { + for (const auto& dual_segment : *homology_basis_loops[s]) { + int h = dual_segment[0]; // Get just one halfedge of the path + int ho = similarity_metric.opp[h]; + Scalar val = similarity_metric.sign(h) * 0.5 * (cotangents[h] + cotangents[ho]); + int i = num_vertex_forms + s; + int j = similarity_metric.he2e[h]; + trips.push_back(Trip(i, j, val)); + spdlog::trace("Adding constraint ({}, {}, {})", i, j, val); + } + } + + // Add closed 1-form constraints + for (int f = 0; f < n_f - 1; f++) { + int hi = similarity_metric.h[f]; + int hj = similarity_metric.n[hi]; + int hk = similarity_metric.n[hj]; + int ei = similarity_metric.he2e[hi]; + int ej = similarity_metric.he2e[hj]; + int ek = similarity_metric.he2e[hk]; + trips.push_back(Trip(num_vertex_forms + n_s + f, ei, similarity_metric.sign(hi))); + trips.push_back(Trip(num_vertex_forms + n_s + f, ej, similarity_metric.sign(hj))); + trips.push_back(Trip(num_vertex_forms + n_s + f, ek, similarity_metric.sign(hk))); + } + + + assert((num_vertex_forms + n_s + n_f - 1) == n_e); + Eigen::SparseMatrix J(n_e, n_e); + J.reserve(trips.size()); + J.setFromTriplets(trips.begin(), trips.end()); + return J; +} + +// Matrix to expand per-edge one form to per-halfedge one form +MatrixX compute_one_form_expansion_matrix( + const DifferentiableConeMetric& cone_metric) +{ + int n_h = cone_metric.n_halfedges(); + int n_e = cone_metric.n_edges(); + typedef 
Eigen::Triplet Trip; + std::vector trips(0); + trips.reserve(n_h); + + // Add entries + for (int h = 0; h < n_h; h++) { + int e = cone_metric.he2e[h]; + trips.push_back(Trip(h, e, cone_metric.sign(h))); + } + + Eigen::SparseMatrix M(n_h, n_e); + M.reserve(trips.size()); + M.setFromTriplets(trips.begin(), trips.end()); + return M; +} + +// Matrix to reduce per-halfedge one form to per-edge one form +MatrixX compute_one_form_reduction_matrix( + const DifferentiableConeMetric& cone_metric) +{ + int n_h = cone_metric.n_halfedges(); + int n_e = cone_metric.n_edges(); + typedef Eigen::Triplet Trip; + std::vector trips(0); + trips.reserve(n_h); + + // Add entries + for (int e = 0; e < n_e; e++) { + int h = cone_metric.e2he[e]; + trips.push_back(Trip(e, h, cone_metric.sign(h))); + // trips.push_back(Trip(e, h, 1.0)); + } + + Eigen::SparseMatrix M(n_e, n_h); + M.reserve(trips.size()); + M.setFromTriplets(trips.begin(), trips.end()); + return M; +} + +MatrixX compute_similarity_constraint_jacobian( + const SimilarityPennerConeMetric& similarity_metric, + const VectorX& cotangents) +{ + // Get vertex representation + int num_vertices = similarity_metric.n_ind_vertices(); + + // Get component matrices + MatrixX J_constraint_0 = compute_metric_constraint_jacobian(similarity_metric, cotangents); + + MatrixX J_constraint_one_form = + compute_one_form_constraint_jacobian(similarity_metric, cotangents); + MatrixX M_one_form_reduction = compute_one_form_reduction_matrix(similarity_metric); + MatrixX M_dual_loop_basis_one_form = build_dual_loop_basis_one_form_matrix( + similarity_metric, + similarity_metric.get_homology_basis_loops()); + MatrixX J_constraint_1 = + -J_constraint_one_form * (M_one_form_reduction * M_dual_loop_basis_one_form); + + // Combine matrices and remove closed one form constraint + typedef Eigen::Triplet Trip; + std::vector trips(0); + int num_metric_coords = J_constraint_0.cols(); + int num_form_coords = similarity_metric.n_homology_basis_loops(); + int 
num_coords = num_metric_coords + num_form_coords; + int num_constraints = num_vertices + num_form_coords - 1; + trips.reserve(3 * similarity_metric.n_halfedges()); + for (int k = 0; k < J_constraint_0.outerSize(); ++k) { + for (MatrixX::InnerIterator it(J_constraint_0, k); it; ++it) { + assert(it.row() < num_form_coords + num_vertices - 1); + assert(it.col() < num_metric_coords); + trips.push_back(Trip(it.row(), it.col(), it.value())); + } + } + for (int k = 0; k < J_constraint_1.outerSize(); ++k) { + for (MatrixX::InnerIterator it(J_constraint_1, k); it; ++it) { + assert(it.col() < num_form_coords); + if (it.row() >= num_constraints) continue; // Skip closed form constraints + trips.push_back(Trip(it.row(), it.col() + num_metric_coords, it.value())); + } + } + + Eigen::SparseMatrix J_constraint(num_constraints, num_coords); + J_constraint.reserve(trips.size()); + J_constraint.setFromTriplets(trips.begin(), trips.end()); + return J_constraint; +} + +// Helper function to compute similarity constraint assuming a discrete metric +void similarity_constraint_with_jacobian_helper( + const SimilarityPennerConeMetric& similarity_metric, + VectorX& constraint, + MatrixX& J_constraint, + bool need_jacobian) +{ + // Get angles and cotangent of angles of faces opposite halfedges + VectorX he2angle; + VectorX cotangents; + similarity_metric.get_corner_angles(he2angle, cotangents); + + // Compute constraint and (optionally) the Jacobian + int num_vertices = similarity_metric.n_ind_vertices(); + int num_form_coords = similarity_metric.n_homology_basis_loops(); + constraint = compute_similarity_constraint(similarity_metric, he2angle); + constraint = constraint.head(num_vertices + num_form_coords - 1); + if (need_jacobian) { + J_constraint = compute_similarity_constraint_jacobian(similarity_metric, cotangents); + } +} + +void compute_similarity_constraint_with_jacobian( + const SimilarityPennerConeMetric& similarity_metric, + VectorX& constraint, + MatrixX& J_constraint, + bool 
need_jacobian) +{ + // Ensure current cone metric coordinates are log lengths + if (similarity_metric.is_discrete_metric()) { + similarity_constraint_with_jacobian_helper( + similarity_metric, + constraint, + J_constraint, + need_jacobian); + } else { + SimilarityPennerConeMetric similarity_metric_copy = similarity_metric; + similarity_metric_copy.make_discrete_metric(); + similarity_constraint_with_jacobian_helper( + similarity_metric_copy, + constraint, + J_constraint, + need_jacobian); + } +} + +} // namespace Holonomy +} // namespace Penner \ No newline at end of file diff --git a/src/holonomy/similarity/energy.cpp b/src/holonomy/similarity/energy.cpp new file mode 100644 index 0000000..2e7770a --- /dev/null +++ b/src/holonomy/similarity/energy.cpp @@ -0,0 +1,369 @@ +#include "holonomy/similarity/energy.h" + +#include "holonomy/core/forms.h" + +namespace Penner { +namespace Holonomy { + +JumpEnergy::JumpEnergy(const Mesh& m) + : m_opp(m.opp) +{} + +Scalar JumpEnergy::energy(const VectorX& metric_coords) const +{ + int num_halfedges = m_opp.size(); + assert(metric_coords.size() == num_halfedges); + Scalar energy = 0; + for (int h = 0; h < num_halfedges; ++h) { + Scalar jump = metric_coords[h] - metric_coords[m_opp[h]]; + energy += jump * jump; + } + + return 0.25 * energy; +} + +VectorX JumpEnergy::gradient(const VectorX& metric_coords) const +{ + int num_halfedges = m_opp.size(); + assert(metric_coords.size() == num_halfedges); + VectorX gradient(num_halfedges); + for (int h = 0; h < num_halfedges; ++h) { + Scalar jump = metric_coords[h] - metric_coords[m_opp[h]]; + gradient[h] = jump; + } + + return gradient; +} + +MatrixX JumpEnergy::hessian(const VectorX& metric_coords) const +{ + throw std::runtime_error("No hessian defined"); + return id_matrix(metric_coords.size()); +} + +MatrixX JumpEnergy::hessian_inverse(const VectorX& metric_coords) const +{ + throw std::runtime_error("No hessian defined"); + return id_matrix(metric_coords.size()); +} + 
+CoordinateEnergy::CoordinateEnergy( + const DifferentiableConeMetric& target_cone_metric, + std::vector coordinate_indices) + : m_metric_target(target_cone_metric.get_reduced_metric_coordinates()) + , m_coordinate_indices(coordinate_indices) +{} + +Scalar CoordinateEnergy::energy(const VectorX& metric_coords) const +{ + Scalar energy = 0; + for (const auto i : m_coordinate_indices) { + Scalar coord_diff = metric_coords[i] - m_metric_target[i]; + energy += coord_diff * coord_diff; + } + + return 0.5 * energy; +} + +VectorX CoordinateEnergy::gradient(const VectorX& metric_coords) const +{ + int num_coordinates = metric_coords.size(); + VectorX gradient; + gradient.setZero(num_coordinates); + for (const auto i : m_coordinate_indices) { + Scalar coord_diff = metric_coords[i] - m_metric_target[i]; + gradient[i] = coord_diff; + } + + return gradient; +} + +MatrixX CoordinateEnergy::hessian(const VectorX& metric_coords) const +{ + throw std::runtime_error("No hessian defined"); + return id_matrix(metric_coords.size()); +} + +MatrixX CoordinateEnergy::hessian_inverse(const VectorX& metric_coords) const +{ + throw std::runtime_error("No hessian defined"); + return id_matrix(metric_coords.size()); +} + +IntegratedEnergy::IntegratedEnergy(const SimilarityPennerConeMetric& target_similarity_metric) + : m_target_similarity_metric(target_similarity_metric) +{ + // Integrate the scaling form on the target metric + std::vector cut_h, is_cut_h; + MatrixX one_form_matrix = build_dual_loop_basis_one_form_matrix( + target_similarity_metric, + target_similarity_metric.get_homology_basis_loops()); + MatrixX integral_matrix = + build_one_form_integral_matrix(target_similarity_metric, cut_h, is_cut_h); + MatrixX integrated_scaling_matrix = + build_integrated_one_form_scaling_matrix(target_similarity_metric); + + // Get metric expansion matrix + MatrixX identification, projection; + std::vector he2e, e2he, proj, embed; + build_edge_maps(target_similarity_metric, he2e, e2he); + 
build_refl_proj(target_similarity_metric, he2e, e2he, proj, embed); + identification = build_edge_matrix(he2e, e2he); + projection = build_refl_matrix(proj, embed); + + // Build energy matrices + m_scaling_matrix = integrated_scaling_matrix * (integral_matrix * one_form_matrix); + m_expansion_matrix = identification * projection; + m_metric_target = m_target_similarity_metric.get_metric_coordinates(); + Axx = m_expansion_matrix.transpose() * m_expansion_matrix; + Axy = m_expansion_matrix.transpose() * m_scaling_matrix; + Ayx = m_scaling_matrix.transpose() * m_expansion_matrix; + Ayy = m_scaling_matrix.transpose() * m_scaling_matrix; + bx = -m_expansion_matrix.transpose() * m_metric_target; + by = -m_scaling_matrix.transpose() * m_metric_target; +} + +Scalar IntegratedEnergy::energy(const VectorX& metric_coords) const +{ + // Separate metric and one-form coordinates + VectorX reduced_length_coords, harmonic_form_coords; + m_target_similarity_metric.separate_coordinates( + metric_coords, + reduced_length_coords, + harmonic_form_coords); + + // Compute the integrated metric coordinates + VectorX integrated_metric_coords = + m_expansion_matrix * reduced_length_coords + m_scaling_matrix * harmonic_form_coords; + VectorX difference = integrated_metric_coords - m_metric_target; + + return 0.5 * difference.squaredNorm(); +} + +VectorX IntegratedEnergy::gradient(const VectorX& metric_coords) const +{ + // Separate metric and one-form coordinates and relabel to x and y + VectorX reduced_length_coords, harmonic_form_coords; + m_target_similarity_metric.separate_coordinates( + metric_coords, + reduced_length_coords, + harmonic_form_coords); + const VectorX& x = reduced_length_coords; + const VectorX& y = harmonic_form_coords; + + // Compute the gradient in two parts + VectorX gradient(metric_coords.size()); + + // Compute the metric part of the gradient from precomputed matrices + int num_length_coordinates = reduced_length_coords.size(); + 
gradient.head(num_length_coordinates) = Axx * x + Axy * y + bx; + + // Compute the one-form part of the gradient from precomputed matrices + int num_form_coordinates = harmonic_form_coords.size(); + gradient.tail(num_form_coordinates) = Ayx * x + Ayy * y + by; + + return gradient; +} + +MatrixX IntegratedEnergy::hessian(const VectorX& metric_coords) const +{ + throw std::runtime_error("No hessian defined"); + return id_matrix(metric_coords.size()); +} + +MatrixX IntegratedEnergy::hessian_inverse(const VectorX& metric_coords) const +{ + throw std::runtime_error("No hessian defined"); + return id_matrix(metric_coords.size()); +} + +// Utility power 2 function +Scalar pow2(Scalar x) +{ + return x * x; +} + +TriangleQualityEnergy::TriangleQualityEnergy(const MarkedPennerConeMetric& target_marked_metric) + : m_target_marked_metric(target_marked_metric) +{ } + +Scalar TriangleQualityEnergy::energy(const VectorX& metric_coords) const +{ + if (metric_coords.size() == 0) return 0; + const auto& n = m_target_marked_metric.n; + + // Build edge maps + std::vector he2e, e2he, proj, embed; + build_edge_maps(m_target_marked_metric, he2e, e2he); + build_refl_proj(m_target_marked_metric, he2e, e2he, proj, embed); + + // Compute embedded edge lengths + int num_halfedges = he2e.size(); + VectorX l(num_halfedges); + for (int h = 0; h < num_halfedges; ++h) { + l[h] = exp(metric_coords[proj[he2e[h]]] / 2.0); + } + + int num_faces = m_target_marked_metric.n_faces(); + Scalar energy = 0; + for (int f = 0; f < num_faces; ++f) { + // Get face halfedges + int hij = m_target_marked_metric.h[f]; + int hjk = n[hij]; + int hki = n[hjk]; + + // Compute ratio of inradius to outradius for face + Scalar numer = 2*l[hij]*l[hjk]*l[hki]; + Scalar denom = ((-l[hij] + l[hjk] + l[hki])*(l[hij] - l[hjk] + l[hki])*(l[hij] + l[hjk] - l[hki])); + energy += numer / denom; + } + + return energy; +} + +VectorX TriangleQualityEnergy::gradient(const VectorX& metric_coords) const +{ + const auto& n = 
m_target_marked_metric.n; + + // Build edge maps + std::vector he2e, e2he, proj, embed; + build_edge_maps(m_target_marked_metric, he2e, e2he); + build_refl_proj(m_target_marked_metric, he2e, e2he, proj, embed); + + // Compute embedded edge lengths + int num_halfedges = he2e.size(); + VectorX l(num_halfedges); + for (int h = 0; h < num_halfedges; ++h) { + l[h] = exp(metric_coords[proj[he2e[h]]] / 2.0); + } + + // Compute gradient by edge iteration + VectorX gradient; + int num_edges = m_target_marked_metric.n_edges(); + gradient.setZero(metric_coords.size()); + for (int e = 0; e < num_edges; ++e) { + int h = m_target_marked_metric.e2he[e]; + + // Gradient has components for both halfedges of the edge + for (int hij : {h, m_target_marked_metric.opp[h]}) { + // Get other halfedges in face + int hjk = n[hij]; + int hki = n[hjk]; + + // Gradient computed by symbolic computation + Scalar numer = 2 * l[hjk] * l[hki] * + (-l[hij] * (-l[hij] + l[hjk] + l[hki]) * (l[hij] - l[hjk] + l[hki]) - + l[hij] * (-l[hij] + l[hjk] + l[hki]) * (l[hij] + l[hjk] - l[hki]) + + l[hij] * (l[hij] - l[hjk] + l[hki]) * (l[hij] + l[hjk] - l[hki]) + + (-l[hij] + l[hjk] + l[hki]) * (l[hij] - l[hjk] + l[hki]) * + (l[hij] + l[hjk] - l[hki])); + Scalar denom = + (pow2(-l[hij] + l[hjk] + l[hki]) * pow2(l[hij] - l[hjk] + l[hki]) * + pow2(l[hij] + l[hjk] - l[hki])); + + gradient[e] += numer / denom; + } + } + + return gradient; +} + +MatrixX TriangleQualityEnergy::hessian(const VectorX& metric_coords) const +{ + throw std::runtime_error("No hessian defined"); + return id_matrix(metric_coords.size()); +} + +MatrixX TriangleQualityEnergy::hessian_inverse(const VectorX& metric_coords) const +{ + throw std::runtime_error("No hessian defined"); + return id_matrix(metric_coords.size()); +} + +LogTriangleQualityEnergy::LogTriangleQualityEnergy(const MarkedPennerConeMetric& target_marked_metric) + : m_target_marked_metric(target_marked_metric) +{ } + +Scalar LogTriangleQualityEnergy::energy(const VectorX& 
metric_coords) const +{ + const auto& n = m_target_marked_metric.n; + + // Build edge maps + std::vector he2e, e2he, proj, embed; + build_edge_maps(m_target_marked_metric, he2e, e2he); + build_refl_proj(m_target_marked_metric, he2e, e2he, proj, embed); + + // Get sum of face energies + int num_faces = m_target_marked_metric.n_faces(); + Scalar energy = 0; + for (int f = 0; f < num_faces; ++f) { + // Get halfedges of face + int hij = m_target_marked_metric.h[f]; + int hjk = n[hij]; + int hki = n[hjk]; + + // Get log penner coordinates of face + Scalar llij = metric_coords[proj[he2e[hij]]]; + Scalar lljk = metric_coords[proj[he2e[hjk]]]; + Scalar llki = metric_coords[proj[he2e[hki]]]; + + // Compute sum of squared coordinate differences + energy += pow2((-2 * llij) + lljk + llki); + energy += pow2(llij - (2 * lljk) + llki); + energy += pow2(llij + lljk - (2 * llki)); + } + + return 0.5 * energy; +} + +VectorX LogTriangleQualityEnergy::gradient(const VectorX& metric_coords) const +{ + const auto& n = m_target_marked_metric.n; + + // Build edge maps + std::vector he2e, e2he, proj, embed; + build_edge_maps(m_target_marked_metric, he2e, e2he); + build_refl_proj(m_target_marked_metric, he2e, e2he, proj, embed); + + // Compute gradient by edge iteration + VectorX gradient; + int num_edges = m_target_marked_metric.n_edges(); + gradient.setZero(metric_coords.size()); + for (int e = 0; e < num_edges; ++e) { + int h = m_target_marked_metric.e2he[e]; + + // Gradient has components for both halfedges of the edge + for (int hij : {h, m_target_marked_metric.opp[h]}) { + // Get other halfedges in face + int hjk = n[hij]; + int hki = n[hjk]; + + // Get log edge lengths of face + Scalar llij = metric_coords[proj[he2e[hij]]]; + Scalar lljk = metric_coords[proj[he2e[hjk]]]; + Scalar llki = metric_coords[proj[he2e[hki]]]; + + // Compute gradients for each term containing the edge coordinate + gradient[e] += -2 * ((-2 * llij) + lljk + llki); + gradient[e] += (llij - (2 * lljk) + llki); + 
gradient[e] += (llij + lljk - (2 * llki)); + } + } + + return gradient; +} + +MatrixX LogTriangleQualityEnergy::hessian(const VectorX& metric_coords) const +{ + throw std::runtime_error("No hessian defined"); + return id_matrix(metric_coords.size()); +} + +MatrixX LogTriangleQualityEnergy::hessian_inverse(const VectorX& metric_coords) const +{ + throw std::runtime_error("No hessian defined"); + return id_matrix(metric_coords.size()); +} + +} // namespace Holonomy +} // namespace Penner \ No newline at end of file diff --git a/src/holonomy/similarity/layout.cpp b/src/holonomy/similarity/layout.cpp new file mode 100644 index 0000000..252ea5b --- /dev/null +++ b/src/holonomy/similarity/layout.cpp @@ -0,0 +1,191 @@ +#include "holonomy/similarity/layout.h" + +#include "optimization/parameterization/layout.h" +#include "optimization/parameterization/translation.h" +#include "conformal_ideal_delaunay/ConformalInterface.hh" + +namespace Penner { +namespace Holonomy { + +// Use interpolation method that also tracks the flips in the marked metric +// TODO Replace with method in Penner code that tracks flip sequence +void interpolate_penner_coordinates( + const Mesh& mesh, + const SimilarityPennerConeMetric& initial_marked_metric, + SimilarityPennerConeMetric& marked_metric, + Optimization::InterpolationMesh& interpolation_mesh, + Optimization::InterpolationMesh& reverse_interpolation_mesh) +{ + marked_metric = initial_marked_metric; + + // Get forward Euclidean interpolation mesh + VectorX trivial_scale_factors; + trivial_scale_factors.setZero(mesh.n_ind_vertices()); + bool is_hyperbolic = false; + interpolation_mesh = + Optimization::InterpolationMesh(mesh, trivial_scale_factors, is_hyperbolic); + + // Get initial reflection structure + Mesh& mc = interpolation_mesh.get_mesh(); + std::vector initial_type = mc.type; + std::vector initial_R = mc.R; + + spdlog::trace("Making surface Delaunay"); + std::vector euclidean_flip_sequence; + 
interpolation_mesh.convert_to_delaunay_hyperbolic_surface(euclidean_flip_sequence); + VectorX initial_metric_coords = interpolation_mesh.get_halfedge_metric_coordinates(); + + // Get Euclidean Delaunay reflection structure + std::vector eucl_del_type = mc.type; + std::vector eucl_del_R = mc.R; + + // Copy flip sequence with ptolemy flips and new metric to get metric coordinates + spdlog::trace("Getting flipped metric coordinates"); + for (const auto& h : euclidean_flip_sequence) { + marked_metric.flip_ccw(-h - 1); + } + VectorX flipped_metric_coords = marked_metric.get_metric_coordinates(); + + // Compute translations for reparametrization + VectorX translations; + Optimization::compute_as_symmetric_as_possible_translations( + mc, + flipped_metric_coords, + initial_metric_coords, + translations); + SPDLOG_INFO("Translations in range [{}, {}]", translations.minCoeff(), translations.maxCoeff()); + + // Change the metric and reparameterize + spdlog::trace("Changing underlying metric"); + interpolation_mesh.change_hyperbolic_surface_metric( + flipped_metric_coords, + trivial_scale_factors, + translations); + + // Make delaunay with new metric + spdlog::trace("Making new surface Delaunay"); + std::vector flip_sequence; + marked_metric.make_delaunay(flip_sequence); + interpolation_mesh.follow_flip_sequence(flip_sequence); + + // Build a clean overlay mesh with the final metric + spdlog::trace("Building reverse interpolation mesh"); + Mesh m_layout = interpolation_mesh.get_mesh(); + is_hyperbolic = true; + reverse_interpolation_mesh = + Optimization::InterpolationMesh(m_layout, trivial_scale_factors, is_hyperbolic); + + // Undo the flips to make the hyperbolic surface with new metric Delaunay + reverse_interpolation_mesh.reverse_flip_sequence(flip_sequence); + reverse_interpolation_mesh.get_mesh().type = eucl_del_type; + reverse_interpolation_mesh.get_mesh().R = eucl_del_R; + + // Undo the reparametrization + VectorX inverse_translations = -translations; + 
reverse_interpolation_mesh.change_hyperbolic_surface_metric( + initial_metric_coords, + trivial_scale_factors, + inverse_translations); + + // Generate reverse map for Euclidean flips + reverse_interpolation_mesh.force_convert_to_euclidean_surface(); + reverse_interpolation_mesh.reverse_flip_sequence(euclidean_flip_sequence); + reverse_interpolation_mesh.get_mesh().type = initial_type; + reverse_interpolation_mesh.get_mesh().R = initial_R; +} + +std:: + tuple< + OverlayMesh, // m_o + Eigen::MatrixXd, // V_o + Eigen::MatrixXi, // F_o + Eigen::MatrixXd, // uv_o + Eigen::MatrixXi, // FT_o + std::vector, // is_cut_h + std::vector, // is_cut_o + std::vector, // Fn_to_F + std::vector> // endpoints_o + > + generate_VF_mesh_from_similarity_metric( + const Eigen::MatrixXd& V, + const Eigen::MatrixXi& F, + const std::vector& Th_hat, + const SimilarityPennerConeMetric& initial_similarity_metric, + std::vector cut_h) +{ + // Get mesh with vertex reindexing + std::vector vtx_reindex, indep_vtx, dep_vtx, v_rep, bnd_loops; + Mesh m = + FV_to_double(V, F, V, F, Th_hat, vtx_reindex, indep_vtx, dep_vtx, v_rep, bnd_loops); + + // Find boundary halfedges + std::vector is_bd(m.n_ind_vertices(), false); + for (int i = 0; i < m.n_halfedges(); i++) { + if ((m.type[i] == 1) && (m.type[m.opp[i]] == 2)) + { + is_bd[m.v_rep[m.to[i]]] = true; + } + } + + // Compute interpolation overlay mesh + // TODO: Use consistent interpolation code from the Penner codebase + Eigen::MatrixXd V_overlay; + Optimization::InterpolationMesh interpolation_mesh, reverse_interpolation_mesh; + SimilarityPennerConeMetric similarity_metric = initial_similarity_metric; + spdlog::trace("Interpolating penner coordinates"); + interpolate_penner_coordinates( + m, + initial_similarity_metric, + similarity_metric, + interpolation_mesh, + reverse_interpolation_mesh); + spdlog::trace("Interpolating vertex positions"); + interpolate_vertex_positions( + V, + vtx_reindex, + interpolation_mesh, + reverse_interpolation_mesh, + 
V_overlay); + OverlayMesh m_o = interpolation_mesh.get_overlay_mesh(); + + // Scale the overlay mesh and make tufted + auto [metric_coords, u_integral, is_cut_integral] = + similarity_metric.get_integrated_metric_coordinates(); + Mesh& mc = m_o.cmesh(); + for (int h = 0; h < metric_coords.size(); ++h) { + mc.l[h] = exp(metric_coords[h] / 2.0); + } + Optimization::make_tufted_overlay(m_o); + + // Get endpoints + std::vector> endpoints; + find_origin_endpoints(m_o, endpoints); + + // Convert overlay mesh to transposed vector format + std::vector> V_overlay_vec(3); + for (int i = 0; i < 3; ++i) { + V_overlay_vec[i].resize(V_overlay.rows()); + for (int j = 0; j < V_overlay.rows(); ++j) { + V_overlay_vec[i][j] = V_overlay(j, i); + } + } + + // Get layout topology from original mesh + std::vector is_cut = Optimization::compute_layout_topology(m, cut_h); + + // Convert overlay mesh to VL format + spdlog::trace("Getting layout"); + std::vector u(m.n_ind_vertices(), 0.0); + return Optimization::consistent_overlay_mesh_to_VL( + m_o, + vtx_reindex, + is_bd, + u, + V_overlay_vec, + endpoints, + is_cut, + is_cut_integral); +} + +} // namespace Holonomy +} // namespace Penner \ No newline at end of file diff --git a/src/holonomy/similarity/similarity_penner_cone_metric.cpp b/src/holonomy/similarity/similarity_penner_cone_metric.cpp new file mode 100644 index 0000000..55ff2e8 --- /dev/null +++ b/src/holonomy/similarity/similarity_penner_cone_metric.cpp @@ -0,0 +1,447 @@ +#include "holonomy/similarity/similarity_penner_cone_metric.h" + +#include "holonomy/similarity/conformal.h" +#include "holonomy/similarity/constraint.h" +#include "holonomy/holonomy/holonomy.h" + +#include + +#include "optimization/core/area.h" +#include "optimization/core/constraint.h" + +namespace Penner { +namespace Holonomy { + +void similarity_corner_angles( + const SimilarityPennerConeMetric& similarity_metric, + VectorX& he2angle, + VectorX& he2cot) +{ + int num_halfedges = 
similarity_metric.n_halfedges(); + he2angle.setZero(num_halfedges); + he2cot.setZero(num_halfedges); + const Scalar cot_infty = 1e10; + + // Get integrated metric coordinates + auto [metric_coords, u, is_cut_h] = similarity_metric.get_integrated_metric_coordinates(); + + // Compute maps from halfedges to opposite angles and cotangents of opposite + // angles + int num_faces = similarity_metric.h.size(); + for (int f = 0; f < num_faces; f++) { + // Halfedges of face f + int hi = similarity_metric.h[f]; + int hj = similarity_metric.n[hi]; + int hk = similarity_metric.n[hj]; + + // Lengths of the halfedges + Scalar li = exp(metric_coords[hi] / 2.0); + Scalar lj = exp(metric_coords[hj] / 2.0); + Scalar lk = exp(metric_coords[hk] / 2.0); + + // Compute the cotangent of the angles + Scalar Aijk4 = 4 * sqrt(std::max(Optimization::squared_area(li, lj, lk), 0.0)); + Scalar Ijk = (-li * li + lj * lj + lk * lk); + Scalar iJk = (li * li - lj * lj + lk * lk); + Scalar ijK = (li * li + lj * lj - lk * lk); + he2cot[hi] = Aijk4 == 0.0 ? copysign(cot_infty, Ijk) : (Ijk / Aijk4); + he2cot[hj] = Aijk4 == 0.0 ? copysign(cot_infty, iJk) : (iJk / Aijk4); + he2cot[hk] = Aijk4 == 0.0 ? 
copysign(cot_infty, ijK) : (ijK / Aijk4); + +#define USE_ACOS_HOLONOMY +#ifdef USE_ACOS_HOLONOMY + he2angle[hi] = acos(std::min(std::max(Ijk / (2.0 * lj * lk), -1.0), 1.0)); + he2angle[hj] = acos(std::min(std::max(iJk / (2.0 * lk * li), -1.0), 1.0)); + he2angle[hk] = acos(std::min(std::max(ijK / (2.0 * li * lj), -1.0), 1.0)); +#else + // atan2 is prefered for stability + he2angle[hi] = 0.0, he2angle[hj] = 0.0, he2angle[hk] = 0.0; + // li: l12, lj: l23, lk: l31 + Scalar l12 = li, l23 = lj, l31 = lk; + const Scalar t31 = +l12 + l23 - l31, + t23 = +l12 - l23 + l31, + t12 = -l12 + l23 + l31; + // valid triangle + if (t31 > 0 && t23 > 0 && t12 > 0) + { + const Scalar l123 = l12 + l23 + l31; + const Scalar denom = sqrt(t12 * t23 * t31 * l123); + he2angle[hj] = 2 * atan2(t12 * t31, denom); // a1 l23 + he2angle[hk] = 2 * atan2(t23 * t12, denom); // a2 l31 + he2angle[hi] = 2 * atan2(t31 * t23, denom); // a3 l12 + } + else if (t31 <= 0) + he2angle[hk] = M_PI; + else if (t23 <= 0) + he2angle[hj] = M_PI; + else if (t12 <= 0) + he2angle[hi] = M_PI; + else + he2angle[hj] = M_PI; +#endif + } +} + +Scalar ell(Scalar l, Scalar u0, Scalar u1, Scalar offset = 0) +{ + return l * exp((u0 + u1) / 2 - offset); +} + +bool NonDelaunay(SimilarityPennerConeMetric& m, int e, SolveStats& solve_stats) +{ + if (m.type[m.h0(e)] == 4) return false; // virtual diagonal of symmetric trapezoid + solve_stats.n_checks++; + int hij = m.h0(e); + int hjk = m.n[hij]; + int hki = m.n[hjk]; + int hji = m.h1(e); + int him = m.n[hji]; + int hmj = m.n[him]; + + // triangles: hij, hjk, hki + // hji, him, hmj + const VectorX& xi = m.get_one_form(); + if (abs(xi[hij] + xi[hjk] + xi[hki]) > 1e-10 || abs(xi[hji] + xi[him] + xi[hmj]) > 1e-10) { + std::cerr << "error! xi not closed." 
<< std::endl; + std::cerr << "f" << m.f[hij] << ": " << hij << "," << hjk << "," << hki << std::endl; + std::cerr << "f" << m.f[hji] << ": " << hji << "," << him << "," << hmj << std::endl; + std::cerr << xi[hij] << ", " << xi[hjk] << ", " << xi[hki] << ", " + << xi[hij] + xi[hjk] + xi[hki] << std::endl; + std::cerr << xi[hji] << ", " << xi[him] << ", " << xi[hmj] << ", " + << xi[hji] + xi[him] + xi[hmj] << std::endl; + exit(0); + } + + Scalar ui = 0; + Scalar uj = xi[hij]; + Scalar uk = -xi[hki]; + Scalar um = xi[him]; + + Scalar uijk_avg = (ui + uj + uk) / 3.0; + Scalar ujim_avg = (uj + ui + um) / 3.0; + Scalar ljk = ell(m.l[m.e(hjk)], uj, uk, uijk_avg); + Scalar lki = ell(m.l[m.e(hki)], uk, ui, uijk_avg); + Scalar lij = ell(m.l[m.e(hij)], ui, uj, uijk_avg); + Scalar lji = ell(m.l[m.e(hji)], uj, ui, ujim_avg); + Scalar lmj = ell(m.l[m.e(hmj)], um, uj, ujim_avg); + Scalar lim = ell(m.l[m.e(him)], ui, um, ujim_avg); + + bool pre_flip_check = (ljk / lki + lki / ljk - (lij / ljk) * (lij / lki)) + + (lmj / lim + lim / lmj - (lji / lmj) * (lji / lim)) < + 0; + + // additionally check whether delaunay is violated after flip + // we consider the configuration to 'violate delaunay condition' only if + // it does not satisfy delaunay check AND post-flip configuration satisfies delaunay condition. 
+ Scalar umki_avg = (um + uk + ui) / 3.0; + Scalar ukmj_avg = (uk + um + uj) / 3.0; + Scalar _lkm_non_scaled = + (m.l[m.e(hjk)] * m.l[m.e(him)] + m.l[m.e(hki)] * m.l[m.e(hmj)]) / m.l[m.e(hij)]; + Scalar _lkm = ell(_lkm_non_scaled, uk, um, ukmj_avg); + Scalar _lmj = ell(m.l[m.e(hmj)], um, uj, ukmj_avg); + Scalar _ljk = ell(m.l[m.e(hjk)], uj, uk, ukmj_avg); + Scalar _lmk = ell(_lkm_non_scaled, um, uk, umki_avg); + Scalar _lki = ell(m.l[m.e(hki)], uk, ui, umki_avg); + Scalar _lim = ell(m.l[m.e(him)], ui, um, umki_avg); + bool post_flip_check = (_lki / _lim + _lim / _lki - (_lmk / _lki) * (_lmk / _lim)) + + (_ljk / _lmj + _lmj / _ljk - (_lkm / _ljk) * (_lkm / _lmj)) < + 0; + return pre_flip_check && !post_flip_check; +} + +bool EdgeFlip( + std::set& q, + Mesh& m, + int e, + int tag, + DelaunayStats& delaunay_stats, + bool Ptolemy = true) +{ + FlipStats flip_stats; + bool success = ::Penner::EdgeFlip( + m, + e, + tag, + delaunay_stats.flip_seq, + q, + flip_stats, + Ptolemy); + delaunay_stats.n_flips += flip_stats.n_flips; + delaunay_stats.n_flips_12 += flip_stats.n_flips_12; + delaunay_stats.n_flips_q += flip_stats.n_flips_q; + delaunay_stats.n_flips_s += flip_stats.n_flips_s; + delaunay_stats.n_flips_t += flip_stats.n_flips_t; + return success; +} + +void MakeSimilarityDelaunay( + SimilarityPennerConeMetric& m, + DelaunayStats& delaunay_stats, + SolveStats& solve_stats, + bool Ptolemy) +{ + std::set q; + for (int i = 0; i < m.n_halfedges(); i++) { + if (m.opp[i] < i) // Only consider halfedges with lower index to prevent duplication + continue; + int type0 = m.type[m.h0(i)]; + int type1 = m.type[m.h1(i)]; + if (type0 == 0 || type0 == 1 || type1 == 1 || + type0 == 3) // type 22 edges are flipped below; type 44 edges (virtual diagonals) are + // never flipped. 
+ q.insert(i); + } + while (!q.empty()) { + int e = *(q.begin()); + q.erase(q.begin()); + int type0 = m.type[m.h0(e)]; + int type1 = m.type[m.h1(e)]; + if (!(type0 == 2 && type1 == 2) && !(type0 == 4) && NonDelaunay(m, e, solve_stats)) { + int Re = -1; + if (type0 == 1 && type1 == 1) Re = m.e(m.R[m.h0(e)]); + if (!EdgeFlip(q, m, e, 0, delaunay_stats, Ptolemy)) continue; + if (type0 == 1 && type1 == 1) // flip mirror edge on sheet 2 + { + int e = Re; + if (Re == -1) spdlog::info("Negative index"); + if (!EdgeFlip(q, m, e, 1, delaunay_stats, Ptolemy)) continue; + } + // checkR(); + } + } +} + +SimilarityPennerConeMetric::SimilarityPennerConeMetric( + const Mesh& m, + const VectorX& metric_coords, + const std::vector>& homology_basis_loops, + const std::vector& kappa, + const VectorX& harmonic_form_coords) + : MarkedPennerConeMetric(m, metric_coords, homology_basis_loops, kappa) + , m_harmonic_form_coords(harmonic_form_coords) +{ + MatrixX harmonic_form_matrix = + build_dual_loop_basis_one_form_matrix(*this, m_homology_basis_loops); + m_one_form = harmonic_form_matrix * m_harmonic_form_coords; + m_one_form_direction = VectorX::Zero(m_one_form.size()); +} + +SimilarityPennerConeMetric::SimilarityPennerConeMetric( + const Mesh& m, + const VectorX& reduced_metric_coords, + const std::vector>& homology_basis_loops, + const std::vector& kappa) + : SimilarityPennerConeMetric( + m, + reduced_metric_coords.head(m.n_edges()), + homology_basis_loops, + kappa, + reduced_metric_coords.tail(reduced_metric_coords.size() - m.n_edges())) +{} + +VectorX SimilarityPennerConeMetric::get_reduced_metric_coordinates() const +{ + int num_length_coordinates = m_embed.size(); + int num_form_coordinates = m_harmonic_form_coords.size(); + VectorX reduced_metric_coords(num_length_coordinates + num_form_coordinates); + for (int E = 0; E < num_length_coordinates; ++E) { + int h = e2he[m_embed[E]]; + reduced_metric_coords[E] = 2.0 * log(l[h]); + } + for (int i = 0; i < num_form_coordinates; ++i) { 
+ reduced_metric_coords[num_length_coordinates + i] = m_harmonic_form_coords[i]; + } + + return reduced_metric_coords; +} + +void SimilarityPennerConeMetric::get_corner_angles(VectorX& he2angle, VectorX& he2cot) const +{ + similarity_corner_angles(*this, he2angle, he2cot); +} + +std::unique_ptr SimilarityPennerConeMetric::set_metric_coordinates( + const VectorX& reduced_metric_coords) const +{ + VectorX metric_coords; + VectorX harmonic_form_coords; + separate_coordinates(reduced_metric_coords, metric_coords, harmonic_form_coords); + return std::make_unique(SimilarityPennerConeMetric( + *this, + metric_coords, + m_homology_basis_loops, + kappa_hat, + harmonic_form_coords)); +} + +// TODO Make projection to constraint, including projection of harmonic form coordinates +std::unique_ptr SimilarityPennerConeMetric::scale_conformally( + const VectorX& u) const +{ + int num_reduced_coordinates = m_embed.size(); + VectorX reduced_metric_coords(num_reduced_coordinates); + for (int E = 0; E < num_reduced_coordinates; ++E) { + int h = e2he[m_embed[E]]; + reduced_metric_coords[E] = 2.0 * log(l[h]) + (u[v_rep[to[h]]] + u[v_rep[to[opp[h]]]]); + } + + return set_metric_coordinates(reduced_metric_coords); +} + +bool SimilarityPennerConeMetric::constraint( + VectorX& constraint, + MatrixX& J_constraint, + bool need_jacobian, + bool only_free_vertices) const +{ + if (!only_free_vertices) { + spdlog::warn("Similarity metric only supports free vertices"); + } + compute_similarity_constraint_with_jacobian(*this, constraint, J_constraint, need_jacobian); + return true; +} + +std::unique_ptr SimilarityPennerConeMetric::project_to_constraint( + SolveStats& solve_stats, + std::shared_ptr proj_params) const +{ + solve_stats.n_solves++; // TODO Make accurate + AlgorithmParameters alg_params; + LineSearchParameters ls_params; + alg_params.initial_ptolemy = proj_params->initial_ptolemy; + alg_params.max_itr = proj_params->max_itr; + alg_params.error_eps = double(proj_params->error_eps); + 
alg_params.use_edge_flips = proj_params->use_edge_flips; + ls_params.bound_norm_thres = double(proj_params->bound_norm_thres); + ls_params.do_reduction = proj_params->do_reduction; + + // TODO Expose to interface + ls_params.lambda0 = 1; + ls_params.reset_lambda = false; + + // Get projected metric + SimilarityPennerConeMetric projected_similarity_metric = *this; + compute_conformal_similarity_metric(projected_similarity_metric, alg_params, ls_params); + projected_similarity_metric.undo_flips(); + + return std::make_unique( + projected_similarity_metric.scale_by_one_form()); +} + +void SimilarityPennerConeMetric::make_delaunay(std::vector& flip_seq) +{ + // Make the copied mesh Delaunay with Ptolemy flips + DelaunayStats del_stats; + SolveStats solve_stats; + bool use_ptolemy = true; + MakeSimilarityDelaunay(*this, del_stats, solve_stats, use_ptolemy); + flip_seq = del_stats.flip_seq; + return; +} + +void SimilarityPennerConeMetric::make_discrete_metric() +{ + // Make the copied mesh Delaunay with Ptolemy flips + DelaunayStats del_stats; + SolveStats solve_stats; + bool use_ptolemy = true; + MakeSimilarityDelaunay(*this, del_stats, solve_stats, use_ptolemy); + m_is_discrete_metric = true; + return; +} + +std::tuple> +SimilarityPennerConeMetric::get_integrated_metric_coordinates(std::vector cut_h) const +{ + std::vector is_cut_h; + VectorX u = integrate_one_form(*this, m_one_form, cut_h, is_cut_h); + VectorX metric_coords = get_metric_coordinates(); + metric_coords = scale_halfedges_by_integrated_one_form(*this, metric_coords, u); + return std::make_tuple(metric_coords, u, is_cut_h); +} + +VectorX SimilarityPennerConeMetric::reduce_one_form(const VectorX& one_form) const +{ + // Build psuedoinverse system + MatrixX closed_one_form_matrix = + build_closed_one_form_matrix(*this, m_homology_basis_loops, true); + MatrixX A = closed_one_form_matrix.transpose() * closed_one_form_matrix; + VectorX b = closed_one_form_matrix.transpose() * one_form; + + // Solve for the 
reduced basis coefficients + Eigen::SimplicialLDLT> solver; + solver.compute(A); + VectorX coefficients = solver.solve(b); + SPDLOG_TRACE( + "Error is {}", + sup_norm(one_form - closed_one_form_matrix * coefficients)); + return coefficients; +} + +SimilarityPennerConeMetric SimilarityPennerConeMetric::scale_by_one_form() const +{ + // Get reduced 1-form coefficients + VectorX coefficients = reduce_one_form(m_one_form); + + // Extract conformal scale factors and harmonic coefficients + int num_vertices = n_ind_vertices(); + int num_loops = m_homology_basis_loops.size(); + VectorX u; + u.setZero(num_vertices); + std::vector v_map; + int num_angles; + Optimization::build_free_vertex_map(*this, v_map, num_angles); + for (int vi = 0; vi < num_vertices; ++vi) { + if (v_map[vi] >= 0) { + u[vi] = coefficients[v_map[vi]]; + } + } + VectorX harmonic_form_coords = coefficients.tail(num_loops); + SPDLOG_TRACE("Minimum scale factor is {}", u.minCoeff()); + SPDLOG_TRACE("Maximum scale factor is {}", u.maxCoeff()); + + // Solve for scaled metric coords + int num_reduced_coordinates = m_embed.size(); + VectorX reduced_metric_coords(num_reduced_coordinates); + for (int E = 0; E < num_reduced_coordinates; ++E) { + int h = e2he[m_embed[E]]; + reduced_metric_coords[E] = 2.0 * log(l[h]) + (u[v_rep[to[h]]] + u[v_rep[to[opp[h]]]]); + } + + return SimilarityPennerConeMetric( + *this, + reduced_metric_coords, + m_homology_basis_loops, + kappa_hat, + harmonic_form_coords); +} + +bool SimilarityPennerConeMetric::flip_ccw(int _h, bool Ptolemy) +{ + // Perform the flip in the base class + bool success = MarkedPennerConeMetric::flip_ccw(_h, Ptolemy); + + // Update one form + m_one_form[_h] = -(m_one_form[n[_h]] + m_one_form[n[n[_h]]]); + m_one_form[opp[_h]] = -m_one_form[_h]; + + // Update one form direction + m_one_form_direction[_h] = -(m_one_form_direction[n[_h]] + m_one_form_direction[n[n[_h]]]); + m_one_form_direction[opp[_h]] = -m_one_form_direction[_h]; + + return success; +} + +void 
SimilarityPennerConeMetric::separate_coordinates( + const VectorX& reduced_metric_coords, + VectorX& metric_coords, + VectorX& harmonic_form_coords) const +{ + int num_coords = reduced_metric_coords.size(); + int num_basis_loops = m_homology_basis_loops.size(); + metric_coords = reduced_metric_coords.head(num_coords - num_basis_loops); + harmonic_form_coords = reduced_metric_coords.tail(num_basis_loops); +} + +} // namespace Holonomy +} // namespace Penner \ No newline at end of file diff --git a/src/metric_optimization/CMakeLists.txt b/src/metric_optimization/CMakeLists.txt deleted file mode 100644 index 46b9078..0000000 --- a/src/metric_optimization/CMakeLists.txt +++ /dev/null @@ -1,19 +0,0 @@ -add_library(MetricOptimizationLib - convergence.cpp - energies.cpp - energy_functor.cpp - energy_weights.cpp - explicit_optimization.cpp - implicit_optimization.cpp - nonlinear_optimization.cpp -) -target_include_directories(MetricOptimizationLib PUBLIC .) -target_link_libraries(MetricOptimizationLib PUBLIC - PennerOptimizationCoreLib -) -target_compile_definitions(MetricOptimizationLib PUBLIC - SPDLOG_ACTIVE_LEVEL=SPDLOG_LEVEL_DEBUG -) -target_compile_options(MetricOptimizationLib PRIVATE - -Wall -Wpedantic -Wextra -Werror -) diff --git a/src/optimization/CMakeLists.txt b/src/optimization/CMakeLists.txt new file mode 100644 index 0000000..933e623 --- /dev/null +++ b/src/optimization/CMakeLists.txt @@ -0,0 +1,72 @@ +set(OptimizationCoreSrc + core/area.cpp + core/common.cpp + core/cone_metric.cpp + core/constraint.cpp + core/flip_matrix_generator.cpp + core/projection.cpp + core/reparametrization.cpp + core/shear.cpp +) + +set(MetricOptimizationSrc + metric_optimization/convergence.cpp + metric_optimization/energies.cpp + metric_optimization/energy_functor.cpp + metric_optimization/energy_weights.cpp + metric_optimization/explicit_optimization.cpp + metric_optimization/implicit_optimization.cpp + metric_optimization/nonlinear_optimization.cpp +) + +set(ParameterizationSrc 
+ parameterization/interpolation.cpp + parameterization/layout.cpp + parameterization/refinement.cpp + parameterization/translation.cpp + parameterization/triangulation.cpp +) + +set(OptimizationUtilSrc + util/shapes.cpp + util/viewers.cpp +) + +add_library(PennerOptimizationLib + interface.cpp + ${OptimizationCoreSrc} + ${MetricOptimizationSrc} + ${ParameterizationSrc} + ${OptimizationUtilSrc} +) +target_include_directories(PennerOptimizationLib PUBLIC ../../include/optimization) +target_link_libraries(PennerOptimizationLib PUBLIC + PennerUtilLib +) +target_compile_definitions(PennerOptimizationLib PUBLIC + SPDLOG_ACTIVE_LEVEL=SPDLOG_LEVEL_DEBUG +) +target_compile_options(PennerOptimizationLib PRIVATE + -Wall -Wpedantic -Wextra -Werror +) + +if(USE_PYBIND) + add_library(optimization_py MODULE + pybind.cpp + ) + + # Link libraries + target_link_libraries(optimization_py PUBLIC + PennerOptimizationLib + pybind11::module + ${RENDER_LIBRARIES} + ) + + # Set pybinding settings + set_target_properties(optimization_py PROPERTIES LIBRARY_OUTPUT_DIRECTORY + ${PROJECT_SOURCE_DIR}/py + ) + set_target_properties(optimization_py PROPERTIES PREFIX + "${PYTHON_MODULE_PREFIX}" + ) +endif() diff --git a/src/core/area.cpp b/src/optimization/core/area.cpp similarity index 80% rename from src/core/area.cpp rename to src/optimization/core/area.cpp index 114e434..1b76cb0 100644 --- a/src/core/area.cpp +++ b/src/optimization/core/area.cpp @@ -28,11 +28,12 @@ * Courant Institute of Mathematical Sciences, New York University, USA * * * * *********************************************************************************/ -#include "area.hh" +#include "optimization/core/area.h" -#include "embedding.hh" +#include "util/embedding.h" -namespace CurvatureMetric { +namespace Penner { +namespace Optimization { Scalar squared_area(Scalar li, Scalar lj, Scalar lk) { @@ -98,23 +99,23 @@ Scalar squared_area_length_derivative(Scalar variable_length, Scalar lj, Scalar } } -VectorX squared_areas(const 
DifferentiableConeMetric& cone_metric) +VectorX squared_areas(const Mesh& m) { - int num_halfedges = cone_metric.n_halfedges(); + int num_halfedges = m.n_halfedges(); VectorX he2areasq(num_halfedges); - int num_faces = cone_metric.h.size(); + int num_faces = m.h.size(); // #pragma omp parallel for for (int f = 0; f < num_faces; f++) { // Get halfedges of face f - int hi = cone_metric.h[f]; - int hj = cone_metric.n[hi]; - int hk = cone_metric.n[hj]; + int hi = m.h[f]; + int hj = m.n[hi]; + int hk = m.n[hj]; // Get lengths of the halfedges - Scalar li = cone_metric.l[hi]; - Scalar lj = cone_metric.l[hj]; - Scalar lk = cone_metric.l[hk]; + Scalar li = m.l[hi]; + Scalar lj = m.l[hj]; + Scalar lk = m.l[hk]; // Compute the area of the face adjacent to the halfedges Scalar areasq = squared_area(li, lj, lk); @@ -126,12 +127,12 @@ VectorX squared_areas(const DifferentiableConeMetric& cone_metric) return he2areasq; } -VectorX areas(const DifferentiableConeMetric& cone_metric) +VectorX areas(const Mesh& m) { - int num_halfedges = cone_metric.n_halfedges(); + int num_halfedges = m.n_halfedges(); // Compute squared areas - VectorX he2areasq = squared_areas(cone_metric); + VectorX he2areasq = squared_areas(m); assert(he2areasq.size() == num_halfedges); // Take square roots @@ -143,25 +144,25 @@ VectorX areas(const DifferentiableConeMetric& cone_metric) return he2area; } -VectorX squared_area_length_derivatives(const DifferentiableConeMetric& cone_metric) +VectorX squared_area_length_derivatives(const Mesh& m) { - int num_halfedges = cone_metric.n_halfedges(); + int num_halfedges = m.n_halfedges(); VectorX he2areasqderiv(num_halfedges); // Compute maps from halfedges to derivatives of area with respect to the edge // length - int num_faces = cone_metric.h.size(); + int num_faces = m.h.size(); // #pragma omp parallel for for (int f = 0; f < num_faces; f++) { // Get halfedges of face f - int hi = cone_metric.h[f]; - int hj = cone_metric.n[hi]; - int hk = cone_metric.n[hj]; + int hi 
= m.h[f]; + int hj = m.n[hi]; + int hk = m.n[hj]; // Get lengths of the halfedges - Scalar li = cone_metric.l[hi]; - Scalar lj = cone_metric.l[hj]; - Scalar lk = cone_metric.l[hk]; + Scalar li = m.l[hi]; + Scalar lj = m.l[hj]; + Scalar lk = m.l[hk]; // Compute the derivative of the area of f with respect to each halfedge he2areasqderiv[hi] = squared_area_length_derivative(li, lj, lk); @@ -172,21 +173,22 @@ VectorX squared_area_length_derivatives(const DifferentiableConeMetric& cone_met return he2areasqderiv; } -VectorX squared_area_log_length_derivatives(const DifferentiableConeMetric& cone_metric) +VectorX squared_area_log_length_derivatives(const Mesh& m) { - int num_halfedges = cone_metric.n_halfedges(); + int num_halfedges = m.n_halfedges(); // Compute squared areas length derivatives - VectorX he2areasq_deriv = squared_area_length_derivatives(cone_metric); + VectorX he2areasq_deriv = squared_area_length_derivatives(m); assert(he2areasq_deriv.size() == num_halfedges); // Apply chain rule to A(l) = A(e^(lambda/2)) VectorX he2areasq_log_deriv(num_halfedges); for (int h = 0; h < num_halfedges; ++h) { - he2areasq_log_deriv[h] = he2areasq_deriv[h] * cone_metric.l[h] / 2.0; + he2areasq_log_deriv[h] = he2areasq_deriv[h] * m.l[h] / 2.0; } return he2areasq_log_deriv; } -} // namespace CurvatureMetric +} // namespace Optimization +} // namespace Penner diff --git a/src/core/common.cpp b/src/optimization/core/common.cpp similarity index 96% rename from src/core/common.cpp rename to src/optimization/core/common.cpp index c2be8f9..5f81e6f 100644 --- a/src/core/common.cpp +++ b/src/optimization/core/common.cpp @@ -28,9 +28,10 @@ * Courant Institute of Mathematical Sciences, New York University, USA * * * * *********************************************************************************/ -#include "common.hh" +#include "optimization/core/common.h" -namespace CurvatureMetric { +namespace Penner { +namespace Optimization { bool vector_equal(VectorX v, VectorX w, Scalar eps) { 
@@ -83,4 +84,5 @@ Scalar matrix_sup_norm(const MatrixX& matrix) return max_value; } -} // namespace CurvatureMetric +} // namespace Optimization +} // namespace Penner \ No newline at end of file diff --git a/src/core/cone_metric.cpp b/src/optimization/core/cone_metric.cpp similarity index 98% rename from src/core/cone_metric.cpp rename to src/optimization/core/cone_metric.cpp index 43bb711..28630f7 100644 --- a/src/core/cone_metric.cpp +++ b/src/optimization/core/cone_metric.cpp @@ -28,15 +28,16 @@ * Courant Institute of Mathematical Sciences, New York University, USA * * * * *********************************************************************************/ -#include "cone_metric.hh" +#include "optimization/core/cone_metric.h" #include "conformal_ideal_delaunay/ConformalInterface.hh" -#include "constraint.hh" -#include "projection.hh" +#include "optimization/core/constraint.h" +#include "optimization/core/projection.h" // TODO: Clean code -namespace CurvatureMetric { +namespace Penner { +namespace Optimization { DifferentiableConeMetric::DifferentiableConeMetric(const Mesh& m) : Mesh(m) @@ -417,4 +418,5 @@ void DiscreteMetric::expand_metric_coordinates(const VectorX& metric_coords) // TODO error } -} // namespace CurvatureMetric +} // namespace Optimization +} // namespace Penner \ No newline at end of file diff --git a/src/core/constraint.cpp b/src/optimization/core/constraint.cpp similarity index 97% rename from src/core/constraint.cpp rename to src/optimization/core/constraint.cpp index 6d49ae0..8287862 100644 --- a/src/core/constraint.cpp +++ b/src/optimization/core/constraint.cpp @@ -28,13 +28,14 @@ * Courant Institute of Mathematical Sciences, New York University, USA * * * * *********************************************************************************/ -#include "constraint.hh" +#include "optimization/core/constraint.h" -#include "area.hh" -#include "embedding.hh" -#include "linear_algebra.hh" +#include "util/embedding.h" +#include 
"util/linear_algebra.h" +#include "optimization/core/area.h" -namespace CurvatureMetric { +namespace Penner { +namespace Optimization { bool satisfies_triangle_inequality(const Mesh& cone_metric) { @@ -291,4 +292,5 @@ Scalar compute_max_constraint(const DifferentiableConeMetric& cone_metric) return sup_norm(constraint); } -} // namespace CurvatureMetric +} // namespace Optimization +} // namespace Penner \ No newline at end of file diff --git a/src/core/flip_matrix_generator.cpp b/src/optimization/core/flip_matrix_generator.cpp similarity index 97% rename from src/core/flip_matrix_generator.cpp rename to src/optimization/core/flip_matrix_generator.cpp index be8c413..a0a6a98 100644 --- a/src/core/flip_matrix_generator.cpp +++ b/src/optimization/core/flip_matrix_generator.cpp @@ -28,9 +28,10 @@ * Courant Institute of Mathematical Sciences, New York University, USA * * * * *********************************************************************************/ -#include "flip_matrix_generator.hh" +#include "optimization/core/flip_matrix_generator.h" -namespace CurvatureMetric { +namespace Penner { +namespace Optimization { FlipMatrixGenerator::FlipMatrixGenerator(int size) : m_size(size) @@ -166,4 +167,5 @@ MatrixX FlipMapMatrixGenerator::build_matrix() const return matrix; } -} // namespace CurvatureMetric +} // namespace Optimization +} // namespace Penner \ No newline at end of file diff --git a/src/core/projection.cpp b/src/optimization/core/projection.cpp similarity index 96% rename from src/core/projection.cpp rename to src/optimization/core/projection.cpp index e993448..45aa4a5 100644 --- a/src/core/projection.cpp +++ b/src/optimization/core/projection.cpp @@ -28,22 +28,22 @@ * Courant Institute of Mathematical Sciences, New York University, USA * * * * *********************************************************************************/ -#include "projection.hh" +#include "optimization/core/projection.h" #include #include #include #include 
"conformal_ideal_delaunay/ConformalInterface.hh" -#include "constraint.hh" -#include "embedding.hh" -#include "globals.hh" -#include "linear_algebra.hh" -#include "vector.hh" +#include "util/embedding.h" +#include "util/linear_algebra.h" +#include "util/vector.h" +#include "optimization/core/constraint.h" +#include "optimization/core/globals.h" /// FIXME Do cleaning pass -namespace CurvatureMetric { - +namespace Penner { +namespace Optimization { MatrixX conformal_scaling_matrix(const Mesh& m) { @@ -206,4 +206,5 @@ VectorX project_descent_direction( } -} // namespace CurvatureMetric +} // namespace Optimization +} // namespace Penner \ No newline at end of file diff --git a/src/core/reparametrization.cpp b/src/optimization/core/reparametrization.cpp similarity index 96% rename from src/core/reparametrization.cpp rename to src/optimization/core/reparametrization.cpp index 19d37b1..054b123 100644 --- a/src/core/reparametrization.cpp +++ b/src/optimization/core/reparametrization.cpp @@ -28,14 +28,15 @@ * Courant Institute of Mathematical Sciences, New York University, USA * * * * *********************************************************************************/ -#include "reparametrization.hh" +#include "optimization/core/reparametrization.h" #include "conformal_ideal_delaunay/ConformalIdealDelaunayMapping.hh" -#include "embedding.hh" +#include "util/embedding.h" /// FIXME Do cleaning pass -namespace CurvatureMetric { +namespace Penner { +namespace Optimization { void bc_reparametrize_eq(OverlayMesh& m_o, const VectorX& tau) { @@ -107,4 +108,6 @@ void reparametrize_equilateral( #ifdef PYBIND #endif -} // namespace CurvatureMetric + +} // namespace Optimization +} // namespace Penner \ No newline at end of file diff --git a/src/core/shear.cpp b/src/optimization/core/shear.cpp similarity index 98% rename from src/core/shear.cpp rename to src/optimization/core/shear.cpp index 14db29f..55b19bc 100644 --- a/src/core/shear.cpp +++ b/src/optimization/core/shear.cpp @@ -28,18 
+28,18 @@ * Courant Institute of Mathematical Sciences, New York University, USA * * * * *********************************************************************************/ -#include "shear.hh" +#include "optimization/core/shear.h" -#include #include "conformal_ideal_delaunay/ConformalIdealDelaunayMapping.hh" -#include "embedding.hh" -#include "projection.hh" -#include "reparametrization.hh" -#include "vector.hh" +#include "util/embedding.h" +#include "optimization/core/projection.h" +#include "optimization/core/reparametrization.h" +#include "util/vector.h" /// FIXME Do cleaning pass -namespace CurvatureMetric { +namespace Penner { +namespace Optimization { VectorX compute_shear(const Mesh& m, const VectorX& he_metric_coords) { @@ -426,4 +426,5 @@ void compute_shear_basis_coordinates( scale_factors)); } -} // namespace CurvatureMetric \ No newline at end of file +} // namespace Optimization +} // namespace Penner \ No newline at end of file diff --git a/src/penner_optimization_interface.cpp b/src/optimization/interface.cpp similarity index 97% rename from src/penner_optimization_interface.cpp rename to src/optimization/interface.cpp index 8d0327d..0e245f9 100644 --- a/src/penner_optimization_interface.cpp +++ b/src/optimization/interface.cpp @@ -28,20 +28,21 @@ * Courant Institute of Mathematical Sciences, New York University, USA * * * * *********************************************************************************/ -#include "penner_optimization_interface.hh" +#include "optimization/interface.h" #include "conformal_ideal_delaunay/ConformalInterface.hh" -#include "embedding.hh" -#include "interpolation.hh" -#include "layout.hh" -#include "projection.hh" -#include "translation.hh" -#include "vector.hh" -#include "viewers.hh" +#include "util/embedding.h" +#include "util/vector.h" +#include "optimization/parameterization/interpolation.h" +#include "optimization/parameterization/layout.h" +#include "optimization/core/projection.h" +#include 
"optimization/parameterization/translation.h" +#include "optimization/util/viewers.h" /// FIXME Do cleaning pass -namespace CurvatureMetric { +namespace Penner { +namespace Optimization { std::unique_ptr generate_initial_mesh( const Eigen::MatrixXd& V, @@ -384,4 +385,5 @@ std:: return std::make_tuple(V_l, F_l, uv, FT, cut_h); } -} // namespace CurvatureMetric +} // namespace Optimization +} // namespace Penner \ No newline at end of file diff --git a/src/metric_optimization/convergence.cpp b/src/optimization/metric_optimization/convergence.cpp similarity index 96% rename from src/metric_optimization/convergence.cpp rename to src/optimization/metric_optimization/convergence.cpp index d08fc97..e8029ce 100644 --- a/src/metric_optimization/convergence.cpp +++ b/src/optimization/metric_optimization/convergence.cpp @@ -28,14 +28,16 @@ * Courant Institute of Mathematical Sciences, New York University, USA * * * * *********************************************************************************/ -#include "convergence.hh" +#include "optimization/metric_optimization/convergence.h" -#include "constraint.hh" -#include "explicit_optimization.hh" -#include "implicit_optimization.hh" -#include "projection.hh" +#include "optimization/core/constraint.h" +#include "optimization/metric_optimization/explicit_optimization.h" +#include "optimization/metric_optimization/implicit_optimization.h" +#include "optimization/core/projection.h" -namespace CurvatureMetric { + +namespace Penner { +namespace Optimization { void compute_direction_energy_values( const DifferentiableConeMetric& m, @@ -234,4 +236,5 @@ step_projected_descent_direction); gradient_signs[i] = } */ -} // namespace CurvatureMetric +} // namespace Optimization +} // namespace Penner diff --git a/src/metric_optimization/energies.cpp b/src/optimization/metric_optimization/energies.cpp similarity index 98% rename from src/metric_optimization/energies.cpp rename to src/optimization/metric_optimization/energies.cpp index 
46ccbfe..d7e7053 100644 --- a/src/metric_optimization/energies.cpp +++ b/src/optimization/metric_optimization/energies.cpp @@ -28,19 +28,20 @@ * Courant Institute of Mathematical Sciences, New York University, USA * * * * *********************************************************************************/ -#include "energies.hh" +#include "optimization/metric_optimization/energies.h" #include #include #include -#include "area.hh" -#include "cone_metric.hh" -#include "constraint.hh" -#include "projection.hh" +#include "optimization/core/area.h" +#include "optimization/core/cone_metric.h" +#include "optimization/core/constraint.h" +#include "optimization/core/projection.h" /// FIXME Do cleaning pass -namespace CurvatureMetric { +namespace Penner { +namespace Optimization { VectorX scale_distortion_direction( const DifferentiableConeMetric& target_cone_metric, @@ -673,4 +674,5 @@ VectorX second_invariant_vf_pybind( #endif -} // namespace CurvatureMetric +} // namespace Optimization +} // namespace Penner \ No newline at end of file diff --git a/src/metric_optimization/energy_functor.cpp b/src/optimization/metric_optimization/energy_functor.cpp similarity index 96% rename from src/metric_optimization/energy_functor.cpp rename to src/optimization/metric_optimization/energy_functor.cpp index c26eb9b..b904f58 100644 --- a/src/metric_optimization/energy_functor.cpp +++ b/src/optimization/metric_optimization/energy_functor.cpp @@ -28,20 +28,21 @@ * Courant Institute of Mathematical Sciences, New York University, USA * * * * *********************************************************************************/ -#include "energy_functor.hh" +#include "optimization/metric_optimization/energy_functor.h" #include #include #include -#include "area.hh" -#include "constraint.hh" -#include "energies.hh" -#include "energy_weights.hh" -#include "projection.hh" +#include "optimization/core/area.h" +#include "optimization/core/constraint.h" +#include 
"optimization/metric_optimization/energies.h" +#include "optimization/metric_optimization/energy_weights.h" +#include "optimization/core/projection.h" /// FIXME Do cleaning pass -namespace CurvatureMetric { +namespace Penner { +namespace Optimization { /// Compute the Jacobian matrix of the change of coordinates from log edge /// lengths to regular edge lengths. @@ -332,4 +333,5 @@ MatrixX length_jacobian_pybind(const VectorX& lambdas_full) #endif -} // namespace CurvatureMetric +} // namespace Optimization +} // namespace Penner \ No newline at end of file diff --git a/src/metric_optimization/energy_weights.cpp b/src/optimization/metric_optimization/energy_weights.cpp similarity index 85% rename from src/metric_optimization/energy_weights.cpp rename to src/optimization/metric_optimization/energy_weights.cpp index ea078cb..03c058f 100644 --- a/src/metric_optimization/energy_weights.cpp +++ b/src/optimization/metric_optimization/energy_weights.cpp @@ -28,13 +28,13 @@ * Courant Institute of Mathematical Sciences, New York University, USA * * * * *********************************************************************************/ -#include "energy_functor.hh" +#include "optimization/metric_optimization/energy_functor.h" -#include "area.hh" -#include "constraint.hh" -#include "energies.hh" -#include "projection.hh" -#include "vector.hh" +#include "optimization/core/area.h" +#include "optimization/core/constraint.h" +#include "optimization/metric_optimization/energies.h" +#include "optimization/core/projection.h" +#include "util/vector.h" #include #include @@ -42,7 +42,8 @@ /// FIXME Do cleaning pass -namespace CurvatureMetric { +namespace Penner { +namespace Optimization { Scalar compute_weighted_norm(const VectorX& weights, const VectorX& values) { @@ -57,22 +58,54 @@ Scalar compute_weighted_norm(const VectorX& weights, const VectorX& values) return weighted_norm; } -VectorX compute_face_area_weights(const DifferentiableConeMetric& cone_metric) +VectorX 
compute_face_area_weights(const Mesh& m) { // Compute area per halfedges - VectorX he2area = areas(cone_metric); + VectorX he2area = areas(m); // Reorganize areas to be per face - int num_faces = cone_metric.h.size(); + int num_faces = m.h.size(); VectorX face_area_weights(num_faces); for (int f = 0; f < num_faces; ++f) { - face_area_weights[f] = he2area[cone_metric.h[f]]; + face_area_weights[f] = he2area[m.h[f]]; } - spdlog::trace("f to areas: {}", face_area_weights.transpose()); return face_area_weights; } +VectorX compute_vertex_area_weights(const Mesh& m) +{ + // Compute area per halfedges + VectorX he2area = areas(m); + + // sum up face area weights around vertex + int num_vertices = m.n_vertices(); + int num_halfedges = m.n_halfedges(); + VectorX vertex_area_weights = VectorX::Zero(num_vertices); + for (int hij = 0; hij < num_halfedges; ++hij) { + int vj = m.to[hij]; + vertex_area_weights[vj] += he2area[hij] / 3.; + } + + return vertex_area_weights; +} + +VectorX compute_independent_vertex_area_weights(const Mesh& m) +{ + // compute all vertex weights + VectorX vertex_area_weights = compute_vertex_area_weights(m); + + // sum up halved vertex weights for independent vertices + int num_vertices = m.n_vertices(); + int num_ind_vertices = m.n_ind_vertices(); + VectorX ind_vertex_weights = VectorX::Zero(num_ind_vertices); + for (int vi = 0; vi < num_vertices; ++vi) { + ind_vertex_weights[m.v_rep[vi]] += vertex_area_weights[vi] / 2.; + } + + return ind_vertex_weights; +} + VectorX compute_edge_area_weights(const DifferentiableConeMetric& cone_metric) { // Compute area per halfedges @@ -250,4 +283,5 @@ void compute_boundary_face_weights( } -} // namespace CurvatureMetric +} // namespace Optimization +} // namespace Penner diff --git a/src/metric_optimization/explicit_optimization.cpp b/src/optimization/metric_optimization/explicit_optimization.cpp similarity index 98% rename from src/metric_optimization/explicit_optimization.cpp rename to 
src/optimization/metric_optimization/explicit_optimization.cpp index 5a4bf07..0c3cb29 100644 --- a/src/metric_optimization/explicit_optimization.cpp +++ b/src/optimization/metric_optimization/explicit_optimization.cpp @@ -28,23 +28,24 @@ * Courant Institute of Mathematical Sciences, New York University, USA * * * * *********************************************************************************/ -#include "explicit_optimization.hh" +#include "optimization/metric_optimization/explicit_optimization.h" #include #include -#include "constraint.hh" -#include "embedding.hh" -#include "energies.hh" -#include "globals.hh" -#include "io.hh" -#include "nonlinear_optimization.hh" -#include "projection.hh" -#include "shear.hh" -#include "vector.hh" +#include "optimization/core/constraint.h" +#include "util/embedding.h" +#include "optimization/metric_optimization/energies.h" +#include "optimization/core/globals.h" +#include "util/io.h" +#include "optimization/metric_optimization/nonlinear_optimization.h" +#include "optimization/core/projection.h" +#include "optimization/core/shear.h" +#include "util/vector.h" /// FIXME Do cleaning pass -namespace CurvatureMetric { +namespace Penner { +namespace Optimization { void initialize_explicit_data_log(const std::filesystem::path& data_log_path) { @@ -713,4 +714,5 @@ VectorX optimize_shear_basis_coordinates( opt_params); } -} // namespace CurvatureMetric +} // namespace Optimization +} // namespace Penner \ No newline at end of file diff --git a/src/metric_optimization/implicit_optimization.cpp b/src/optimization/metric_optimization/implicit_optimization.cpp similarity index 98% rename from src/metric_optimization/implicit_optimization.cpp rename to src/optimization/metric_optimization/implicit_optimization.cpp index ff640f0..56d3578 100644 --- a/src/metric_optimization/implicit_optimization.cpp +++ b/src/optimization/metric_optimization/implicit_optimization.cpp @@ -28,22 +28,24 @@ * Courant Institute of Mathematical Sciences, New York 
University, USA * * * * *********************************************************************************/ -#include "implicit_optimization.hh" +#include "optimization/metric_optimization/implicit_optimization.h" #include #include -#include "area.hh" -#include "constraint.hh" -#include "embedding.hh" -#include "energies.hh" -#include "globals.hh" -#include "io.hh" -#include "nonlinear_optimization.hh" -#include "projection.hh" +#include "optimization/core/area.h" +#include "optimization/core/constraint.h" +#include "util/embedding.h" +#include "optimization/metric_optimization/energies.h" +#include "optimization/core/globals.h" +#include "util/io.h" +#include "optimization/metric_optimization/nonlinear_optimization.h" +#include "optimization/core/projection.h" /// FIXME Do cleaning pass -namespace CurvatureMetric { +namespace Penner { +namespace Optimization { + Scalar compute_convergence_ratio( const VectorX& unconstrained_descent_direction, const VectorX& constrained_descent_direction) @@ -781,4 +783,5 @@ std::unique_ptr optimize_metric( return optimize_metric_log(initial_cone_metric, opt_energy, log, proj_params, opt_params); } -} // namespace CurvatureMetric +} // namespace Optimization +} // namespace Penner \ No newline at end of file diff --git a/src/metric_optimization/nonlinear_optimization.cpp b/src/optimization/metric_optimization/nonlinear_optimization.cpp similarity index 96% rename from src/metric_optimization/nonlinear_optimization.cpp rename to src/optimization/metric_optimization/nonlinear_optimization.cpp index 64b6c3f..f43a993 100644 --- a/src/metric_optimization/nonlinear_optimization.cpp +++ b/src/optimization/metric_optimization/nonlinear_optimization.cpp @@ -28,12 +28,13 @@ * Courant Institute of Mathematical Sciences, New York University, USA * * * * *********************************************************************************/ -#include "nonlinear_optimization.hh" -#include "vector.hh" +#include 
"optimization/metric_optimization/nonlinear_optimization.h" +#include "util/vector.h" /// FIXME Do cleaning pass -namespace CurvatureMetric { +namespace Penner { +namespace Optimization { void compute_conjugate_gradient_direction( const VectorX& gradient, @@ -132,4 +133,5 @@ void compute_lbfgs_direction( descent_direction = -z; } -} // namespace CurvatureMetric +} // namespace Optimization +} // namespace Penner \ No newline at end of file diff --git a/src/parameterization/interpolation.cpp b/src/optimization/parameterization/interpolation.cpp similarity index 99% rename from src/parameterization/interpolation.cpp rename to src/optimization/parameterization/interpolation.cpp index 4ffd1d3..9419489 100644 --- a/src/parameterization/interpolation.cpp +++ b/src/optimization/parameterization/interpolation.cpp @@ -28,18 +28,19 @@ * Courant Institute of Mathematical Sciences, New York University, USA * * * * *********************************************************************************/ -#include "interpolation.hh" +#include "optimization/parameterization/interpolation.h" #include "conformal_ideal_delaunay/ConformalIdealDelaunayMapping.hh" #include "conformal_ideal_delaunay/ConformalInterface.hh" -#include "embedding.hh" -#include "projection.hh" -#include "reparametrization.hh" -#include "translation.hh" +#include "util/embedding.h" +#include "optimization/core/projection.h" +#include "optimization/core/reparametrization.h" +#include "optimization/parameterization/translation.h" /// FIXME Do cleaning pass -namespace CurvatureMetric { +namespace Penner { +namespace Optimization { void interpolate_penner_coordinates( const Mesh& mesh, @@ -942,4 +943,5 @@ bool overlay_has_all_original_halfedges(OverlayMesh& mo) return (num_missing_original_halfedges == 0); } -} // namespace CurvatureMetric +} // namespace Optimization +} // namespace Penner \ No newline at end of file diff --git a/src/parameterization/layout.cpp b/src/optimization/parameterization/layout.cpp similarity 
index 98% rename from src/parameterization/layout.cpp rename to src/optimization/parameterization/layout.cpp index 2525f74..096143e 100644 --- a/src/parameterization/layout.cpp +++ b/src/optimization/parameterization/layout.cpp @@ -28,26 +28,27 @@ * Courant Institute of Mathematical Sciences, New York University, USA * * * * *********************************************************************************/ -#include "layout.hh" +#include "optimization/parameterization/layout.h" #include #include +#include #include "conformal_ideal_delaunay/ConformalIdealDelaunayMapping.hh" #include "conformal_ideal_delaunay/ConformalInterface.hh" #include "conformal_ideal_delaunay/Layout.hh" -#include "embedding.hh" -#include "igl/edge_flaps.h" -#include "interpolation.hh" -#include "projection.hh" -#include "refinement.hh" -#include "translation.hh" -#include "vector.hh" -#include "vf_mesh.hh" -#include "viewers.hh" +#include "util/embedding.h" +#include "optimization/parameterization/interpolation.h" +#include "optimization/core/projection.h" +#include "optimization/parameterization/refinement.h" +#include "optimization/parameterization/translation.h" +#include "util/vector.h" +#include "util/vf_mesh.h" +#include "optimization/util/viewers.h" // TODO: cleaning pass -namespace CurvatureMetric { +namespace Penner { +namespace Optimization { OverlayMesh add_overlay(const Mesh& m, const VectorX& reduced_metric_coords) { @@ -1011,4 +1012,5 @@ std:: #endif -} // namespace CurvatureMetric +} // namespace Optimization +} // namespace Penner \ No newline at end of file diff --git a/src/parameterization/refinement.cpp b/src/optimization/parameterization/refinement.cpp similarity index 99% rename from src/parameterization/refinement.cpp rename to src/optimization/parameterization/refinement.cpp index 2c34342..91801fa 100644 --- a/src/parameterization/refinement.cpp +++ b/src/optimization/parameterization/refinement.cpp @@ -28,7 +28,7 @@ * Courant Institute of Mathematical Sciences, New York 
University, USA * * * * *********************************************************************************/ -#include "refinement.hh" +#include "optimization/parameterization/refinement.h" #include #include #include @@ -40,13 +40,13 @@ #include #include #include -#include "area.hh" +#include "optimization/core/area.h" #include "conformal_ideal_delaunay/Halfedge.hh" -#include "io.hh" -#include "triangulation.hh" -#include "vector.hh" -#include "vf_mesh.hh" -#include "viewers.hh" +#include "util/io.h" +#include "optimization/parameterization/triangulation.h" +#include "util/vector.h" +#include "util/vf_mesh.h" +#include "optimization/util/viewers.h" #if ENABLE_VISUALIZATION #include "polyscope/point_cloud.h" @@ -55,7 +55,8 @@ /// FIXME Do cleaning pass (Done through viewers) -namespace CurvatureMetric { +namespace Penner { +namespace Optimization { RefinementMesh::RefinementMesh( const Eigen::MatrixXd& V, @@ -149,7 +150,7 @@ void RefinementMesh::get_VF_mesh( for (int j = 0; j < 3; ++j) { F_full(fi, j) = mesh_triangles[fi][j]; F_uv_full(fi, j) = uv_mesh_triangles[fi][j]; - // TODO Check if valid indices with function in common.hh + // TODO Check if valid indices with function in common.h } } @@ -1173,4 +1174,5 @@ bool RefinementMesh::is_valid_refinement_mesh() const return true; } -} // namespace CurvatureMetric +} // namespace Optimization +} // namespace Penner \ No newline at end of file diff --git a/src/parameterization/translation.cpp b/src/optimization/parameterization/translation.cpp similarity index 96% rename from src/parameterization/translation.cpp rename to src/optimization/parameterization/translation.cpp index a12be27..612e23d 100644 --- a/src/parameterization/translation.cpp +++ b/src/optimization/parameterization/translation.cpp @@ -28,19 +28,20 @@ * Courant Institute of Mathematical Sciences, New York University, USA * * * * *********************************************************************************/ -#include "translation.hh" +#include 
"optimization/parameterization/translation.h" #include #include #include "conformal_ideal_delaunay/ConformalIdealDelaunayMapping.hh" -#include "embedding.hh" -#include "linear_algebra.hh" -#include "reparametrization.hh" -#include "shear.hh" +#include "util/embedding.h" +#include "util/linear_algebra.h" +#include "optimization/core/reparametrization.h" +#include "optimization/core/shear.h" /// FIXME Do cleaning pass -namespace CurvatureMetric { +namespace Penner { +namespace Optimization { /// Generate the Lagrangian system Lx = b for the least squares solution to the halfedge /// translations in the hyperbolic metric needed to satisfy the per halfedge shear @@ -171,4 +172,5 @@ void compute_as_symmetric_as_possible_translations( he_translations = lagrangian_solution.head(num_halfedges); } -} // namespace CurvatureMetric \ No newline at end of file +} // namespace Optimization +} // namespace Penner \ No newline at end of file diff --git a/src/parameterization/triangulation.cpp b/src/optimization/parameterization/triangulation.cpp similarity index 97% rename from src/parameterization/triangulation.cpp rename to src/optimization/parameterization/triangulation.cpp index 2fe99c6..7db865d 100644 --- a/src/parameterization/triangulation.cpp +++ b/src/optimization/parameterization/triangulation.cpp @@ -28,6 +28,8 @@ * Courant Institute of Mathematical Sciences, New York University, USA * * * * *********************************************************************************/ +#include "optimization/parameterization/triangulation.h" + #include #include #include @@ -38,11 +40,11 @@ #include #include #include -#include "area.hh" -#include "io.hh" -#include "refinement.hh" -#include "vector.hh" -#include "viewers.hh" +#include "optimization/core/area.h" +#include "util/io.h" +#include "optimization/parameterization/refinement.h" +#include "util/vector.h" +#include "optimization/util/viewers.h" #if ENABLE_VISUALIZATION #include "polyscope/point_cloud.h" @@ -51,7 +53,8 @@ /// 
FIXME Do cleaning pass -namespace CurvatureMetric { +namespace Penner { +namespace Optimization { Scalar compute_face_area(const std::array& vertices) { @@ -61,7 +64,7 @@ Scalar compute_face_area(const std::array& vertices) Scalar lk = (vertices[0] - vertices[2]).norm(); // Compute area from lengths - return sqrt(max(squared_area(li, lj, lk), 0)); + return sqrt(max(squared_area(li, lj, lk), Scalar(0.))); } bool is_inverted_triangle(const std::array& vertices) @@ -316,4 +319,5 @@ void view_triangulation( } -} // namespace CurvatureMetric \ No newline at end of file +} // namespace Optimization +} // namespace Penner \ No newline at end of file diff --git a/src/penner_optimization_pybind.cpp b/src/optimization/pybind.cpp similarity index 95% rename from src/penner_optimization_pybind.cpp rename to src/optimization/pybind.cpp index b9a3ff2..bafaeb2 100644 --- a/src/penner_optimization_pybind.cpp +++ b/src/optimization/pybind.cpp @@ -33,25 +33,25 @@ #include #include -#include "common.hh" - -#include "area.hh" -#include "constraint.hh" -#include "convergence.hh" -#include "embedding.hh" -#include "energies.hh" -#include "energy_functor.hh" -#include "explicit_optimization.hh" -#include "implicit_optimization.hh" -#include "interpolation.hh" -#include "layout.hh" -#include "penner_optimization_interface.hh" -#include "projection.hh" -#include "refinement.hh" -#include "reparametrization.hh" -#include "shapes.hh" -#include "shear.hh" -#include "translation.hh" +#include "optimization/core/common.h" + +#include "optimization/core/area.h" +#include "optimization/core/constraint.h" +#include "optimization/metric_optimization/convergence.h" +#include "util/embedding.h" +#include "optimization/metric_optimization/energies.h" +#include "optimization/metric_optimization/energy_functor.h" +#include "optimization/metric_optimization/explicit_optimization.h" +#include "optimization/metric_optimization/implicit_optimization.h" +#include 
"optimization/parameterization/interpolation.h" +#include "optimization/parameterization/layout.h" +#include "optimization/interface.h" +#include "optimization/core/projection.h" +#include "optimization/parameterization/refinement.h" +#include "optimization/core/reparametrization.h" +#include "optimization/util/shapes.h" +#include "optimization/core/shear.h" +#include "optimization/parameterization/translation.h" #ifdef USE_HIGHFIVE #include @@ -60,10 +60,11 @@ #ifdef RENDER_TEXTURE #include "conformal_ideal_delaunay/ConformalInterface.hh" #include "conformal_ideal_delaunay/Sampling.hh" -#include "visualization.hh" +#include "visualization.h" #endif -namespace CurvatureMetric { +namespace Penner { +namespace Optimization { #ifdef PYBIND #ifndef MULTIPRECISION @@ -546,4 +547,6 @@ PYBIND11_MODULE(optimization_py, m) } #endif #endif -} // namespace CurvatureMetric + +} // namespace Optimization +} // namespace Penner \ No newline at end of file diff --git a/src/tests/regression/eight_Th_hat b/src/optimization/tests/regression/eight_Th_hat similarity index 100% rename from src/tests/regression/eight_Th_hat rename to src/optimization/tests/regression/eight_Th_hat diff --git a/src/tests/regression/eight_metric_coords b/src/optimization/tests/regression/eight_metric_coords similarity index 100% rename from src/tests/regression/eight_metric_coords rename to src/optimization/tests/regression/eight_metric_coords diff --git a/src/tests/regression/knot1_Th_hat b/src/optimization/tests/regression/knot1_Th_hat similarity index 100% rename from src/tests/regression/knot1_Th_hat rename to src/optimization/tests/regression/knot1_Th_hat diff --git a/src/tests/regression/knot1_metric_coords b/src/optimization/tests/regression/knot1_metric_coords similarity index 100% rename from src/tests/regression/knot1_metric_coords rename to src/optimization/tests/regression/knot1_metric_coords diff --git a/src/tests/test_area.cpp b/src/optimization/tests/test_area.cpp similarity index 100% 
rename from src/tests/test_area.cpp rename to src/optimization/tests/test_area.cpp diff --git a/src/tests/test_energies.cpp b/src/optimization/tests/test_energies.cpp similarity index 100% rename from src/tests/test_energies.cpp rename to src/optimization/tests/test_energies.cpp diff --git a/src/tests/test_optimize.cpp b/src/optimization/tests/test_optimize.cpp similarity index 100% rename from src/tests/test_optimize.cpp rename to src/optimization/tests/test_optimize.cpp diff --git a/src/tests/test_refinement.cpp b/src/optimization/tests/test_refinement.cpp similarity index 100% rename from src/tests/test_refinement.cpp rename to src/optimization/tests/test_refinement.cpp diff --git a/src/tests/test_regression.cpp b/src/optimization/tests/test_regression.cpp similarity index 100% rename from src/tests/test_regression.cpp rename to src/optimization/tests/test_regression.cpp diff --git a/src/tests/test_shear.cpp b/src/optimization/tests/test_shear.cpp similarity index 100% rename from src/tests/test_shear.cpp rename to src/optimization/tests/test_shear.cpp diff --git a/src/tests/tests.cpp b/src/optimization/tests/tests.cpp similarity index 100% rename from src/tests/tests.cpp rename to src/optimization/tests/tests.cpp diff --git a/src/util/shapes.cpp b/src/optimization/util/shapes.cpp similarity index 97% rename from src/util/shapes.cpp rename to src/optimization/util/shapes.cpp index 1c563b6..42ef311 100644 --- a/src/util/shapes.cpp +++ b/src/optimization/util/shapes.cpp @@ -28,8 +28,8 @@ * Courant Institute of Mathematical Sciences, New York University, USA * * * * *********************************************************************************/ -#include "shapes.hh" -#include "vector.hh" +#include "optimization/util/shapes.h" +#include "util/vector.h" #include "conformal_ideal_delaunay/ConformalInterface.hh" // Some good simple tests are simplex embeddings that are natural (one vertex @@ -38,7 +38,8 @@ // embedded case has three symmetric edges adjacent to the 
origin and three // symmetric edges not adjacent to the origin. -namespace CurvatureMetric { +namespace Penner { +namespace Optimization { void map_to_sphere(size_t num_vertices, std::vector& Th_hat) { @@ -158,4 +159,6 @@ generate_double_triangle_mesh_pybind() generate_double_triangle_mesh(m, vtx_reindex); return std::make_tuple(m, vtx_reindex); } -} + +} // namespace Optimization +} // namespace Penner \ No newline at end of file diff --git a/src/util/viewers.cpp b/src/optimization/util/viewers.cpp similarity index 96% rename from src/util/viewers.cpp rename to src/optimization/util/viewers.cpp index 7572f2c..826ba13 100644 --- a/src/util/viewers.cpp +++ b/src/optimization/util/viewers.cpp @@ -28,17 +28,18 @@ * Courant Institute of Mathematical Sciences, New York University, USA * * * * *********************************************************************************/ -#include "viewers.hh" +#include "optimization/util/viewers.h" -#include "vector.hh" -#include "vf_mesh.hh" +#include "util/vector.h" +#include "util/vf_mesh.h" #include #if ENABLE_VISUALIZATION #include "polyscope/surface_mesh.h" #endif // ENABLE_VISUALIZATION -namespace CurvatureMetric { +namespace Penner { +namespace Optimization { void view_flipped_triangles( const Eigen::MatrixXd& V, @@ -131,4 +132,5 @@ void view_parameterization( #endif } -} // namespace CurvatureMetric +} // namespace Optimization +} // namespace Penner \ No newline at end of file diff --git a/src/util/visualization.cc b/src/optimization/util/visualization.cc similarity index 97% rename from src/util/visualization.cc rename to src/optimization/util/visualization.cc index df65c5b..0dec28c 100644 --- a/src/util/visualization.cc +++ b/src/optimization/util/visualization.cc @@ -28,10 +28,10 @@ * Courant Institute of Mathematical Sciences, New York University, USA * * * * *********************************************************************************/ -#include "visualization.hh" +#include "visualization.h" -namespace 
CurvatureMetric -{ +namespace Penner { +namespace Optimization { Viewer generate_mesh_viewer(const Eigen::MatrixXd &v, @@ -104,4 +104,5 @@ void save_mesh_screen_capture(Viewer &viewer, igl::png::writePNG(R, G, B, A, image_path); } -} +} // namespace Optimization +} // namespace Penner \ No newline at end of file diff --git a/src/parameterization/CMakeLists.txt b/src/parameterization/CMakeLists.txt deleted file mode 100644 index 0f39bc0..0000000 --- a/src/parameterization/CMakeLists.txt +++ /dev/null @@ -1,17 +0,0 @@ -add_library(ParameterizationLib - interpolation.cpp - layout.cpp - refinement.cpp - translation.cpp - triangulation.cpp -) -target_include_directories(ParameterizationLib PUBLIC .) -target_link_libraries(ParameterizationLib PUBLIC - PennerOptimizationCoreLib -) -target_compile_definitions(ParameterizationLib PUBLIC - SPDLOG_ACTIVE_LEVEL=SPDLOG_LEVEL_DEBUG -) -target_compile_options(ParameterizationLib PRIVATE - -Wall -Wpedantic -Wextra -Werror -) diff --git a/src/util/CMakeLists.txt b/src/util/CMakeLists.txt index 720ad1f..82bbb10 100644 --- a/src/util/CMakeLists.txt +++ b/src/util/CMakeLists.txt @@ -1,14 +1,34 @@ -add_library(PennerOptimizationUtilLib - shapes.cpp - viewers.cpp +set(UtilSrc + boundary.cpp + embedding.cpp + io.cpp + linear_algebra.cpp + map.cpp + spanning_tree.cpp + vector.cpp + vf_mesh.cpp ) -target_include_directories(PennerOptimizationUtilLib PUBLIC .) 
-target_link_libraries(PennerOptimizationUtilLib PUBLIC - PennerOptimizationCoreLib + +add_library(PennerUtilLib + ${UtilSrc} ) -target_compile_definitions(PennerOptimizationUtilLib PUBLIC - SPDLOG_ACTIVE_LEVEL=SPDLOG_LEVEL_DEBUG +target_include_directories(PennerUtilLib PUBLIC ../../include/util) +target_link_libraries(PennerUtilLib PUBLIC + Eigen3::Eigen + igl::core + igl::predicates + spdlog::spdlog + conformal_cpp + ${MPFR_LIBRARIES} + ${SUITESPARSE_LIBS} + ${POLYSCOPE_LIBRARIES} ) -target_compile_options(PennerOptimizationUtilLib PRIVATE - -Wall -Wpedantic -Wextra -Werror +target_compile_definitions(PennerUtilLib PUBLIC + SPDLOG_ACTIVE_LEVEL=SPDLOG_LEVEL_DEBUG ) +# TODO Fix for multiprecision +if (NOT USE_MULTIPRECISION) + target_compile_options(PennerUtilLib PRIVATE + -Wall -Wpedantic -Wextra -Werror + ) +endif() diff --git a/src/util/boundary.cpp b/src/util/boundary.cpp new file mode 100644 index 0000000..c7fead6 --- /dev/null +++ b/src/util/boundary.cpp @@ -0,0 +1,152 @@ +#include "util/boundary.h" + +#include "util/vector.h" + +namespace Penner { + +int circulate_ccw_to_boundary(const Mesh& m, int halfedge_index) +{ + // circulate until doubled mesh component changes (e.g., primal to copy) + int h = halfedge_index; + while (m.type[h] == m.type[halfedge_index]) { + h = m.opp[m.n[h]]; + + // check for full circulation + if (h == halfedge_index) + { + spdlog::error("Circulating to boundary around interior vertex"); + return -1; + } + } + + return h; +} + +std::vector find_primal_boundary_halfedges(const Mesh& m) +{ + // Closed mesh case + if (m.R[0] == 0) return {}; + + // General case + int num_halfedges = m.n_halfedges(); + std::vector boundary_halfedges = {}; + boundary_halfedges.reserve(m.n_ind_vertices()); + for (int hij = 0; hij < num_halfedges; ++hij) + { + // TODO Handle flipped case + if (m.type[hij] == 3) + { + spdlog::error("Cannot find boundary vertices for flipped mesh"); + return {}; + } + + // Only process primal edges + if (m.type[hij] != 1) 
continue; + + // Check for edges on symmetry line and add tip + if (m.opp[m.R[hij]] == hij) + { + boundary_halfedges.push_back(hij); + } + } + + return boundary_halfedges; +} + +std::vector find_boundary_vertices( + const Mesh& m +) { + // get tip of primal boundary halfedges + std::vector boundary_halfedges = find_primal_boundary_halfedges(m); + std::vector boundary_vertices = {}; + boundary_vertices.reserve(boundary_halfedges.size()); + for (int hij : boundary_halfedges) + { + boundary_vertices.push_back(m.to[hij]); + } + + return boundary_vertices; +} + +std::vector find_boundary_vertices( + const Mesh& m, + const std::vector& vtx_reindex) +{ + std::vector boundary_vertices = find_boundary_vertices(m); + std::vector ind_boundary_vertices = vector_compose(m.v_rep, boundary_vertices); + std::vector reindexed_boundary_vertices = vector_compose(vtx_reindex, ind_boundary_vertices); + + return reindexed_boundary_vertices; +} + +std::vector compute_boundary_vertices(const Mesh& m) +{ + // Get the boundary vertices + auto bd_vertices = find_boundary_vertices(m); + + // Make list of boundary vertices into boolean mask + int num_vertices = m.n_vertices(); + std::vector is_boundary_vertex(num_vertices, false); + int num_bd_vertices = bd_vertices.size(); + for (int i = 0; i < num_bd_vertices; ++i) + { + int vi = bd_vertices[i]; + is_boundary_vertex[vi] = true; + } + + return is_boundary_vertex; +} + +std::vector build_boundary_component(const Mesh& m, int halfedge_index) +{ + std::vector component = {}; + int h = halfedge_index; + do { + component.push_back(h); + + // Circulate to next boundary edge + h = circulate_ccw_to_boundary(m, h); + h = m.opp[h]; + } while (h != halfedge_index); + + return component; +} + +std::vector find_boundary_components(const Mesh& m) +{ + // Closed mesh case + if (m.R[0] == 0) return {}; + + // Get boundary edges + int num_halfedges = m.n_halfedges(); + std::vector boundary_components = {}; + std::vector is_seen(num_halfedges, false); + for 
(int hij = 0; hij < num_halfedges; ++hij) { + if (is_seen[hij]) continue; + is_seen[hij] = true; + + // TODO Handle flipped case + if (m.type[hij] == 3) { + spdlog::error("Cannot find boundary vertices for flipped mesh"); + return {}; + } + + // Only process primal edges + if (m.type[hij] != 1) continue; + + // Check for edges on symmetry line + // TODO Fix all this + if (m.opp[m.R[hij]] == hij) { + // mark boundary component + boundary_components.push_back(hij); + + // add all edges in component to seen list + std::vector component = build_boundary_component(m, hij); + for (int h : component) is_seen[h] = true; + } + } + + return boundary_components; +} + +} // namespace Penner diff --git a/src/core/embedding.cpp b/src/util/embedding.cpp similarity index 99% rename from src/core/embedding.cpp rename to src/util/embedding.cpp index 4989d40..c4a39f2 100644 --- a/src/core/embedding.cpp +++ b/src/util/embedding.cpp @@ -28,9 +28,9 @@ * Courant Institute of Mathematical Sciences, New York University, USA * * * * *********************************************************************************/ -#include "embedding.hh" +#include "util/embedding.h" -namespace CurvatureMetric { +namespace Penner { ReductionMaps::ReductionMaps(const Mesh& m, bool fix_bd_lengths) { @@ -381,4 +381,4 @@ bool is_valid_symmetry(const Mesh& m) return is_valid; } -} // namespace CurvatureMetric +} // namespace Penner diff --git a/src/core/io.cpp b/src/util/io.cpp similarity index 92% rename from src/core/io.cpp rename to src/util/io.cpp index 4343a2f..0fd9543 100644 --- a/src/core/io.cpp +++ b/src/util/io.cpp @@ -28,12 +28,14 @@ * Courant Institute of Mathematical Sciences, New York University, USA * * * * *********************************************************************************/ -#include "io.hh" +#include "util/io.h" -#include "embedding.hh" -#include "vector.hh" +#include "spdlog/sinks/basic_file_sink.h" +#include "spdlog/sinks/ostream_sink.h" -namespace CurvatureMetric { +#include 
"util/vector.h" + +namespace Penner { void create_log(const std::filesystem::path& log_dir, const std::string& log_name) { @@ -66,15 +68,6 @@ void log_mesh_information(const Mesh& m, const std::string& log_name) spdlog::get(log_name)->trace("Mesh halfedge to face map: {}", formatted_vector(m.f)); } -void write_vector(const VectorX& vec, const std::string& filename, int precision) -{ - std::ofstream output_file(filename, std::ios::out | std::ios::trunc); - for (Eigen::Index i = 0; i < vec.size(); ++i) { - output_file << std::setprecision(precision) << vec[i] << std::endl; - } - output_file.close(); -} - void write_matrix(const Eigen::MatrixXd& matrix, const std::string& filename) { if (matrix.cols() == 0) { @@ -133,4 +126,4 @@ void write_sparse_matrix(const MatrixX& matrix, const std::string& filename, std output_file.close(); } -} // namespace CurvatureMetric +} // namespace Penner diff --git a/src/core/linear_algebra.cpp b/src/util/linear_algebra.cpp similarity index 87% rename from src/core/linear_algebra.cpp rename to src/util/linear_algebra.cpp index 215c9c9..2b44a36 100644 --- a/src/core/linear_algebra.cpp +++ b/src/util/linear_algebra.cpp @@ -28,15 +28,15 @@ * Courant Institute of Mathematical Sciences, New York University, USA * * * * *********************************************************************************/ -#include "linear_algebra.hh" +#include "util/linear_algebra.h" #ifdef USE_SUITESPARSE #include #endif -#include "vector.hh" +#include "util/vector.h" -namespace CurvatureMetric { +namespace Penner { VectorX kronecker_product(const VectorX& vec_1, const VectorX& vec_2) { @@ -164,4 +164,43 @@ void compute_submatrix( submatrix.setFromTriplets(triplet_list.begin(), triplet_list.end()); } -} // namespace CurvatureMetric +Matrix2x2 compute_rotation(Scalar theta) +{ + Matrix2x2 R; + R.row(0) << cos(theta), -sin(theta); + R.row(1) << sin(theta), cos(theta); + + return R; +} + +MatrixX solve_linear_system(const MatrixX& A, const MatrixX&B) +{ +#ifdef 
WITH_MPFR + Eigen::SparseQR> solver; +#else + Eigen::SparseLU solver; +#endif + + solver.compute(A); + return solver.solve(B); +} + +std::vector generate_linspace(Scalar a, Scalar b, int num_steps) +{ + std::vector linspace(num_steps + 1); + + // iteratively compute linspace + Scalar delta = (b - a) / static_cast(num_steps); + linspace[0] = a; + for (int i = 0; i < num_steps; ++i) + { + linspace[i + 1] = linspace[i] + delta; + } + + // clamp last value exactly to b + linspace.back() = b; + + return linspace; +} + +} // namespace Penner diff --git a/src/util/map.cpp b/src/util/map.cpp new file mode 100644 index 0000000..629bdd4 --- /dev/null +++ b/src/util/map.cpp @@ -0,0 +1,233 @@ +#include "util/map.h" + +#include "util/vector.h" + +#include + +namespace Penner { + +Scalar +vector_max(const std::vector& v) +{ + if (v.empty()) return 0.0; + + Scalar max_value = v[0]; + for (const auto& vi : v) + { + max_value = max(max_value, vi); + } + + return max_value; +} + +std::vector vector_negate(const std::vector& v) +{ + int n = v.size(); + std::vector w(n); + for (int i = 0; i < n; ++i) { + w[i] = -v[i]; + } + + return w; +} + +bool vector_contains_nan(const VectorX& v) +{ + for (Eigen::Index i = 0; i < v.size(); ++i) { + if (isnan(v(i))) return true; + } + + return false; +} + +std::vector index_subset(size_t set_size, const std::vector& subset_indices) +{ + std::vector set_to_subset_mapping; + Penner::compute_set_to_subset_mapping(set_size, subset_indices, set_to_subset_mapping); + return set_to_subset_mapping; +} + +int compute_map_range(const std::vector& map) +{ + // get range of map + int domain = map.size(); + int range = -1; + for (int i = 0; i < domain; ++i) + { + if (range < (map[i] + 1)) + { + range = map[i] + 1; + } + } + + return range; +} + +std::vector invert_map(const std::vector& map) +{ + // get range of map + int domain = map.size(); + int range = compute_map_range(map); + + // invert map + std::vector inverse_map(range, -1); + for (int i = 0; i < 
domain; ++i) + { + inverse_map[map[i]] = i; + } + + return inverse_map; +} + +std::vector generate_permutation(int n) +{ + // generate permuation for the given size + std::vector permutation; + Penner::arange(n, permutation); + auto rng = std::default_random_engine {}; + std::shuffle(permutation.begin(), permutation.end(), rng); + return permutation; +} + +std::vector shuffle_map_image(const std::vector& map) +{ + // generate permuation for the map range + int range = compute_map_range(map); + std::vector permutation = generate_permutation(range); + + // compute the map with the permutation + return vector_compose(permutation, map); +} + +bool is_invariant_under_permutation(const std::vector& map, const std::vector& perm) +{ + assert(map.size() == perm.size()); + + // check if applying the permutation changes the image of any element under the map + int n = map.size(); + for (int i = 0 ; i < n; ++i) + { + if (perm[i] < 0) return false; + if (perm[i] >= n) return false; + if (map[perm[i]] != map[i]) return false; + } + + return true; +} + + +bool is_one_sided_inverse( + const std::vector& left_inverse, + const std::vector& right_inverse) +{ + long n = right_inverse.size(); + long m = left_inverse.size(); + for (long i = 0; i < n; ++i) { + // Ignore negative indices + if (right_inverse[i] < 0) { + continue; + } + if ((right_inverse[i] >= m) || (left_inverse[right_inverse[i]] != i)) { + return false; + } + } + return true; +} + +bool are_polygon_mesh_edges_valid(const std::vector& next, const std::vector& prev) +{ + if (next.size() != prev.size()) { + spdlog::warn("next and prev are not inverse"); + return false; + } + + // prev is a right and left inverse for next + if ((!is_one_sided_inverse(next, prev)) || (!is_one_sided_inverse(prev, next))) { + spdlog::warn("next and prev are not inverse"); + return false; + } + + return true; +} + + +bool are_polygon_mesh_vertices_valid( + const std::vector& opp, + const std::vector& prev, + const std::vector& to, + const 
std::vector& out) +{ + if (prev.size() != to.size()) { + return false; + } + long n_halfedges = to.size(); + + // Generate per halfedge vertex circulation and from maps + std::vector circ(n_halfedges); + std::vector from(n_halfedges); + for (long hi = 0; hi < n_halfedges; ++hi) { + circ[hi] = prev[opp[hi]]; + from[hi] = to[opp[hi]]; + } + + // Build vertices from vertex circulation + std::vector> vert; + build_orbits(circ, vert); + + // Number of vertices in out match the number of orbits + if (out.size() != vert.size()) { + spdlog::warn("out does not have the right number of vertices"); + return false; + } + + // to is invariant under circulation + if (!is_invariant_under_permutation(to, circ)) { + spdlog::warn("to is not invariant under vertex circulation"); + return false; + } + + // out is a right inverse for from + if (!is_one_sided_inverse(from, out)) { + spdlog::warn("out is not a right inverse for from"); + return false; + } + + return true; +} + +bool are_polygon_mesh_faces_valid( + const std::vector& next, + const std::vector& he2f, + const std::vector& f2he) +{ + if (next.size() != he2f.size()) { + return false; + } + + // Build faces from next map + std::vector> faces; + build_orbits(next, faces); + + // Number of faces in f2he match the number of orbits + if (f2he.size() != faces.size()) { + spdlog::warn("f2he does not have the right number of faces"); + return false; + } + + // he2f is invariant under next + if (!is_invariant_under_permutation(he2f, next)) { + spdlog::warn("he2f is not invariant under next"); + return false; + } + + // f2he is a right inverse for he2f + if (!is_one_sided_inverse(he2f, f2he)) { + spdlog::warn("f2he is not a right inverse for he2f"); + return false; + } + + return true; +} + + +} // namespace Penner diff --git a/src/util/spanning_tree.cpp b/src/util/spanning_tree.cpp new file mode 100644 index 0000000..a163a87 --- /dev/null +++ b/src/util/spanning_tree.cpp @@ -0,0 +1,473 @@ + +#include "util/spanning_tree.h" + +#include 
"util/vector.h" + +#include + +namespace Penner { + +void cut_boundary_edges(const Mesh& m, std::vector& is_cut) { + // Do nothing if not doubled mesh + if (m.type[0] == 0) return; + + int num_halfedges = m.n_halfedges(); + for (int hij = 0; hij < num_halfedges; ++hij) + { + if (m.opp[m.R[hij]] == hij) + { + is_cut[hij] = true; + } + } + +} + +void cut_copy_edges(const Mesh& m, std::vector& is_cut) { + // Do nothing if not doubled mesh + if (m.type[0] == 0) return; + + int num_halfedges = m.n_halfedges(); + for (int hij = 0; hij < num_halfedges; ++hij) + { + if ((m.type[hij] == 2) && (m.type[m.opp[hij]] == 2)) + { + is_cut[hij] = true; + } + } + +} + + +bool Forest::is_valid_forest(const Mesh& m) const +{ + int num_edges = m_edges.size(); + int num_vertices = m_out.size(); + if (m_to.size() != m_edges.size()) { + spdlog::error( + "to and edges have inconsistent sizes {} and {}", + m_to.size(), + m_edges.size()); + return false; + } + if (m_from.size() != m_edges.size()) { + spdlog::error( + "from and edges have inconsistent sizes {} and {}", + m_from.size(), + m_edges.size()); + return false; + } + + // Check number of vertices and edges compatible for a spanning tree + // TODO Add check for double mesh + if ((num_vertices != num_edges + 1) && (m.type[0] == 0)) { + spdlog::error("Spanning tree has {} edges and {} vertices", num_edges, num_vertices); + return false; + } + + // Check edge conditions + for (int eij = 0; eij < num_edges; ++eij) { + // Check out and from are inverse + if (m_out[m_from[eij]] != eij) { + spdlog::error( + "Edge {} is from {} with out edge {}", + eij, + m_from[eij], + m_out[m_from[eij]]); + return false; + } + + // Check edges are all in tree + if (!m_edge_is_in_forest[m_edges[eij]]) { + spdlog::error("Edge {} not marked in tree", eij); + return false; + } + } + + return true; +} + +PrimalTree::PrimalTree( + const Mesh& m, + const std::vector& weights, + int root, + bool use_shortest_path) +{ + // Generate minimal spanning tree data + int 
num_halfedges = m.n_halfedges(); + std::vector is_cut(num_halfedges, false); + cut_copy_edges(m, is_cut); + + std::vector halfedge_from_vertex = + build_primal_forest(m, weights, is_cut, root, use_shortest_path); + + // Initialize Primal Tree data structures + initialize_primal_tree(m, halfedge_from_vertex); + + assert(is_valid_primal_tree(m)); +} + +void PrimalTree::initialize_primal_tree( + const Mesh& m, + const std::vector& halfedge_from_vertex) +{ + // Get edge maps + std::vector he2e; + std::vector e2he; + build_edge_maps(m, he2e, e2he); + + // Initialize data structures + int num_vertices = m.n_vertices(); + m_edges.reserve(num_vertices); + m_from.reserve(num_vertices); + m_to.reserve(num_vertices); + m_out = std::vector(num_vertices, -1); + m_edge_is_in_forest = std::vector(e2he.size(), false); + + for (int vj = 0; vj < num_vertices; ++vj) { + // Get edge to vertex (skipping the root vertex with no incoming edge) + int hij = halfedge_from_vertex[vj]; + if (hij < 0) continue; + int eij = he2e[hij]; + int vi = m.to[m.opp[hij]]; + + // Add the edge to the spanning tree + m_out[vj] = m_edges.size(); + m_from.push_back(vj); + m_to.push_back(vi); + m_edges.push_back(eij); + m_edge_is_in_forest[eij] = true; + } +} + +bool PrimalTree::is_valid_primal_tree(const Mesh& m) const +{ + if (!is_valid_forest(m)) return false; + + // Get edge maps + std::vector he2e; + std::vector e2he; + build_edge_maps(m, he2e, e2he); + + // Check edge conditions + int num_edges = m_edges.size(); + for (int i = 0; i < num_edges; ++i) { + int ei = m_edges[i]; + int h0 = e2he[ei]; + int h1 = m.opp[h0]; + + // Check vertices adjacent to each edge are actually adjacent to the edge + int v0 = m.to[h0]; + int v1 = m.to[h1]; + if (!((m_to[i] == v0) && (m_from[i] == v1)) && !((m_to[i] == v1) && (m_from[i] == v0))) { + return false; + } + } + + return true; +} + +DualTree::DualTree( + const Mesh& m, + const std::vector& weights, + int root, + bool use_shortest_path) +{ + // Generate minimal 
spanning tree data + int num_halfedges = m.n_halfedges(); + std::vector is_cut(num_halfedges, false); + cut_boundary_edges(m, is_cut); + + std::vector halfedge_from_face = + build_dual_forest(m, weights, is_cut, root, use_shortest_path); + + // Initialize Dual Tree data structures + initialize_dual_tree(m, halfedge_from_face); + + assert(is_valid_dual_tree(m)); +} + +void DualTree::initialize_dual_tree( + const Mesh& m, + const std::vector& halfedge_from_face) +{ + // Get edge maps + std::vector he2e; + std::vector e2he; + build_edge_maps(m, he2e, e2he); + + // Initialize dual tree data structures + int num_faces = m.n_faces(); + m_edges.reserve(num_faces); + m_from.reserve(num_faces); + m_to.reserve(num_faces); + m_out = std::vector(num_faces, -1); + m_edge_is_in_forest = std::vector(e2he.size(), false); + + for (int fj = 0; fj < num_faces; ++fj) { + // Get edge to vertex (skipping the root vertex with no incoming edge) + int hij = halfedge_from_face[fj]; + if (hij < 0) continue; + int eij = he2e[hij]; + int fi = m.f[hij]; + + // Add the edge to the spanning tree + m_out[fj] = m_edges.size(); + m_from.push_back(fj); + m_to.push_back(fi); + m_edges.push_back(eij); + m_edge_is_in_forest[eij] = true; + } +} + +bool DualTree::is_valid_dual_tree(const Mesh& m) const +{ + if (!is_valid_forest(m)) return false; + + // Get edge maps + std::vector he2e; + std::vector e2he; + build_edge_maps(m, he2e, e2he); + + // Check edge conditions + int num_edges = m_edges.size(); + for (int i = 0; i < num_edges; ++i) { + int ei = m_edges[i]; + int h0 = e2he[ei]; + int h1 = m.opp[h0]; + + // Check dual vertices adjacent to each dual edge are actually adjacent to the edge + int f0 = m.f[h0]; + int f1 = m.f[h1]; + if (!((m_to[i] == f0) && (m_from[i] == f1)) && !((m_to[i] == f1) && (m_from[i] == f0))) { + spdlog::error("Faces {} and {} are not adjacent", f0, f1); + return false; + } + } + + return true; +} + +PrimalCotree::PrimalCotree( + const Mesh& m, + const std::vector& weights, + 
const DualTree& dual_tree, + int root, + bool use_shortest_path) +{ + // Get edge maps + std::vector he2e; + std::vector e2he; + build_edge_maps(m, he2e, e2he); + + // Generate maximal spanning tree data that does not intersect the primal tree + int num_halfedges = he2e.size(); + std::vector is_cut(num_halfedges, false); + for (int hij = 0; hij < num_halfedges; ++hij) { + if (dual_tree.is_edge_in_tree(he2e[hij])) { + is_cut[hij] = true; + } + } + cut_copy_edges(m, is_cut); + + std::vector halfedge_from_vertex = + build_primal_forest(m, weights, is_cut, root, use_shortest_path); + + // Initialize Primal Tree data structures + initialize_primal_tree(m, halfedge_from_vertex); + + assert(is_valid_primal_cotree(m, dual_tree)); +} + +bool PrimalCotree::is_valid_primal_cotree(const Mesh& m, const DualTree& dual_tree) const +{ + // Check if valid primal tree structure + if (!is_valid_primal_tree(m)) return false; + + // Check that it does not intersect the primal tree + int num_edges = n_edges(); + for (int i = 0; i < num_edges; ++i) { + int ei = edge(i); + + // Check edge is in the primal tree and not in the dual tree + if (dual_tree.is_edge_in_tree(ei)) { + spdlog::error("Primal Cotree edge {} also in dual tree", ei); + return false; + } + } + + return true; +} + +DualCotree::DualCotree( + const Mesh& m, + const std::vector& weights, + const PrimalTree& primal_tree, + int root, + bool use_shortest_path) +{ + // Get edge maps + std::vector he2e; + std::vector e2he; + build_edge_maps(m, he2e, e2he); + + // Generate maximal spanning tree data that does not intersect the primal tree + int num_halfedges = he2e.size(); + std::vector is_cut(num_halfedges, false); + for (int hij = 0; hij < num_halfedges; ++hij) { + if (primal_tree.is_edge_in_tree(he2e[hij])) { + is_cut[hij] = true; + } + } + cut_boundary_edges(m, is_cut); + + std::vector halfedge_from_face = + build_dual_forest(m, weights, is_cut, root, use_shortest_path); + + // Initialize Dual Tree data structures + 
initialize_dual_tree(m, halfedge_from_face); + + assert(is_valid_dual_cotree(m, primal_tree)); +} + + +bool DualCotree::is_valid_dual_cotree(const Mesh& m, const PrimalTree& primal_tree) const +{ + // Check if valid dual tree structure + if (!is_valid_dual_tree(m)) return false; + + // Check that it does not intersect the primal tree + int num_edges = n_edges(); + for (int i = 0; i < num_edges; ++i) { + int ei = edge(i); + + // Check edge is in the dual tree and not in the primal tree + if (primal_tree.is_edge_in_tree(ei)) { + spdlog::error("Dual Cotree edge {} also in primal tree", ei); + return false; + } + } + + return true; +} + +// Generic method to build a forest given a circulator for halfedges around a vertex and maps +// between vertices and halfedges +std::vector build_forest( + const std::vector& circ, + const std::vector& v2h, + const std::vector& h2v, + const std::vector& weights, + const std::vector& is_cut, + int v_start, + bool use_shortest_path) +{ + // Initialize an array to keep track of vertices + int num_vertices = v2h.size(); + Scalar max_cost = vector_max(weights); + std::vector is_processed_vertex(num_vertices, false); + std::vector halfedge_from_vertex(num_vertices, -1); + + // Initialize vertex cost with value above maximum possible weight + Scalar inf_cost = std::max((num_vertices * max_cost) + 1.0, max_cost + 1.0); + std::vector vertex_cost(num_vertices, inf_cost); + + // Mark root with 0 (or cost lower than other edges for all negative weights) + Scalar root_cost = min(max_cost, 0.0); + vertex_cost[v_start] = root_cost; + + // Define a custom comparison function for the priority queue + typedef std::pair WeightedVertex; + auto vertex_compare = [](const WeightedVertex& left, const WeightedVertex& right) { + return left.second > right.second; + }; + + // Initialize the stack of vertices to process with all vertices + std::priority_queue, decltype(vertex_compare)> + vertices_to_process(vertex_compare); + for (int vi = 0; vi < num_vertices; 
++vi) { + vertices_to_process.push(std::make_pair(vi, vertex_cost[vi])); + } + + // Perform Prim or Dijkstra algorithm + while (!vertices_to_process.empty()) { + // Get the next vertex to process + auto [vi, vi_cost] = vertices_to_process.top(); + vertices_to_process.pop(); + + // Skip already processed vertices + if (is_processed_vertex[vi]) continue; + is_processed_vertex[vi] = true; + + // Check if vertex has uninitialized cost and give it the root cost if so + if (vertex_cost[vi] == inf_cost) { + vertex_cost[vi] = root_cost; + } + + // Iterate over the vertex circulator via halfedges + int h_start = v2h[vi]; + int hij = h_start; + do { + // Get the vertex in the one ring at the tip of the halfedge + int vj = h2v[hij]; + + // Get candidate edge cost (either path length or edge weight) + Scalar candidate_cost; + if (use_shortest_path) { + candidate_cost = weights[hij] + vertex_cost[vi]; + } else { + candidate_cost = weights[hij]; + } + + // Check if the edge to the tip vertex is the best seen so far + if ((!is_cut[hij]) && (!is_processed_vertex[vj]) && + (vertex_cost[vj] >= candidate_cost)) { + halfedge_from_vertex[vj] = hij; + vertex_cost[vj] = candidate_cost; + vertices_to_process.push(std::make_pair(vj, vertex_cost[vj])); + } + + // Progress to the next halfedge in the vertex circulator + hij = circ[hij]; + } while (hij != h_start); + } + + return halfedge_from_vertex; +} + +std::vector build_primal_forest( + const Mesh& m, + const std::vector& weights, + const std::vector& is_cut, + int v_start, + bool use_shortest_path) +{ + // Construct vertex circulator + std::vector circ = vector_compose(m.n, m.opp); + + // Construct maps from vertices to and from halfedges + const std::vector& v2h = m.out; + std::vector h2v = m.to; + + // Use generic forest constructor + return build_forest(circ, v2h, h2v, weights, is_cut, v_start, use_shortest_path); +} + +std::vector build_dual_forest( + const Mesh& m, + const std::vector& weights, + const std::vector& is_cut, + int 
f_start, + bool use_shortest_path) +{ + // Face circulator is next halfedge + const std::vector& circ = m.n; + + // Construct maps from faces to and from halfedges + const std::vector& f2h = m.h; + std::vector h2f = vector_compose(m.f, m.opp); + + // Use generic forest constructor + return build_forest(circ, f2h, h2f, weights, is_cut, f_start, use_shortest_path); +} + +} // namespace Penner \ No newline at end of file diff --git a/src/core/vector.cpp b/src/util/vector.cpp similarity index 98% rename from src/core/vector.cpp rename to src/util/vector.cpp index 9a0357e..1a514d4 100644 --- a/src/core/vector.cpp +++ b/src/util/vector.cpp @@ -28,9 +28,9 @@ * Courant Institute of Mathematical Sciences, New York University, USA * * * * *********************************************************************************/ -#include "vector.hh" +#include "util/vector.h" -namespace CurvatureMetric { +namespace Penner { void convert_dense_vector_to_sparse(const VectorX& vector_dense, MatrixX& vector_sparse) { @@ -155,4 +155,4 @@ void enumerate_boolean_array( } } -} // namespace CurvatureMetric +} // namespace Penner diff --git a/src/core/vf_mesh.cpp b/src/util/vf_mesh.cpp similarity index 60% rename from src/core/vf_mesh.cpp rename to src/util/vf_mesh.cpp index 2157888..fc85897 100644 --- a/src/core/vf_mesh.cpp +++ b/src/util/vf_mesh.cpp @@ -28,11 +28,17 @@ * Courant Institute of Mathematical Sciences, New York University, USA * * * * *********************************************************************************/ -#include "vf_mesh.hh" +#include "util/vf_mesh.h" + +#include "util/vector.h" #include +#include +#include +#include +#include -namespace CurvatureMetric { +namespace Penner { int count_components(const Eigen::MatrixXi& F) { @@ -47,11 +53,12 @@ void remove_unreferenced( std::vector& new_to_old_map) { int num_faces = F.rows(); + int cols = F.cols(); // Iterate over faces to find all referenced vertices in sorted order std::vector referenced_vertices; for (int fi = 0; 
fi < num_faces; ++fi) { - for (int j = 0; j < 3; ++j) { + for (int j = 0; j < cols; ++j) { int vk = F(fi, j); referenced_vertices.push_back(vk); } @@ -73,9 +80,9 @@ void remove_unreferenced( } // Reindex the vertices in the face list - FN.resize(num_faces, 3); + FN.resize(num_faces, cols); for (int fi = 0; fi < num_faces; ++fi) { - for (int j = 0; j < 3; ++j) { + for (int j = 0; j < cols; ++j) { int vk = F(fi, j); int k = old_to_new_map[vk]; FN(fi, j) = k; @@ -110,4 +117,91 @@ void cut_mesh_along_parametrization_seams( } } -} // namespace CurvatureMetric +std::tuple generate_seams( + const Eigen::MatrixXd& V, + const Eigen::MatrixXi& F, + const Eigen::MatrixXi& FT) +{ + // get boundary edges of the uv map + Eigen::MatrixXi uv_edges; + Eigen::VectorXi J, K; + igl::boundary_facets(FT, uv_edges, J, K); + + // get 3D vertices on seam + int num_seam_edges = J.size(); + Eigen::MatrixXi edges(num_seam_edges, 2); + for (int i = 0; i < num_seam_edges; ++i) + { + edges(i, 0) = F(J[i], (K[i] + 1)%3); + edges(i, 1) = F(J[i], (K[i] + 2)%3); + } + + // reindex to remove redundant + Eigen::MatrixXi seam_edges; + std::vector new_to_old_map; + remove_unreferenced(edges, seam_edges, new_to_old_map); + + // get new vertices + int num_seam_vertices = new_to_old_map.size(); + Eigen::MatrixXd seam_vertices(num_seam_vertices, 3); + for (int i = 0; i < num_seam_vertices; ++i) { + seam_vertices.row(i) = V.row(new_to_old_map[i]); + } + + return std::make_tuple(seam_vertices, seam_edges); +} + +std::vector find_boundary_vertices(const Eigen::MatrixXi& F) +{ + // Get the boundary edges + Eigen::MatrixXi B; + igl::boundary_facets(F, B); + + // Get all unique vertex indices in the boundary + Eigen::VectorXi bd_vertices; + igl::unique(B, bd_vertices); + + // Convert Eigen vector to standard vector + return convert_vector(bd_vertices); +} + +std::vector compute_boundary_vertices(const Eigen::MatrixXi& F, int num_vertices) +{ + // Get the boundary vertices + auto bd_vertices = 
find_boundary_vertices(F); + + // Make list of boundary vertices into boolean mask + std::vector is_boundary_vertex(num_vertices, false); + int num_bd_vertices = bd_vertices.size(); + for (int i = 0; i < num_bd_vertices; ++i) + { + int vi = bd_vertices[i]; + is_boundary_vertex[vi] = true; + } + + return is_boundary_vertex; +} + +Eigen::MatrixXd inflate_mesh(const Eigen::MatrixXd& V, const Eigen::MatrixXi& F, double inflation_distance) +{ + // Get vertex normals + Eigen::MatrixXd N; + igl::per_vertex_normals(V, F, N); + + // Displace mesh vertices along normal + int num_vertices = V.rows(); + Eigen::MatrixXd V_inflated(num_vertices, 3); + for (int vi = 0; vi < num_vertices; ++vi) + { + // Add displacement along the vertex normal (if the normal is well defined) + Eigen::RowVector3d ni = Eigen::RowVector3d::Zero(); + if (!(isnan(N(vi, 0)) || isnan(N(vi, 1)) || isnan(N(vi, 2)))) { + ni = N.row(vi); + } + V_inflated.row(vi) = V.row(vi) + inflation_distance * ni; + } + + return V_inflated; +} + +} // namespace Penner